plugin_service.py
import asyncio
import threading
import atexit
import json
import os
import shutil
import time
import traceback
import uuid
from collections import namedtuple
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
from os import environ
import yaml
from apscheduler.schedulers.background import BackgroundScheduler
from flask import jsonify, make_response
from .tsanaclient import TSANAClient
from .util.constant import InferenceState
from .util.constant import ModelState
from .util.constant import STATUS_SUCCESS, STATUS_FAIL
from .util.constant import INSTANCE_ID_KEY
from .util.context import Context
from .util.meta import insert_meta, get_meta, update_state, get_model_list, clear_state_when_necessary
from .util.model import upload_model, download_model
from .util.monitor import init_monitor, run_monitor, stop_monitor
from .util.timeutil import str_to_dt
from .util.kafka_operator import send_message, consume_loop
from .util.job_record import JobRecord
import zlib
import base64
import gc
#async infras
#executor = ProcessPoolExecutor(max_workers=2)
#ThreadPool easy for debug
#executor = ThreadPoolExecutor(max_workers=2)
#monitor infras
sched = BackgroundScheduler()
from telemetry import log
def load_config(path):
try:
with open(path, 'r') as config_file:
config_yaml = yaml.safe_load(config_file)
Config = namedtuple('Config', sorted(config_yaml))
config = Config(**config_yaml)
return config
except Exception:
return None
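# Illustrative example (not part of the service): load_config reads a YAML
# mapping and exposes it as a read-only namedtuple, so keys become attributes.
# Of the keys shown below, model_dir, auto_data_retrieving and
# models_in_training_limit_per_instance are the ones read elsewhere in this
# file; treat the concrete values as placeholders.
#
#   # service_config.yaml
#   model_dir: /tmp/models
#   auto_data_retrieving: false
#   models_in_training_limit_per_instance: 2
#
#   config = load_config('service_config.yaml')
#   config.model_dir          # -> '/tmp/models' (attribute access, not dict access)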
class PluginService():
def __init__(self, trainable=True):
config_file = environ.get('SERVICE_CONFIG_FILE')
config = load_config(config_file)
if config is None:
log.error("No configuration '%s', or the configuration is not in JSON format. " % (config_file))
exit()
self.config = config
self.tsanaclient = TSANAClient()
self.trainable = trainable
if self.trainable:
init_monitor(config)
sched.add_job(func=lambda: run_monitor(config), trigger="interval", seconds=10)
sched.start()
atexit.register(lambda: stop_monitor(config))
atexit.register(lambda: sched.shutdown())
self.training_topic = self.__class__.__name__ + '-training'
training_thread = threading.Thread(target=consume_loop, args=(self.train_wrapper, self.training_topic), daemon=True)
training_thread.start()
self.inference_topic = self.__class__.__name__ + '-inference'
inference_thread = threading.Thread(target=consume_loop, args=(self.inference_wrapper, self.inference_topic), daemon=True)
inference_thread.start()
# verify parameters
# Parameters:
    # parameters: a dict object which should include
# apiEndpoint: api endpoint for specific user
# apiKey: api key for specific user
# groupId: groupId in TSANA, which is copied from inference request, or from the entity
# series_sets: Array of series set
    # context: request context, including subscription and model_id
# Return:
# STATUS_SUCCESS/STATUS_FAIL, error_message
def do_verify(self, parameters, context:Context):
return STATUS_SUCCESS, ''
    # check whether the model needs to be retrained this time
# Parameters:
# current_series_set: series set used in instance now
# current_params: params used in instance now
# new_series_set: series set used in this request
# new_params: params used in this request
    # context: request context, including subscription and model_id
# Return:
# True/False
def need_retrain(self, current_series_set, current_params, new_series_set, new_params, context:Context):
return True
# train model
# Parameters:
    # model_dir: output dir for the model training result; the framework will handle model storage
    # parameters: training request body which includes
# apiEndpoint: api endpoint for specific user
# apiKey: api key for specific user
# groupId: groupId in TSANA
# seriesSets: Array of series set
# startTime: start timestamp
# endTime: end timestamp
# instance: an info dict for this instance which includes
# instanceId: UUID for this instance
# params: training parameters for this request
    # series: an array of Series objects, or None if config.auto_data_retrieving is False
# Series include
# series_id: UUID
# dim: dimension dict for this series
# fields: 1-d string array, ['time', '__VAL__', '__FIELD__.ExpectedValue', '__FIELD__.IsAnomaly', '__FIELD__.PredictionValue', '__FIELD__.PredictionModelScore', '__FIELD__.IsSuppress', '__FIELD__.Period', '__FIELD__.CostPoint', '__FIELD__.Mean', '__FIELD__.STD', '__FIELD__.TrendChangeAnnotate', '__FIELD__.TrendChang...tateIgnore', '__FIELD__.AnomalyAnnotate', ...]
# value: 2-d array, [['2020-10-12T17:55:00Z', 1.0, None, None, None, None, None, None, None, None, None, None, None, None, ...]]
    # context: request context, including subscription and model_id
# Return:
# STATUS_SUCCESS/STATUS_FAIL, error_message
def do_train(self, model_dir, parameters, series, context:Context):
return STATUS_SUCCESS, ''
# inference model
# Parameters:
    # model_dir: input dir for model inference; the model has been downloaded and unpacked to this dir
    # parameters: inference request body which includes
# apiEndpoint: api endpoint for specific user
# apiKey: api key for specific user
# groupId: groupId in TSANA
# seriesSets: Array of series set
# startTime: start timestamp
# endTime: end timestamp
# instance: an info dict for this instance which includes
# instanceId: UUID for this instance
# params: inference parameters for this request
    # target: a dict for the inference result which includes
# dimensions: dimension name list for target, defined when register plugin
# metrics: metric name list for target, defined when register plugin
# granularityName: granularity name for target, defined when register plugin
# hookIds: hook id list, defined when register plugin
    # series: an array of Series objects, or None if config.auto_data_retrieving is False
# Series include
# series_id: UUID
# dim: dimension dict for this series
# fields: 1-d string array, ['time', '__VAL__', '__FIELD__.ExpectedValue', '__FIELD__.IsAnomaly', '__FIELD__.PredictionValue', '__FIELD__.PredictionModelScore', '__FIELD__.IsSuppress', '__FIELD__.Period', '__FIELD__.CostPoint', '__FIELD__.Mean', '__FIELD__.STD', '__FIELD__.TrendChangeAnnotate', '__FIELD__.TrendChang...tateIgnore', '__FIELD__.AnomalyAnnotate', ...]
# value: 2-d array, [['2020-10-12T17:55:00Z', 1.0, None, None, None, None, None, None, None, None, None, None, None, None, ...]]
    # context: request context, including subscription and model_id
# Return:
# result: STATUS_SUCCESS/STATUS_FAIL
    # values: a list of value dicts, or None if you do not need the framework to handle inference result storage; each value dict should include
# metricId: UUID, comes from metrics segment of target of request body
# dimension: dimension dict for this series, dimension names come from target segment of request body
# timestamps: string timestamps list
# values: double type value list, matching timestamps
# fields: field names list, optional
# fieldValues: 2-d array which include a value list for each field, optional
# message: error message
def do_inference(self, model_dir, parameters, series, context:Context):
return STATUS_SUCCESS, None, ''
def do_delete(self, parameters, model_id):
return STATUS_SUCCESS, ''
def get_data_time_range(self, parameters, is_training=False):
return str_to_dt(parameters['startTime']), str_to_dt(parameters['endTime'])
def train_wrapper(self, message):
start = time.time()
subscription = message['subscription']
model_id = message['model_id']
task_id = message['job_id']
parameters = message['params']
model_dir = None
log.info("Start train wrapper for model %s by %s " % (model_id, subscription))
try:
self.tsanaclient.save_training_status(task_id, parameters, ModelState.Pending.name)
model_dir = os.path.join(self.config.model_dir, subscription + '_' + model_id + '_' + str(time.time()))
os.makedirs(model_dir, exist_ok=True)
series = None
if self.config.auto_data_retrieving:
start_time, end_time = self.get_data_time_range(parameters, True)
series = self.tsanaclient.get_timeseries_gw(parameters, parameters['seriesSets'], start_time, end_time)
update_state(self.config, subscription, model_id, ModelState.Training)
self.tsanaclient.save_training_status(task_id, parameters, ModelState.Training.name)
result, message = self.do_train(model_dir, parameters, series, Context(subscription, model_id, task_id))
if result == STATUS_SUCCESS:
self.train_callback(subscription, model_id, task_id, model_dir, parameters, ModelState.Ready, None)
else:
raise Exception(message)
except Exception as e:
self.train_callback(subscription, model_id, task_id, None, parameters, ModelState.Failed, str(e))
result = STATUS_FAIL
finally:
if model_dir is not None:
shutil.rmtree(model_dir, ignore_errors=True)
total_time = (time.time() - start)
log.duration("training_task_duration", total_time, model_id=model_id, task_id=task_id, result=result, endpoint=parameters['apiEndpoint'], group_id=parameters['groupId'], group_name=parameters['groupName'].replace(' ', '_'), instance_id=parameters['instance']['instanceId'], instance_name=parameters['instance']['instanceName'].replace(' ', '_'))
log.count("training_task_count", 1, model_id=model_id, task_id=task_id, result=result, endpoint=parameters['apiEndpoint'], group_id=parameters['groupId'], group_name=parameters['groupName'].replace(' ', '_'), instance_id=parameters['instance']['instanceId'], instance_name=parameters['instance']['instanceName'].replace(' ', '_'))
gc.collect()
return STATUS_SUCCESS, ''
# inference_window: 30
# endTime: endtime
def inference_wrapper(self, message):
start = time.time()
subscription = message['subscription']
model_id = message['model_id']
task_id = message['job_id']
        parameters = message['params']
        model_dir = None
log.info("Start inference wrapper %s by %s " % (model_id, subscription))
try:
self.tsanaclient.save_inference_status(task_id, parameters, InferenceState.Pending.name)
result, message = self.do_verify(parameters, Context(subscription, model_id, task_id))
if result != STATUS_SUCCESS:
raise Exception('Verify failed! ' + message)
model_dir = os.path.join(self.config.model_dir, subscription + '_' + model_id + '_' + str(time.time()))
os.makedirs(model_dir, exist_ok=True)
if self.trainable:
download_model(self.config, subscription, model_id, model_dir)
start_time, end_time = self.get_data_time_range(parameters)
if self.config.auto_data_retrieving:
series = self.tsanaclient.get_timeseries_gw(parameters, parameters['seriesSets'], start_time, end_time)
else:
series = None
self.tsanaclient.save_inference_status(task_id, parameters, InferenceState.Running.name)
result, values, message = self.do_inference(model_dir, parameters, series, Context(subscription, model_id, task_id))
self.inference_callback(subscription, model_id, task_id, parameters, result, values, message)
except Exception as e:
            result = STATUS_FAIL
            self.inference_callback(subscription, model_id, task_id, parameters, STATUS_FAIL, None, str(e))
finally:
            if model_dir is not None:
                shutil.rmtree(model_dir, ignore_errors=True)
total_time = (time.time() - start)
log.duration("inference_task_duration", total_time, model_id=model_id, task_id=task_id, result=result, endpoint=parameters['apiEndpoint'], group_id=parameters['groupId'], group_name=parameters['groupName'].replace(' ', '_'), instance_id=parameters['instance']['instanceId'], instance_name=parameters['instance']['instanceName'].replace(' ', '_'))
log.count("inference_task_count", 1, model_id=model_id, task_id=task_id, result=result, endpoint=parameters['apiEndpoint'], group_id=parameters['groupId'], group_name=parameters['groupName'].replace(' ', '_'), instance_id=parameters['instance']['instanceId'], instance_name=parameters['instance']['instanceName'].replace(' ', '_'))
gc.collect()
return STATUS_SUCCESS, ''
def train_callback(self, subscription, model_id, task_id, model_dir, parameters, model_state, last_error=None):
try:
meta = get_meta(self.config, subscription, model_id)
if meta is None or meta['state'] == ModelState.Deleted.name:
return STATUS_FAIL, 'Model is not found! '
if model_state == ModelState.Ready:
result, message = upload_model(self.config, subscription, model_id, model_dir)
if result != STATUS_SUCCESS:
model_state = ModelState.Failed
last_error = 'Model storage failed! ' + message
except Exception as e:
model_state = ModelState.Failed
last_error = str(e)
raise e
finally:
update_state(self.config, subscription, model_id, model_state, None, last_error)
self.tsanaclient.save_training_status(task_id, parameters, model_state.name, last_error)
self.tsanaclient.save_training_result(parameters, model_id, model_state.name, last_error)
error_message = last_error + '\n' + traceback.format_exc() if model_state != ModelState.Ready else None
log.info("Training callback by %s, model_id = %s, task_id = %s, state = %s, last_error = %s" % (subscription, model_id, task_id, model_state, error_message if error_message is not None else ''))
def inference_callback(self, subscription, model_id, task_id, parameters, result, values, last_error=None):
try:
if result == STATUS_SUCCESS and values != None:
for value in values:
result, last_error = self.tsanaclient.save_data_points(parameters, value['metricId'], value['dimension'], value['timestamps'], value['values'],
value['fields'] if 'fields' in value else None, value['fieldValues'] if 'fieldValues' in value else None)
if result != STATUS_SUCCESS:
break
except Exception as e:
result = STATUS_FAIL
last_error = str(e)
raise e
finally:
if result == STATUS_SUCCESS:
self.tsanaclient.save_inference_status(task_id, parameters, InferenceState.Ready.name)
else:
self.tsanaclient.save_inference_status(task_id, parameters, InferenceState.Failed.name, last_error)
error_message = last_error + '\n' + traceback.format_exc() if result != STATUS_SUCCESS else None
log.info("Inference callback by %s, model_id = %s, task_id = %s, result = %s, last_error = %s" % (subscription, model_id, task_id, result, error_message if error_message is not None else ''))
def train(self, request):
request_body = json.loads(request.data)
instance_id = request_body['instance']['instanceId']
if not self.trainable:
return make_response(jsonify(dict(instanceId=instance_id, modelId='', taskId='', result=STATUS_SUCCESS, message='Model is not trainable', modelState=ModelState.Ready.name)), 200)
subscription = request.headers.get('apim-subscription-id', 'Official')
request_body[INSTANCE_ID_KEY] = subscription
result, message = self.do_verify(request_body, Context(subscription, '', ''))
if result != STATUS_SUCCESS:
return make_response(jsonify(dict(instanceId=instance_id, modelId='', taskId='', result=STATUS_FAIL, message='Verify failed! ' + message, modelState=ModelState.Deleted.name)), 400)
models_in_train = []
for model in get_model_list(self.config, subscription):
if 'instanceId' in model and model['instanceId'] == request_body['instance']['instanceId'] and (model['state'] == ModelState.Training.name or model['state'] == ModelState.Pending.name):
models_in_train.append(model['modelId'])
if len(models_in_train) >= self.config.models_in_training_limit_per_instance:
return make_response(jsonify(dict(instanceId=instance_id, modelId='', taskId='', result=STATUS_FAIL, message='Models in training limit reached! Abort training this time.', modelState=ModelState.Deleted.name)), 400)
log.info('Create training task')
try:
task_id = str(uuid.uuid1())
if 'modelId' in request_body and request_body['modelId']:
model_id = request_body['modelId']
else:
model_id = str(uuid.uuid1())
insert_meta(self.config, subscription, model_id, request_body)
job = JobRecord(task_id, JobRecord.MODE_TRAINING, self.__class__.__name__, model_id, subscription, request_body)
send_message(self.training_topic, dict(job))
log.count("training_task_throughput_in", 1, topic_name=self.training_topic, model_id=model_id, endpoint=request_body['apiEndpoint'], group_id=request_body['groupId'], group_name=request_body['groupName'].replace(' ', '_'), instance_id=request_body['instance']['instanceId'], instance_name=request_body['instance']['instanceName'].replace(' ', '_'))
return make_response(jsonify(dict(instanceId=instance_id, modelId=model_id, taskId=task_id, result=STATUS_SUCCESS, message='Training task created', modelState=ModelState.Training.name)), 201)
except Exception as e:
meta = get_meta(self.config, subscription, model_id)
error_message = str(e)
if meta is not None:
update_state(self.config, subscription, model_id, ModelState.Failed, None, error_message)
log.error("Create training task failed! subscription = %s, model_id = %s, task_id = %s, last_error = %s" % (subscription, model_id, task_id, error_message + '\n' + traceback.format_exc()))
return make_response(jsonify(dict(instanceId=instance_id, modelId=model_id, taskId=task_id, result=STATUS_FAIL, message='Fail to create new task ' + error_message, modelState=ModelState.Failed.name)), 400)
def inference(self, request, model_id):
request_body = json.loads(request.data)
instance_id = request_body['instance']['instanceId']
subscription = request.headers.get('apim-subscription-id', 'Official')
request_body[INSTANCE_ID_KEY] = subscription
if self.trainable:
meta = get_meta(self.config, subscription, model_id)
if meta is None:
return make_response(jsonify(dict(instanceId=instance_id, modelId=model_id, taskId='', result=STATUS_FAIL, message='Model is not found!', modelState=ModelState.Deleted.name)), 400)
if meta['state'] != ModelState.Ready.name:
return make_response(jsonify(dict(instanceId=instance_id, modelId=model_id, taskId='', result=STATUS_FAIL, message='Cannot do inference right now, status is ' + meta['state'], modelState=meta['state'])), 400)
try:
series_set = json.loads(meta['series_set'])
except:
series_set = json.loads(zlib.decompress(base64.b64decode(meta['series_set'].encode("ascii"))).decode('utf-8'))
para = json.loads(meta['para'])
current_set = json.dumps(series_set, sort_keys=True)
current_params = json.dumps(para, sort_keys=True)
new_set = json.dumps(request_body['seriesSets'], sort_keys=True)
new_params = json.dumps(request_body['instance']['params'], sort_keys=True)
if current_set != new_set or current_params != new_params:
if self.need_retrain(series_set, para, request_body['seriesSets'], request_body['instance']['params'], Context(subscription, model_id, '')):
return make_response(jsonify(dict(instanceId=instance_id, modelId=model_id, taskId='', result=STATUS_FAIL, message='Inconsistent series sets or params!', modelState=meta['state'])), 400)
log.info('Create inference task')
task_id = str(uuid.uuid1())
job = JobRecord(task_id, JobRecord.MODE_INFERENCE, self.__class__.__name__, model_id, subscription, request_body)
send_message(self.inference_topic, dict(job))
log.count("inference_task_throughput_in", 1, topic_name=self.inference_topic, model_id=model_id, endpoint=request_body['apiEndpoint'], group_id=request_body['groupId'], group_name=request_body['groupName'].replace(' ', '_'), instance_id=request_body['instance']['instanceId'], instance_name=request_body['instance']['instanceName'].replace(' ', '_'))
return make_response(jsonify(dict(instanceId=instance_id, modelId=model_id, taskId=task_id, result=STATUS_SUCCESS, message='Inference task created', modelState=ModelState.Ready.name)), 201)
def state(self, request, model_id):
if not self.trainable:
return make_response(jsonify(dict(instanceId='', modelId=model_id, taskId='', result=STATUS_SUCCESS, message='Model is not trainable', modelState=ModelState.Ready.name)), 200)
try:
subscription = request.headers.get('apim-subscription-id', 'Official')
request_body = json.loads(request.data)
request_body[INSTANCE_ID_KEY] = subscription
meta = get_meta(self.config, subscription, model_id)
if meta == None:
return make_response(jsonify(dict(instanceId='', modelId=model_id, taskId='', result=STATUS_FAIL, message='Model is not found!', modelState=ModelState.Deleted.name)), 400)
meta = clear_state_when_necessary(self.config, subscription, model_id, meta)
return make_response(jsonify(dict(instanceId='', modelId=model_id, taskId='', result=STATUS_SUCCESS, message=meta['last_error'] if 'last_error' in meta else '', modelState=meta['state'])), 200)
except Exception as e:
error_message = str(e)
log.error("Get model state failed! subscription = %s, model_id = %s, last_error = %s" % (subscription, model_id, error_message + '\n' + traceback.format_exc()))
return make_response(jsonify(dict(instanceId='', modelId=model_id, taskId='', result=STATUS_FAIL, message=error_message, modelState=ModelState.Failed.name)), 400)
def list_models(self, request):
subscription = request.headers.get('apim-subscription-id', 'Official')
return make_response(jsonify(get_model_list(self.config, subscription)), 200)
def delete(self, request, model_id):
if not self.trainable:
return make_response(jsonify(dict(instanceId='', modelId=model_id, taskId='', result=STATUS_SUCCESS, message='Model is not trainable')), 200)
try:
subscription = request.headers.get('apim-subscription-id', 'Official')
request_body = json.loads(request.data)
request_body[INSTANCE_ID_KEY] = subscription
instance_id = request_body['instance']['instanceId']
result, message = self.do_delete(request_body, model_id)
if result == STATUS_SUCCESS:
update_state(self.config, subscription, model_id, ModelState.Deleted)
return make_response(jsonify(dict(instanceId=instance_id, modelId=model_id, taskId='', result=STATUS_SUCCESS, message='Model {} has been deleted'.format(model_id), modelState=ModelState.Deleted.name)), 200)
else:
raise Exception(message)
except Exception as e:
error_message = str(e)
log.error("Delete model failed! subscription = %s, model_id = %s, last_error = %s" % (subscription, model_id, error_message + '\n' + traceback.format_exc()))
return make_response(jsonify(dict(instanceId='', modelId=model_id, taskId='', result=STATUS_FAIL, message=error_message, modelState=ModelState.Failed.name)), 400)
def verify(self, request):
request_body = json.loads(request.data)
instance_id = request_body['instance']['instanceId']
subscription = request.headers.get('apim-subscription-id', 'Official')
request_body[INSTANCE_ID_KEY] = subscription
try:
result, message = self.do_verify(request_body, Context(subscription, '', ''))
if result != STATUS_SUCCESS:
return make_response(jsonify(dict(instanceId=instance_id, modelId='', taskId='', result=STATUS_FAIL, message='Verify failed! ' + message, modelState=ModelState.Deleted.name)), 400)
else:
return make_response(jsonify(dict(instanceId=instance_id, modelId='', taskId='', result=STATUS_SUCCESS, message='Verify successfully! ' + message, modelState=ModelState.Deleted.name)), 200)
except Exception as e:
error_message = str(e)
log.error("Verify parameters failed! subscription = %s, instance_id = %s, last_error = %s" % (subscription, instance_id, error_message + '\n' + traceback.format_exc()))
return make_response(jsonify(dict(instanceId=instance_id, modelId='', taskId='', result=STATUS_FAIL, message='Verify failed! ' + error_message, modelState=ModelState.Deleted.name)), 400)
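# --- Illustrative sketch (not part of the original service) -----------------
# A minimal plugin is a subclass of PluginService that overrides do_train and
# do_inference; the wrappers above handle model storage, status reporting and
# telemetry. Everything below (class and route names, the 'threshold'
# parameter, the placeholder metricId) is hypothetical and only shows the
# expected call and return shapes.
class DemoPlugin(PluginService):
    def do_train(self, model_dir, parameters, series, context):
        # Persist anything needed at inference time under model_dir; the
        # framework uploads that directory and re-downloads it later.
        with open(os.path.join(model_dir, 'model.json'), 'w') as f:
            json.dump({'threshold': parameters['instance']['params'].get('threshold', 0.5)}, f)
        return STATUS_SUCCESS, ''

    def do_inference(self, model_dir, parameters, series, context):
        # Return a list of value dicts matching the schema documented above,
        # or None to handle result storage yourself.
        values = [dict(metricId='00000000-0000-0000-0000-000000000000',  # placeholder
                       dimension={},
                       timestamps=['2020-10-12T17:55:00Z'],
                       values=[1.0])]
        return STATUS_SUCCESS, values, ''

# Hypothetical Flask wiring; the real routes are registered by the hosting
# framework, so treat these paths as placeholders. Constructing the plugin
# requires SERVICE_CONFIG_FILE plus the Kafka and monitor backends.
def create_app():
    from flask import Flask, request
    app = Flask(__name__)
    plugin = DemoPlugin(trainable=True)
    app.add_url_rule('/models/train', 'train',
                     lambda: plugin.train(request), methods=['POST'])
    app.add_url_rule('/models/<model_id>/inference', 'inference',
                     lambda model_id: plugin.inference(request, model_id), methods=['POST'])
    app.add_url_rule('/models/<model_id>', 'state',
                     lambda model_id: plugin.state(request, model_id), methods=['GET'])
    return app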
libraries.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from sublime import active_window
from os import path
from json import loads
from glob import glob
from threading import Thread
from urllib.parse import urlencode
from urllib.request import Request
from urllib.request import urlopen
from ..api import deviot
from .file import File
from .I18n import I18n
from .messages import Messages
from .quick_panel import quick_panel
from ..platformio.command import Command
from .thread_progress import ThreadProgress
from .tools import get_headers, get_setting, save_setting
class Libraries(Command):
"""
Handle the library API from platformIO
More info: http://docs.platformio.org/en/latest/librarymanager/index.html
"""
def __init__(self, window=None, view=None, feedback=True):
super(Libraries, self).__init__()
self.translate = I18n().translate
self.window = active_window()
self.view = self.window.active_view()
self.lib_file_path = deviot.libraries_data_path()
self.quick_list = []
self.page = 1
self.keyword = ""
self.cwd = None
self.dprint = None
def set_queue(self):
"""Message Instances
        Makes all the instances start printing to the deviot console.
        It sends a header string as the first message
"""
messages = Messages()
messages.initial_text("deviot_library{0}", deviot.version())
messages.create_panel()
self.init(messages=messages)
self.dprint = messages.print
def search_library(self):
"""Search Library
Opens the input box to search a library
"""
caption = self.translate("search_query")
self.window.show_input_panel(
caption, '', self.download_list_async, None, None)
def download_list_async(self, keyword):
"""Downlad in a Thread
Opens the download_list method in a new thread to avoid blocking
the main thread of sublime text
Arguments:
            keyword {str} -- keyword to be searched
"""
self.keyword = keyword
thread = Thread(target=self.download_list)
thread.start()
ThreadProgress(thread, self.translate('searching'), '')
def download_list(self):
"""PlatformIO API
Search a library in the platformio API api.platformio.org.
        The results are formatted for the quick panel and displayed
on it
Arguments:
keyword {string}:
Keyword to search the library in the platformio API
"""
request = {}
request['query'] = self.keyword
request['page'] = self.page
query = urlencode(request)
url = 'http://api.platformio.org/lib/search?{0}'.format(query)
req = Request(url, headers=get_headers())
response = urlopen(req)
response_list = loads(response.read().decode())
pages = response_list['total'] / response_list['perpage']
page_previous = self.page - 1
page_next = self.page + 1
if(len(response_list['items']) == 0):
self.quick_list.append([self.translate('none_lib_found')])
else:
self.quicked(response_list['items'])
self.quick_list.insert(0, [self.translate('select_library').upper()])
if(self.page > 1):
caption = self.translate("library_page_previous_{0}", page_previous)
self.quick_list.insert(1, [caption, page_previous])
if(self.page < pages):
caption = self.translate("library_page_next_{0}", page_next)
self.quick_list.insert(len(self.quick_list), [caption, page_next])
quick_panel(self.quick_list, self.library_install_async)
def quicked(self, source_list):
"""Quick panel List
        Turns the source dictionary list into a plain list
        format that works properly in the quick panel
Arguments:
source_list {dict} -- dictionary with data
"""
quick_list = []
for item in source_list:
id = item['id']
name = item['name']
description = item['description']
try:
authornames = ", ".join(item['authornames'])
except KeyError:
authornames = ""
info = "{0} | {1}".format(name, authornames)
quick_list.append([info, description, id])
self.quick_list = quick_list
def library_install_async(self, selected):
"""Install in thread
Runs the library_install method to avoid block the main
thread of sublime text
Arguments:
selected {int} -- user selection index
"""
if(selected <= 0):
return
thread = Thread(target=self.library_install, args=(selected,))
thread.start()
ThreadProgress(thread, self.translate('installing'), '')
def library_install(self, selected):
"""Library Install
Run a CLI command with the ID of the library to install. After the
        setup finishes, it adds the library information to the libraries.json
        file.
Arguments:
selected {int} -- user selection index
"""
if(selected <= 0):
return
try:
list_selection = self.quick_list[selected]
page = int(list_selection[1])
self.page = page
self.download_list_async(self.keyword)
return
except:
pass
lib_id = self.quick_list[selected][2]
lib_name = self.quick_list[selected][0]
self.set_queue()
self.run_command(['lib', '--global', 'install', str(lib_id)])
if(self.exit_code() == 0):
from .syntax import Syntax
quick_list = File(self.lib_file_path).read_json()
quick_list.append(self.quick_list[selected])
File(self.lib_file_path).save_json(quick_list)
Syntax()
def update_library_async(self, selected):
"""Update
        Runs the update_library method in a thread to avoid blocking sublime text
"""
if(selected <= 0):
return
thread = Thread(target=self.update_library, args=(selected,))
thread.start()
ThreadProgress(thread, self.translate('updating'), '')
def update_library(self, selected):
"""Update Library
Run a CLI command with the ID of the library to update
Arguments:
selected {int} -- user selection index.
"""
response_list = self.quick_list
lib_id = self.quick_list[selected][2]
lib_name = self.quick_list[selected][0]
self.set_queue()
self.run_command(['lib', '--global', 'update', str(lib_id)])
def get_installed_list(self, type):
"""Install libraries list
Get the file with the installed libraries. This files
is updated each time the user install or remove a library,
the file is formated in the quick panel way (list)
Arguments:
type {str} -- action to do after show the quick list
"""
quick_list = File(self.lib_file_path).read_json()
self.quick_list = quick_list
self.quick_list.insert(0, [self.translate('select_library').upper()])
if(type == 'remove'):
quick_panel(quick_list, self.remove_library_async)
else:
quick_panel(quick_list, self.update_library_async)
def remove_library_async(self, selected):
"""Remove in a thread
        Runs the remove_library method to avoid blocking the main
thread of sublime text
Arguments:
selected {int} -- user selection index
"""
if(selected <= 0):
return
thread = Thread(target=self.remove_library, args=(selected,))
thread.start()
ThreadProgress(thread, self.translate('removing'), '')
def remove_library(self, selected):
"""Remove Library
        Run a CLI command with the name of the library to uninstall;
it also removes the reference from the libraries.json file.
Arguments:
selected {int} -- user selection index.
"""
lib_name = self.quick_list[selected][0].split(" | ")[0]
self.set_queue()
self.run_command(['lib', '--global', 'uninstall', str(lib_name)])
if(self.exit_code() == 0):
from .syntax import Syntax
self.quick_list.remove(self.quick_list[selected])
self.quick_list.pop(0)
File(self.lib_file_path).save_json(self.quick_list)
Syntax()
def save_installed_list_async(self):
"""Save in thread
        Runs the save_installed_list method to avoid blocking the main
thread of sublime text
"""
thread = Thread(target=self.save_installed_list)
thread.start()
ThreadProgress(thread, self.translate('processing'), '')
def save_installed_list(self):
"""Save installed list
        Each time a library is installed or removed, it's stored/deleted
        in a file (libraries.json). This file is used to avoid the lag
        when you run the platformIO command. If for some reason the list
        of libraries is corrupted or out of date, this method will update
        the file to get the most recent information
"""
self.set_return = True
self.realtime = False
self.run_command(['lib', '--global', 'list', '--json-output'])
out = self.get_output()
out = loads(out)
self.quicked(out)
File(self.lib_file_path).save_json(self.quick_list)
from .syntax import Syntax
Syntax()
def get_library_folders(platform='all'):
"""Libraries availables
Find the list of all folders that should have libraries.
The main folders are .platformio/lib who is the global folder
where platformio stores the libraries installed
The second one are the libraries inside of the package folder
.platformio/packages. Each package folder contain a list of
default libraries, those libraries are selected according to
the selected option.
Keyword Arguments:
platform {str} -- platform to search (default: {'all'})
Returns:
[list] -- list of folders with the libraries
"""
libraries_folders = []
pio_packages = deviot.pio_packages(all=True)
packages_sub_dirs = glob(pio_packages)
if(platform == 'atmelavr'):
platform = 'avr'
for sub_path in packages_sub_dirs:
if(platform in sub_path or platform == 'all'):
for sub_path in glob(sub_path):
packages = path.join(sub_path, '*')
packages = glob(packages)
for folder in packages:
if('libraries' in folder):
libraries = path.join(folder, '*')
libraries_folders.append(libraries)
pio_lib_path = deviot.pio_library(all=True)
libraries_folders.insert(0, pio_lib_path)
    # Add the extra folder if it was set by the user
extra_folder = get_setting('extra_library', None)
if(extra_folder):
extra_folder = path.join(extra_folder, '*')
libraries_folders.insert(1, extra_folder)
return libraries_folders
def get_library_list(example_list=False, platform="all"):
"""List of Libraries
    Makes a list of the available libraries. This list is
    used in the library import and examples.
Keyword Arguments:
example_list {bool} -- if it's True, returns a list of examples
inside of the library (default: {False})
platform {str} -- results only in the given platform (default: {"all"})
Returns:
[list/list] -- name of folder and path [[name, path]]
"""
from re import search
libraries_folders = get_library_folders(platform)
quick_list = []
check_list = []
for library in libraries_folders:
sub_library = glob(library)
for content in sub_library:
caption = path.basename(content)
new_caption = caption.split("_ID")
if(new_caption is not None):
caption = new_caption[0]
if('__cores__' in content):
cores = path.join(content, '*')
cores = glob(cores)
for sub_core in cores:
libs_core = path.join(sub_core, '*')
libs_core = glob(libs_core)
board = path.basename(sub_core).capitalize()
for lib_core in libs_core:
caption = path.basename(lib_core)
quick_list.append([caption, board, lib_core])
check_list.append([caption])
if(caption not in quick_list and '__cores__' not in
caption and caption not in check_list):
store_data = True
if(example_list):
examples_path = path.join(content, 'examples')
store_data = True if path.exists(examples_path) else False
quick_list = sorted(quick_list, key=lambda x: (x[0], x[1]))
if(store_data):
quick_list.append([caption, content])
check_list.append(caption)
return quick_list
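# Illustrative usage (hedged): inside Sublime Text's plugin host, listing the
# AVR libraries together with their bundled examples might look like this;
# 'atmelavr' is mapped to 'avr' by get_library_folders above.
#
#   libs = get_library_list(example_list=True, platform='atmelavr')
#   for entry in libs:
#       # entry is [caption, path], or [caption, board, path] for __cores__ libs
#       print(entry[0], entry[-1])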
session.py
# San Jose State Greenlight scraper objects
# Written by Kevin Tom
import threading
import Queue
import re
import mechanize
from mechanize import URLError
from bs4 import BeautifulSoup
class GreenlightSession(object):
def __init__(self, username='', password=''):
self.username = username
self.password = password
self.browser = mechanize.Browser()
if username and password:
self.login()
def login(self):
self.browser.open('http://wpe.sjsu.edu/greenlight/html/login.html')
self.browser.select_form(name='empirical')
self.browser.form['userID'] = self.username
self.browser.form['userPW'] = self.password
response = self.browser.submit()
def browse(self, url):
return self.browser.open(url).read()
class GreenlightScraper(object):
# only around 400 organizations, small data set
def __init__(self, greenlightSession, threads=5):
'''
orgs = {
'name' : {
'classification' : 'string',
'officers' : [],
'description' : 'string'
}
}
'''
self.session = greenlightSession
self.orgs = {}
self.max_threads = threads
# One queue for failed requests, other for successful requests
self.queue = Queue.Queue()
self.queue2 = Queue.Queue()
self.purpose_matcher = re.compile('Purpose: ([\s\S]+?)(?=(?:<br><br>|<br\/><br\/>))')
def scrape(self, retry=10):
self.scrape_directory()
self.scrape_organizations()
print('Retrying scrape session on failed requests')
while retry > 0:
print('Retry attempts remaining: ' + str(retry))
if self.retry_scrape():
break
retry -= 1
return bool(retry)
def scrape_directory(self):
url = 'http://wpe.sjsu.edu/greenlight/pages/public/'
directory = 'http://wpe.sjsu.edu/greenlight/pages/public/directory.php'
soup = BeautifulSoup(self.session.browse(directory), 'html.parser')
div = soup.find_all('div', {'id' : 'col_1_of_2_land'})
for table_html in div:
table = table_html.find_all('tr')
for row in table:
tds = row.find_all('td')
if tds:
self.orgs[tds[0].text] = {}
self.orgs[tds[0].text]['classification'] = tds[1].text
self.orgs[tds[0].text]['officers'] = []
link = row.find_all(href=True)[0]['href'].replace(' ', '%20')
print("Enqueuing: " + tds[0].text)
self.queue.put( ( tds[0].text, url + link ) )
def scrape_organizations(self):
for i in xrange(self.max_threads):
t = threading.Thread(target=self.thread_worker)
t.daemon = True
t.start()
print('Thread waiting')
self.queue.join()
print('Done\n\n')
def scrape_org(self, link):
print('Processing organization: ' + link[0]) # org name
try:
soup = BeautifulSoup(self.session.browse(link[1]), 'html.parser')
div = soup.find_all('div', {'id' : 'col_1_of_2_land'})
description = re.search(self.purpose_matcher, str(div[0])).group(1).replace('\n\n', '\n')
self.orgs[link[0]]['description'] = description
for table_html in div:
table = table_html.find_all('tr')
for admin in table:
# Role, First Name, Last Name, Email
# info[0].text, info[1].text, info[2].text, info[3].text]
info = admin.find_all('td')
if info:
self.orgs[link[0]]['officers'].append(info[3].text)
except AttributeError as e:
            print(e)
            self.queue2.put(link)
            print('Error, putting to queue2')
except URLError as e:
print(e)
self.queue2.put(link)
            print('URL Error, putting into queue2')
def thread_worker(self):
while True:
link = self.queue.get()
self.scrape_org(link)
self.queue.task_done()
def retry_scrape(self):
print('Failed scrapes: ' + str(self.queue2.qsize()))
if self.queue2.qsize() > 0:
holder = self.queue
self.queue = self.queue2
self.queue2 = holder
while not self.queue.empty():
self.scrape_org(self.queue.get())
if not self.queue2.empty():
print('Still need to retry')
return self.queue2.empty()
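# Illustrative usage (hedged): the credentials and output handling below are
# placeholders, not part of this module; they only show how the two classes
# are meant to be combined.
if __name__ == '__main__':
    session = GreenlightSession(username='sjsu_id', password='secret')  # placeholder credentials
    scraper = GreenlightScraper(session, threads=5)
    if scraper.scrape(retry=3):
        # orgs maps organization name -> classification / officers / description
        for name, info in scraper.orgs.items():
            print(name, info.get('classification', ''))
    else:
        print('Some organization pages could not be fetched')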
atrace_agent.py
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import optparse
import platform
import re
import sys
import threading
import zlib
import py_utils
from devil.android import device_utils
from devil.android.sdk import version_codes
from py_trace_event import trace_time as trace_time_module
from systrace import trace_result
from systrace import tracing_agents
from systrace import util
# Text that ADB sends, but does not need to be displayed to the user.
ADB_IGNORE_REGEXP = r'^capturing trace\.\.\. done|^capturing trace\.\.\.'
# The number of seconds to wait on output from ADB.
ADB_STDOUT_READ_TIMEOUT = 0.2
# The number of seconds to wait for large output from ADB.
ADB_LARGE_OUTPUT_TIMEOUT = 600
# The adb shell command to initiate a trace.
ATRACE_BASE_ARGS = ['atrace']
# If a custom list of categories is not specified, traces will include
# these categories (if available on the device).
DEFAULT_CATEGORIES = 'sched,freq,gfx,view,dalvik,webview,'\
'input,disk,am,wm,rs,binder_driver'
# The command to list trace categories.
LIST_CATEGORIES_ARGS = ATRACE_BASE_ARGS + ['--list_categories']
# Minimum number of seconds between displaying status updates.
MIN_TIME_BETWEEN_STATUS_UPDATES = 0.2
# ADB sends this text to indicate the beginning of the trace data.
TRACE_START_REGEXP = r'TRACE\:'
# Plain-text trace data should always start with this string.
TRACE_TEXT_HEADER = '# tracer'
_FIX_MISSING_TGIDS = True
_FIX_CIRCULAR_TRACES = True
def list_categories(config):
"""List the possible trace event categories.
This function needs the tracing config since it needs to get the serial
number of the device to send a command to.
Args:
config: Tracing config.
"""
devutils = device_utils.DeviceUtils(config.device_serial_number)
categories = devutils.RunShellCommand(
LIST_CATEGORIES_ARGS, check_return=True)
device_sdk_version = util.get_device_sdk_version()
if device_sdk_version < version_codes.MARSHMALLOW:
# work around platform bug where rs tag would corrupt trace until M(Api23)
categories = [c for c in categories if not re.match(r'^\s*rs\s*-', c)]
print '\n'.join(categories)
if not devutils.HasRoot():
print '\nNOTE: more categories may be available with adb root\n'
def get_available_categories(config, device_sdk_version):
"""Gets the list of atrace categories available for tracing.
Args:
config: Tracing config.
device_sdk_version: Sdk version int of device to be queried.
"""
devutils = device_utils.DeviceUtils(config.device_serial_number)
categories_output = devutils.RunShellCommand(
LIST_CATEGORIES_ARGS, check_return=True)
categories = [c.split('-')[0].strip() for c in categories_output]
if device_sdk_version < version_codes.MARSHMALLOW:
# work around platform bug where rs tag would corrupt trace until M(Api23)
categories = [c for c in categories if c != 'rs']
return categories
def try_create_agent(config):
"""Create an Atrace agent.
Args:
config: Command line config.
"""
if config.target != 'android':
return None
if config.from_file is not None:
return None
if not config.atrace_categories:
return None
# Check device SDK version.
device_sdk_version = util.get_device_sdk_version()
if device_sdk_version < version_codes.JELLY_BEAN_MR2:
print ('Device SDK versions < 18 (Jellybean MR2) not supported.\n'
'Your device SDK version is %d.' % device_sdk_version)
return None
return AtraceAgent(device_sdk_version)
def _construct_extra_atrace_args(config, categories):
"""Construct extra arguments (-a, -k, categories) for atrace command.
Args:
config: Tracing config.
"""
extra_args = []
if config.app_name is not None:
extra_args.extend(['-a', config.app_name])
if config.kfuncs is not None:
extra_args.extend(['-k', config.kfuncs])
extra_args.extend(categories)
return extra_args
def _construct_atrace_args(config, categories):
"""Builds the command used to invoke a trace process.
  Returns:
    An array of command arguments used to invoke atrace.
"""
atrace_args = ATRACE_BASE_ARGS[:]
if config.compress_trace_data:
atrace_args.extend(['-z'])
if (config.trace_time is not None) and (config.trace_time > 0):
atrace_args.extend(['-t', str(config.trace_time)])
if (config.trace_buf_size is not None) and (config.trace_buf_size > 0):
atrace_args.extend(['-b', str(config.trace_buf_size)])
elif 'webview' in categories and 'sched' in categories:
# https://crbug.com/814330: webview_startup sometimes exceeds the buffer
# limit, so doubling this.
atrace_args.extend(['-b', '8192'])
elif 'sched' in categories:
# 'sched' is a high-volume tag, double the default buffer size
# to accommodate that
atrace_args.extend(['-b', '4096'])
extra_args = _construct_extra_atrace_args(config, categories)
atrace_args.extend(extra_args)
return atrace_args
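# For example (derived from the logic above, not from running a device): a
# config with compress_trace_data=True, trace_time=10, trace_buf_size=None
# and categories ['sched', 'gfx'] yields
#   ['atrace', '-z', '-t', '10', '-b', '4096', 'sched', 'gfx']
# because the 'sched' category doubles the default buffer size when no
# explicit buffer size is given.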
class AtraceAgent(tracing_agents.TracingAgent):
def __init__(self, device_sdk_version):
super(AtraceAgent, self).__init__()
self._device_sdk_version = device_sdk_version
self._adb = None
self._trace_data = None
self._tracer_args = None
self._collection_thread = None
self._device_utils = None
self._device_serial_number = None
self._config = None
self._categories = None
def __repr__(self):
return 'atrace'
@py_utils.Timeout(tracing_agents.START_STOP_TIMEOUT)
def StartAgentTracing(self, config, timeout=None):
assert config.atrace_categories, 'Atrace categories are missing!'
self._config = config
self._categories = config.atrace_categories
if isinstance(self._categories, list):
self._categories = ','.join(self._categories)
avail_cats = get_available_categories(config, self._device_sdk_version)
unavailable = [x for x in self._categories.split(',') if
x not in avail_cats]
self._categories = [x for x in self._categories.split(',') if
x in avail_cats]
if unavailable:
print 'These categories are unavailable: ' + ' '.join(unavailable)
self._device_utils = device_utils.DeviceUtils(config.device_serial_number)
self._device_serial_number = config.device_serial_number
self._tracer_args = _construct_atrace_args(config,
self._categories)
self._device_utils.RunShellCommand(
self._tracer_args + ['--async_start'], check_return=True)
return True
def _collect_and_preprocess(self):
"""Collects and preprocesses trace data.
Stores results in self._trace_data.
"""
trace_data = self._collect_trace_data()
self._trace_data = self._preprocess_trace_data(trace_data)
@py_utils.Timeout(tracing_agents.START_STOP_TIMEOUT)
def StopAgentTracing(self, timeout=None):
"""Stops tracing and starts collecting results.
To synchronously retrieve the results after calling this function,
call GetResults().
"""
self._collection_thread = threading.Thread(
target=self._collect_and_preprocess)
self._collection_thread.start()
return True
@py_utils.Timeout(tracing_agents.GET_RESULTS_TIMEOUT)
def GetResults(self, timeout=None):
"""Waits for collection thread to finish and returns trace results."""
self._collection_thread.join()
self._collection_thread = None
return trace_result.TraceResult('systemTraceEvents', self._trace_data)
def SupportsExplicitClockSync(self):
return True
def RecordClockSyncMarker(self, sync_id, did_record_sync_marker_callback):
"""Records a clock sync marker.
Args:
sync_id: ID string for clock sync marker.
"""
cmd = 'echo trace_event_clock_sync: name=%s >' \
' /sys/kernel/debug/tracing/trace_marker' % sync_id
with self._device_utils.adb.PersistentShell(
self._device_serial_number) as shell:
t1 = trace_time_module.Now()
shell.RunCommand(cmd, close=True)
did_record_sync_marker_callback(t1, sync_id)
def _stop_collect_trace(self):
"""Stops atrace.
    Note that prior to API 23, --async_stop does not work correctly: it does
    not stop tracing and it clears the trace buffer before dumping it,
    rendering the results unusable."""
if self._device_sdk_version < version_codes.MARSHMALLOW:
is_trace_enabled_file = '/sys/kernel/debug/tracing/tracing_on'
# Stop tracing first so new data won't arrive while dump is performed (it
# may take a non-trivial time and tracing buffer may overflow).
self._device_utils.WriteFile(is_trace_enabled_file, '0')
result = self._device_utils.RunShellCommand(
self._tracer_args + ['--async_dump'], raw_output=True,
large_output=True, check_return=True,
timeout=ADB_LARGE_OUTPUT_TIMEOUT)
# Run synchronous tracing for 0 seconds to stop tracing, clear buffers
# and other state.
self._device_utils.RunShellCommand(
self._tracer_args + ['-t 0'], check_return=True)
else:
# On M+ --async_stop does everything necessary
result = self._device_utils.RunShellCommand(
self._tracer_args + ['--async_stop'], raw_output=True,
large_output=True, check_return=True,
timeout=ADB_LARGE_OUTPUT_TIMEOUT)
return result
def _collect_trace_data(self):
"""Reads the output from atrace and stops the trace."""
result = self._stop_collect_trace()
data_start = re.search(TRACE_START_REGEXP, result)
if data_start:
data_start = data_start.end(0)
else:
raise IOError('Unable to get atrace data. Did you forget adb root?')
output = re.sub(ADB_IGNORE_REGEXP, '', result[data_start:])
return output
def _preprocess_trace_data(self, trace_data):
"""Performs various processing on atrace data.
Args:
trace_data: The raw trace data.
Returns:
The processed trace data.
"""
if trace_data:
trace_data = strip_and_decompress_trace(trace_data)
if not trace_data:
print >> sys.stderr, ('No data was captured. Output file was not '
'written.')
sys.exit(1)
if _FIX_MISSING_TGIDS:
# Gather proc data from device and patch tgids
procfs_dump = self._device_utils.RunShellCommand(
'echo -n /proc/[0-9]*/task/[0-9]*',
shell=True, check_return=True)[0].split(' ')
pid2_tgid = extract_tgids(procfs_dump)
trace_data = fix_missing_tgids(trace_data, pid2_tgid)
if _FIX_CIRCULAR_TRACES:
trace_data = fix_circular_traces(trace_data)
return trace_data
def extract_tgids(trace_lines):
"""Removes the procfs dump from the given trace text
Args:
trace_lines: The text portion of the trace
Returns:
a map of pids to their tgid.
"""
tgid_2pid = {}
for line in trace_lines:
result = re.match('^/proc/([0-9]+)/task/([0-9]+)', line)
if result:
parent_pid, tgid = result.group(1, 2)
tgid_2pid[tgid] = parent_pid
return tgid_2pid
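# For example, the procfs listing entry '/proc/4022/task/4033' produces the
# mapping {'4033': '4022'}: the task id is the key and its parent pid (the
# tgid) is the value, which is what fix_missing_tgids() looks up below.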
def strip_and_decompress_trace(trace_data):
"""Fixes new-lines and decompresses trace data.
Args:
trace_data: The trace data returned by atrace.
Returns:
The decompressed trace data.
"""
# Collapse CRLFs that are added by adb shell.
if trace_data.startswith('\r\n'):
trace_data = trace_data.replace('\r\n', '\n')
elif trace_data.startswith('\r\r\n'):
# On windows, adb adds an extra '\r' character for each line.
trace_data = trace_data.replace('\r\r\n', '\n')
# Skip the initial newline.
if trace_data[0] == '\n':
trace_data = trace_data[1:]
if not trace_data.startswith(TRACE_TEXT_HEADER):
# No header found, so assume the data is compressed.
trace_data = zlib.decompress(trace_data)
# Enforce Unix line-endings.
trace_data = trace_data.replace('\r', '')
# Skip any initial newlines.
while trace_data and trace_data[0] == '\n':
trace_data = trace_data[1:]
return trace_data
def fix_missing_tgids(trace_data, pid2_tgid):
"""Replaces missing TGIDs from the trace data with those found in procfs
Args:
trace_data: the atrace data
Returns:
The updated trace data with missing TGIDs replaced with the correct TGID
"""
def repl(m):
tid = m.group(2)
if (int(tid) > 0 and m.group(1) != '<idle>' and m.group(3) == '(-----)'
and tid in pid2_tgid):
# returns Proc_name-PID (TGID)
# Binder_2-381 (-----) becomes Binder_2-381 (128)
return m.group(1) + '-' + m.group(2) + ' ( ' + pid2_tgid[tid] + ')'
return m.group(0)
# matches something like:
# Binder_2-895 (-----)
trace_data = re.sub(r'^\s*(\S+)-(\d+)\s+(\(\S+\))', repl, trace_data,
flags=re.MULTILINE)
return trace_data
def fix_circular_traces(out):
"""Fix inconsistentcies in traces due to circular buffering.
The circular buffers are kept per CPU, so it is not guaranteed that the
beginning of a slice is overwritten before the end. To work around this, we
throw away the prefix of the trace where not all CPUs have events yet.
Args:
out: The data to fix.
Returns:
The updated trace data.
"""
# If any of the CPU's buffers have filled up and
# older events have been dropped, the kernel
# emits markers of the form '##### CPU 2 buffer started ####' on
# the line before the first event in the trace on that CPU.
#
# No such headers are emitted if there were no overflows or the trace
# was captured with non-circular buffers.
buffer_start_re = re.compile(r'^#+ CPU \d+ buffer started', re.MULTILINE)
start_of_full_trace = 0
while True:
result = buffer_start_re.search(out, start_of_full_trace + 1)
if result:
start_of_full_trace = result.start()
else:
break
if start_of_full_trace > 0:
# Need to keep the header intact to make the importer happy.
end_of_header = re.search(r'^[^#]', out, re.MULTILINE).start()
out = out[:end_of_header] + out[start_of_full_trace:]
return out
class AtraceConfig(tracing_agents.TracingConfig):
def __init__(self, atrace_categories, trace_buf_size, kfuncs,
app_name, compress_trace_data, from_file,
device_serial_number, trace_time, target):
tracing_agents.TracingConfig.__init__(self)
self.atrace_categories = atrace_categories
self.trace_buf_size = trace_buf_size
self.kfuncs = kfuncs
self.app_name = app_name
# Trace compression is broken on Windows.
# TODO: Fix https://crbug.com/739751.
self.compress_trace_data = \
compress_trace_data and platform.system() != 'Windows'
self.from_file = from_file
self.device_serial_number = device_serial_number
self.trace_time = trace_time
self.target = target
def add_options(parser):
options = optparse.OptionGroup(parser, 'Atrace options')
options.add_option('--atrace-categories', dest='atrace_categories',
help='Select atrace categories with a comma-delimited '
'list, e.g. --atrace-categories=cat1,cat2,cat3')
options.add_option('-k', '--ktrace', dest='kfuncs', action='store',
help='specify a comma-separated list of kernel functions '
'to trace')
options.add_option('--no-compress', dest='compress_trace_data',
default=True, action='store_false',
help='Tell the device not to send the trace data in '
'compressed form.')
options.add_option('-a', '--app', dest='app_name', default=None,
type='string', action='store',
help='enable application-level tracing for '
'comma-separated list of app cmdlines')
options.add_option('--from-file', dest='from_file',
action='store', help='read the trace from a '
'file (compressed) rather than running a '
'live trace')
return options
def get_config(options):
return AtraceConfig(options.atrace_categories,
options.trace_buf_size, options.kfuncs,
options.app_name, options.compress_trace_data,
options.from_file, options.device_serial_number,
options.trace_time, options.target)
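# Illustrative sketch (hedged): how the pieces above fit together when this
# module is driven directly. The extra options added to the parser (trace
# time, buffer size, serial number, target) are assumptions; in systrace they
# are registered by the main option handling, not here.
def _example_build_agent():
    parser = optparse.OptionParser()
    parser.add_option_group(add_options(parser))
    parser.add_option('-t', '--time', dest='trace_time', type='int', default=10)
    parser.add_option('-b', '--buf-size', dest='trace_buf_size', type='int', default=None)
    parser.add_option('-e', '--serial', dest='device_serial_number', default=None)
    parser.add_option('--target', dest='target', default='android')
    options, _ = parser.parse_args(['--atrace-categories=sched,gfx,view'])
    config = get_config(options)
    # try_create_agent returns None unless the target is android, categories
    # are set and no --from-file trace is being parsed; it also needs an
    # attached device in order to query the SDK version.
    return try_create_agent(config)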
mp_support.py
"""
This module defines modified versions of some classes and functions defined in
:mod:`multiprocessing.managers` to support OpenMDAO distributed simulations.
Channel communication can be secured by setting `authkey` to 'PublicKey'.
When `authkey` is 'PublicKey', an additional session establishment protocol
is used between the Proxy and Server:
1. Proxy sends session request containing the proxy's public key, encrypted with
the server's public key (obtained when told what server to connect to).
2. Server responds with random session key, encrypted with proxy's public key.
3. Subsequent communication is encrypted with the session key (which presumably
is quicker than public/private key encryption).
If `authkey` is not 'PublicKey', then the above session protocol is not used,
and channel data is in the clear.
Public methods of an object are determined by a role-based access control
attribute associated with the method. The server will verify that the current
role is allowed access. The current role is determined by an
:class:`AccessController` based on a :class:`Credentials` object received from
the proxy.
Assuming the credentials check passes, the server will set its credentials
to those specified by the :class:`AccessController` during the execution of the
method.
"""
# Unfortunately, there's a lot of multiprocessing package code duplication here.
# And a lot of this should be done at a lower layer. But then we'd be
# duplicating connection.py code as well as (more) managers.py code. Another
# alternative is to just use our own connection.py and managers.py (and whatever
# else might bleed in) as our private multiprocessing package.
# No obvious 'best' alternative.
import errno
import glob
import hashlib
import inspect
import logging
import os
import socket
import sys
import threading
import time
import traceback
from Crypto import Random
from multiprocessing import Process, current_process, connection, util
from multiprocessing.forking import Popen
from multiprocessing.managers import BaseManager, BaseProxy, RebuildProxy, \
Server, State, Token, convert_to_error, \
dispatch, RemoteError
if sys.platform == 'win32': #pragma no cover
from _multiprocessing import win32
from traits.trait_handlers import TraitDictObject
from openmdao.main.interfaces import implements, obj_has_interface, IContainerProxy
from openmdao.main.mp_util import decrypt, encrypt, is_legal_connection, \
keytype, make_typeid, public_methods, \
tunnel_address, SPECIALS
from openmdao.main.rbac import AccessController, RoleError, check_role, \
need_proxy, Credentials, \
get_credentials, set_credentials
from openmdao.util.log import install_remote_handler, remove_remote_handlers, \
logging_port, LOG_DEBUG2, LOG_DEBUG3
from openmdao.util.publickey import decode_public_key, encode_public_key, \
get_key_pair, HAVE_PYWIN32, \
pk_encrypt, pk_decrypt
# Classes which require proxying (used by default AccessController)
# Used to break import loop between this and openmdao.main.container.
CLASSES_TO_PROXY = []
# Cache of proxies created by _make_proxy_type().
_PROXY_CACHE = {}
def is_instance(obj, type_info):
"""
:func:`isinstance` replacement for when `obj` might be a proxy.
obj: object
Object to be tested.
type_info: class or tuple of classes
Class(es) to be tested against.
Returns True if `obj` is an instance of `type_info` or the object `obj`
refers to is an instance of `type_info`.
"""
if isinstance(obj, OpenMDAO_Proxy):
try:
type_info[0]
except TypeError:
type_info = (type_info,)
for typ in type_info:
typename = '%s.%s' % (typ.__module__, typ.__name__)
if obj.__is_instance__(typename):
return True
return False
else:
return isinstance(obj, type_info)
def has_interface(obj, *ifaces):
"""
:func:`obj_has_interface` replacement for when `obj` might be a proxy.
obj: object
Object to be tested.
ifaces: list[Interface]
Interfaces to be tested against.
Returns True if `obj` or the object `obj` refers to supports at least one
:class:`Interface` in `ifaces`.
"""
if isinstance(obj, OpenMDAO_Proxy):
for typ in ifaces:
typename = '%s.%s' % (typ.__module__, typ.__name__)
try:
if obj.__has_interface__(typename):
return True
except RemoteError:
return False
return False
else:
return obj_has_interface(obj, *ifaces)
class OpenMDAO_Server(Server):
"""
A :class:`Server` that supports dynamic proxy generation and credential
checking.
registry: dict
Manager's proxy registry.
address: tuple or string
A :mod:`multiprocessing` address specifying an Internet address or
a pipe.
authkey: string
Authorization key. Inherited from the current :class:`Process`
object if not specified.
serializer: string
Which serialization method to use.
name: string
Name for server, used in log files, etc.
allowed_hosts: list(string)
Host address patterns to check against.
Optional if `allowed_users` is specified.
allowed_users: dict
Dictionary of users and corresponding public keys allowed access.
If None, any user may access. If empty, no user may access.
The host portions of user strings are used for address patterns.
allow_tunneling: bool
If True, allow connections from the local host, even if not listed
otherwise.
"""
def __init__(self, registry, address, authkey, serializer, name=None,
allowed_hosts=None, allowed_users=None, allow_tunneling=False):
super(OpenMDAO_Server, self).__init__(registry, address, authkey,
serializer)
self.name = name or ('OMS_%d' % os.getpid())
self._logger = logging.getLogger(self.name)
self._logger.info('OpenMDAO_Server process %d started, %r',
os.getpid(), keytype(authkey))
Random.atfork() # Get our own PRNG.
self.host = socket.gethostname()
self._allowed_users = allowed_users
if self._allowed_users is not None:
hosts = set()
for user_host in self._allowed_users.keys():
user, host = user_host.split('@')
try:
ip_addr = socket.gethostbyname(host)
except socket.gaierror:
self._logger.warning('No address for %r', host)
else:
hosts.add(ip_addr)
if host == socket.getfqdn() or host == socket.gethostname():
hosts.add('127.0.0.1') # localhost
if allowed_hosts:
hosts |= set(allowed_hosts)
self._allowed_hosts = list(hosts)
else:
self._allowed_hosts = allowed_hosts or []
self._allow_tunneling = allow_tunneling
if allow_tunneling:
self._allowed_hosts.append('127.0.0.1') # localhost
self._allowed_hosts.append(socket.gethostbyname(address[0]))
if self._allowed_users is None:
self._logger.warning(' allowed_users: ANY')
else:
self._logger.info(' allowed_users: %s',
sorted(self._allowed_users.keys()))
if self._allowed_hosts:
self._logger.info(' allowed_hosts: %s',
sorted(self._allowed_hosts))
else:
self._logger.warning(' allowed_hosts: ANY')
self._authkey = authkey
if authkey == 'PublicKey':
# While we may be 'owned' by some remote user, use default
# credentials for getting our key pair. This avoids generation
# overhead and also issues with propagating our public key
# back through a proxy.
self._key_pair = get_key_pair(Credentials.user_host, self._logger)
else:
self._key_pair = None
self._id_to_controller = {}
self._access_controller = AccessController()
for cls in CLASSES_TO_PROXY:
self._access_controller.class_proxy_required(cls)
self._address_type = connection.address_type(self.address)
@property
def public_key(self):
""" Public key for session establishment. """
if self._authkey == 'PublicKey':
return self._key_pair.publickey()
# Just being defensive.
else: #pragma no cover
raise RuntimeError('No public key available')
# This happens on the remote server side and we'll check when connecting.
@property
def public_key_text(self): #pragma no cover
""" Text representation of public key. """
if self._authkey == 'PublicKey':
return encode_public_key(self.public_key)
else:
return ''
def serve_forever(self):
"""
Run the server forever.
This version supports host connection filtering.
Connection filtering allows for PublicKey servers which aren't
accessible by just any host.
"""
current_process()._manager_server = self
try:
try:
while not self.stop:
try:
conn = self.listener.accept()
# Comment-out the line above and use this equivalent
# to debug connectivity issues.
#conn = self.listener._listener.accept()
#self._logger.critical('connection attempt from %r',
# self.listener.last_accepted)
#if self.listener._authkey:
# connection.deliver_challenge(conn, self.listener._authkey)
# connection.answer_challenge(conn, self.listener._authkey)
# Hard to cause this to happen.
except (OSError, IOError): #pragma no cover
if self.stop:
break
else:
continue
address = self.listener.last_accepted
if address:
if not is_legal_connection(address, self._allowed_hosts,
self._logger):
conn.close()
continue
t = threading.Thread(target=self.handle_request,
args=(conn,))
t.daemon = True
try:
t.start()
# Don't want to cause this to happen.
except Exception as exc: #pragma no cover
self._logger.error("Can't start server thread: %r", exc)
conn.close()
continue
# Don't want to cause this to happen.
except (KeyboardInterrupt, SystemExit): #pragma no cover
pass
finally:
self.stop = 999
try:
self.listener.close()
except Exception as exc:
self._logger.error('Exception closing listener: %r', exc)
def handle_request(self, conn):
"""
Handle a new connection.
conn: socket or pipe
Connection to process.
This version filters host connections and avoids getting upset if it
can't deliver a challenge. This is to deal with immediately closed
connections caused by :meth:`manager_is_alive` which are used to avoid
getting hung trying to connect to a manager which is no longer there.
"""
funcname = result = request = None
try:
connection.deliver_challenge(conn, self.authkey)
except (EOFError, IOError) as exc:
conn.close()
return
# Hard to cause this to happen. It rarely happens, and then at shutdown.
except Exception as exc: #pragma no cover
msg = ('#TRACEBACK', 'Exception delivering challenge: %r' % exc)
try:
conn.send(msg)
except Exception as exc:
pass
util.info('Failure to send message: %r', msg)
util.info(' ... request was %r', request)
util.info(' ... exception was %r', exc)
conn.close()
return
try:
connection.answer_challenge(conn, self.authkey)
request = conn.recv()
ignore, funcname, args, kwds = request
assert funcname in self.public, '%r unrecognized' % funcname
func = getattr(self, funcname)
# Hard to cause this to happen. It rarely happens, and then at shutdown.
except Exception as exc: #pragma no cover
msg = ('#TRACEBACK', 'Exception answering challenge: %r' % exc)
else:
try:
result = func(conn, *args, **kwds)
# Hard to cause this to happen. It rarely happens, and then at shutdown.
except Exception as exc: #pragma no cover
try: # Sometimes at shutdown 'traceback' is None!?
msg = ('#TRACEBACK', traceback.format_exc())
except Exception:
msg = ('#TRACEBACK',
'Exception from %r: %r' % (funcname, exc))
else:
msg = ('#RETURN', result)
try:
conn.send(msg)
# Hard to cause this to happen. It rarely happens, and then at shutdown.
except Exception as exc: #pragma no cover
try:
conn.send(('#TRACEBACK', 'Exception sending reply: %r' % exc))
except Exception:
pass
util.info('Failure to send message: %r', msg)
util.info(' ... request was %r', request)
util.info(' ... exception was %r', exc)
conn.close()
def serve_client(self, conn):
"""
Handle requests from the proxies in a particular process/thread.
conn: socket or pipe
Connection to process.
This version supports dynamic proxy generation and credential checking.
"""
self._logger.log(LOG_DEBUG2, 'starting server thread to service %r, %s',
threading.current_thread().name, keytype(self._authkey))
recv = conn.recv
send = conn.send
id_to_obj = self.id_to_obj
id_to_controller = self._id_to_controller
if self._authkey == 'PublicKey':
client_key, session_key = self._init_session(conn)
else:
client_key = ''
session_key = ''
while not self.stop:
try:
ident = methodname = args = kwds = credentials = None
obj = exposed = gettypeid = None
data = recv()
try:
request = decrypt(data, session_key)
except Exception as exc:
trace = traceback.format_exc()
msg = "Can't decrypt/unpack request. This could be the" \
" result of referring to a dead server."
self._logger.error(msg)
self._logger.error(trace)
raise RuntimeError(msg)
ident, methodname, args, kwds, credentials = request
self._logger.log(LOG_DEBUG3, 'request %s %s', ident, methodname)
# self._logger.log(LOG_DEBUG3, 'credentials %s', credentials)
# self._logger.log(LOG_DEBUG3, 'id_to_obj:\n%s',
# self.debug_info(conn))
# Decode and verify valid credentials.
try:
credentials = Credentials.verify(credentials,
self._allowed_users)
except Exception as exc:
self._logger.error('%r' % exc)
raise
try:
obj, exposed, gettypeid = id_to_obj[ident]
# Hard to cause this to happen.
except KeyError: #pragma no cover
msg = 'No object for ident %s' % ident
self._logger.error(msg)
raise KeyError('%s %r: %s' % (self.host, self.name, msg))
if methodname not in exposed:
# Try to raise with a useful error message.
if methodname == '__getattr__':
try:
val = getattr(obj, args[0])
except AttributeError:
raise AttributeError(
'attribute %r of %r object does not exist'
% (args[0], type(obj)))
if inspect.ismethod(val):
methodname = args[0]
else:
raise AttributeError(
'attribute %r of %r is not accessible'
% (args[0], type(obj)))
raise AttributeError(
'method %r of %r object is not in exposed=%r'
% (methodname, type(obj), exposed))
# Set correct credentials for function lookup.
set_credentials(credentials)
function = getattr(obj, methodname)
# Proxy pass-through only happens remotely.
if isinstance(obj, BaseProxy): #pragma no cover
role = None
access_controller = None
else:
# Check for allowed access.
role, credentials, access_controller = \
self._check_access(ident, methodname, function, args,
credentials)
if methodname != 'echo':
# 'echo' is used for performance tests, keepalives, etc.
self._logger.log(LOG_DEBUG2, "Invoke %s %s '%s'",
methodname, role, credentials)
self._logger.log(LOG_DEBUG3, ' %s %s', args, kwds)
# Invoke function.
try:
try:
res = function(*args, **kwds)
self._logger.log(LOG_DEBUG3, ' res %r', res)
except AttributeError as exc:
if isinstance(obj, BaseProxy) and \
methodname == '__getattribute__':
# Avoid an extra round-trip.
res = obj.__getattr__(*args, **kwds)
else:
raise
except Exception as exc:
self._logger.exception('%s %s %s failed:',
methodname, role, credentials)
msg = ('#TRACEBACK', traceback.format_exc())
else:
msg = self._form_reply(res, ident, methodname, function,
args, access_controller, conn)
except AttributeError:
# Just being defensive, this should never happen.
if methodname is None: #pragma no cover
msg = ('#TRACEBACK', traceback.format_exc())
else:
orig_traceback = traceback.format_exc()
try:
fallback_func = self.fallback_mapping[methodname]
self._logger.log(LOG_DEBUG2, 'Fallback %s', methodname)
result = fallback_func(self, conn, ident, obj,
*args, **kwds)
msg = ('#RETURN', result)
except Exception:
msg = ('#TRACEBACK', orig_traceback)
except EOFError:
util.debug('got EOF -- exiting thread serving %r',
threading.current_thread().name)
sys.exit(0)
# Just being defensive, this should never happen.
except Exception: #pragma no cover
trace = traceback.format_exc()
self._logger.error('serve_client exception, method %s',
methodname)
self._logger.error(trace)
msg = ('#TRACEBACK', trace)
try:
try:
send(encrypt(msg, session_key))
except Exception:
send(encrypt(('#UNSERIALIZABLE', repr(msg)), session_key))
# Just being defensive, this should never happen.
except Exception as exc: #pragma no cover
self._logger.error('exception in thread serving %r',
threading.current_thread().name)
self._logger.error(' ... message was %r', msg)
self._logger.error(' ... exception was %r', exc)
conn.close()
sys.exit(1)
def _init_session(self, conn):
""" Receive client public key, send session key. """
# Hard to cause exceptions to happen where we'll see them.
try:
client_data = conn.recv()
except Exception as exc: #pragma no cover
self._logger.error("Can't receive client data: %r", exc)
raise
client_version = client_data[0]
if client_version != 1: #pragma no cover
msg = 'Expected client protocol version 1, got %r' % client_version
self._logger.error(msg)
raise RuntimeError(msg)
n, e, encrypted = client_data[1:]
if e != self._key_pair.e or n != self._key_pair.n: #pragma no cover
msg = 'Server key mismatch'
self._logger.error(msg)
raise RuntimeError(msg)
try:
text = pk_decrypt(encrypted, self._key_pair)
client_key = decode_public_key(text)
except Exception as exc: #pragma no cover
self._logger.error("Can't recreate client key: %r", exc)
raise
server_version = 1
try:
session_key = hashlib.sha1(str(id(conn))).hexdigest()
data = client_key.encrypt(session_key, '')
conn.send((server_version, data))
except Exception as exc: #pragma no cover
self._logger.error("Can't send session key: %r", exc)
raise
return (client_key, session_key)
def _check_access(self, ident, methodname, function, args, credentials):
""" Check for valid access, return (role, credentials, controller). """
obj, exposed, gettypeid = self.id_to_obj[ident]
# Get access controller for obj.
access_controller = self._id_to_controller.get(ident)
if access_controller is None:
try:
get_access_controller = getattr(obj, 'get_access_controller')
except AttributeError:
access_controller = self._access_controller
# Only happens on remote protected object.
else: #pragma no cover
if get_access_controller is None:
access_controller = self._access_controller
else:
access_controller = get_access_controller()
self._id_to_controller[ident] = access_controller
# Get role based on credentials.
role = access_controller.get_role(credentials)
if methodname in SPECIALS:
# Check for valid access based on role.
access_controller.check_access(role, methodname, obj, args[0])
else:
# Check for valid role.
try:
check_role(role, function)
except RoleError as exc:
raise RoleError('%s(): %s' % (methodname, exc))
# Set credentials for execution of function. Typically
# these are just the credentials of the caller, but
# sometimes a function needs to execute with different
# credentials.
credentials = \
access_controller.get_proxy_credentials(function, credentials)
set_credentials(credentials)
return (role, credentials, access_controller)
def _form_reply(self, res, ident, methodname, function, args,
access_controller, conn):
""" Return reply message for `res`. """
obj, exposed, gettypeid = self.id_to_obj[ident]
msg = None
typeid = None
if inspect.ismethod(res) and \
(methodname == '__getattribute__' or methodname == '__getattr__'):
# More informative for common case of missing RBAC.
raise AttributeError('method %r of %r object is not in exposed=%r'
% (args[0], type(obj), exposed))
# Proxy pass-through only happens remotely.
if isinstance(res, OpenMDAO_Proxy): #pragma no cover
res_address = res._token.address
res_type = connection.address_type(res_address)
proxy_proxy = False
if res_type != self._address_type:
proxy_proxy = True # Different type of connection.
elif res_type == 'AF_INET':
if res_address[0] != self.address[0]:
proxy_proxy = True # Different network.
elif res_address[0] == '127.0.0.1' and self._allow_tunneling:
proxy_proxy = True # Access through tunnel.
if proxy_proxy:
# Create proxy for proxy.
typeid = res._token.typeid
proxyid = make_typeid(res)
self._logger.log(LOG_DEBUG2, 'Creating proxy for proxy %s',
proxyid)
if proxyid not in self.registry:
self.registry[proxyid] = (None, None, None, _auto_proxy)
else:
# Propagate the proxy info.
res._close.cancel() # Don't decref when reaped.
msg = ('#PROXY', (res._exposed_, res._token, res._pubkey))
elif access_controller is not None:
# Check if the value must be proxied.
if methodname in SPECIALS:
if access_controller.need_proxy(obj, args[0], res):
# Create proxy if in declared proxy types.
typeid = make_typeid(res)
proxyid = typeid
if typeid not in self.registry:
self.registry[typeid] = (None, None, None, None)
elif need_proxy(function, res, access_controller):
# Create proxy if in declared proxy types.
typeid = make_typeid(res)
proxyid = typeid
if typeid not in self.registry:
self.registry[typeid] = (None, None, None, None)
elif hasattr(res, '_parent') and res._parent is not None:
# Check if the value must be copied (VariableTree).
# Odd that it isn't being proxied (though we don't want one).
res = res.copy()
# Proxy pass-through only happens remotely.
else: #pragma no cover
# Create proxy if registered.
typeid = gettypeid and gettypeid.get(methodname, None)
proxyid = typeid
if msg is None:
if typeid:
rident, rexposed = self.create(conn, proxyid, res)
token = Token(typeid, self.address, rident)
self._logger.log(LOG_DEBUG2, 'Returning proxy for %s at %s',
typeid, self.address)
if self._key_pair is None:
pubkey = None
else:
pubkey = self._key_pair.publickey()
msg = ('#PROXY', (rexposed, token, pubkey))
else:
msg = ('#RETURN', res)
return msg
def _fallback_isinstance(self, conn, ident, obj, typename):
""" Check if `obj` is an instance of `typename`. """
# It's very difficult to get an arbitrary `typename` here.
dot = typename.rfind('.')
module_name = typename[:dot]
class_name = typename[dot+1:]
try:
module = __import__(module_name)
except ImportError: #pragma no cover
return False
module = sys.modules[module_name]
try:
cls = getattr(module, class_name)
except AttributeError: #pragma no cover
return False
return isinstance(obj, cls)
Server.fallback_mapping['__is_instance__'] = _fallback_isinstance
def _fallback_hasinterface(self, conn, ident, obj, typename):
""" Check if `obj` supports `typename`. """
# It's very difficult to get an arbitrary `typename` here.
dot = typename.rfind('.')
module_name = typename[:dot]
class_name = typename[dot+1:]
try:
module = __import__(module_name)
except ImportError: #pragma no cover
return False
module = sys.modules[module_name]
try:
cls = getattr(module, class_name)
except AttributeError: #pragma no cover
return False
return obj_has_interface(obj, cls)
Server.fallback_mapping['__has_interface__'] = _fallback_hasinterface
# This is for low-level debugging of servers.
def debug_info(self, conn): #pragma no cover
"""
Return string representing state of id_to_obj mapping.
conn: socket or pipe
Unused.
This version handles proxies in a special manner.
"""
self.mutex.acquire()
try:
result = []
keys = self.id_to_obj.keys()
keys.sort()
for ident in keys:
if ident != '0':
obj = self.id_to_obj[ident][0]
if isinstance(obj, BaseProxy):
obj_str = '%s proxy for %s %s' \
% (keytype(obj._authkey), obj._id,
obj._token.typeid)
else:
obj_str = str(obj)[:75]
result.append(' %s: refcount=%s\n %s' %
(ident, self.id_to_refcount[ident], obj_str))
return '\n'.join(result)
finally:
self.mutex.release()
def create(self, conn, typeid, *args, **kwds):
"""
Create a new shared object and return its id.
conn: socket or pipe
Connection to process.
typeid: string
Identifier string for type of object to be created.
This version uses :func:`public_methods`.
"""
self.mutex.acquire()
try:
try:
callable, exposed, method_to_typeid, proxytype = \
self.registry[typeid]
# Just being defensive.
except KeyError: #pragma no cover
logging.error('mp_support.create: %r registry', typeid)
for key, value in self.registry.items():
logging.error(' %s: %s', key, value)
raise
if callable is None:
assert len(args) == 1 and not kwds
obj = args[0]
else:
obj = callable(*args, **kwds)
if exposed is None:
exposed = public_methods(obj)
if method_to_typeid is not None:
assert type(method_to_typeid) is dict
exposed = list(exposed) + list(method_to_typeid)
ident = '%x' % id(obj) # convert to string because xmlrpclib
# only has 32 bit signed integers
util.debug('%r callable returned object with id %r', typeid, ident)
self.id_to_obj[ident] = (obj, set(exposed), method_to_typeid)
if ident not in self.id_to_refcount:
self.id_to_refcount[ident] = 0
# increment the reference count immediately, to avoid
# this object being garbage collected before a Proxy
# object for it can be created. The caller of create()
# is responsible for doing a decref once the Proxy object
# has been created.
self.incref(conn, ident)
return ident, tuple(exposed)
finally:
self.mutex.release()
# Will only be seen on remote.
def shutdown(self, conn): #pragma no cover
""" Shutdown this process. """
self.stop = 888
msg = 'received shutdown request, running exit functions'
print msg
sys.stdout.flush()
self._logger.info(msg)
remove_remote_handlers()
# Deprecated, but marginally better than atexit._run_exitfuncs()
# Don't try to log here, logging shuts-down via atexit.
if hasattr(sys, 'exitfunc'):
try:
sys.exitfunc()
except Exception as exc:
print 'sys.exitfunc(): %s' % exc
sys.stdout.flush()
print ' exit functions complete'
sys.stdout.flush()
super(OpenMDAO_Server, self).shutdown(conn)
class OpenMDAO_Manager(BaseManager):
"""
Uses :class:`OpenMDAO_Server`, retains the public key, and puts some slack
in shutdown timing.
address: tuple or string
A :mod:`multiprocessing` address specifying an Internet address or
a pipe.
authkey: string
Authorization key. Inherited from the current :class:`Process`
object if not specified.
serializer: string
Which serialization method to use.
pubkey: public key
Public portion of server's public/private key pair.
Needed for client/proxy side.
name: string
Name for server, used in log files, etc.
allowed_hosts: list(string)
Host address patterns to check against.
allowed_users: dict
Dictionary of users and corresponding public keys allowed access.
If None, any user may access. If empty, no user may access.
allow_tunneling: bool
If True, allow connections from 127.0.0.1 (localhost), even if not
listed otherwise.
"""
_Server = OpenMDAO_Server
def __init__(self, address=None, authkey=None, serializer='pickle',
pubkey=None, name=None, allowed_hosts=None, allowed_users=None,
allow_tunneling=False):
super(OpenMDAO_Manager, self).__init__(address, authkey, serializer)
self._pubkey = pubkey
self._name = name
self._allowed_hosts = allowed_hosts
self._allowed_users = allowed_users
self._allow_tunneling = allow_tunneling
def get_server(self):
"""
Return a server object with :meth:`serve_forever` and address attribute.
"""
assert self._state.value == State.INITIAL
return OpenMDAO_Server(self._registry, self._address, self._authkey,
self._serializer, self._name,
self._allowed_hosts, self._allowed_users,
self._allow_tunneling)
def start(self, cwd=None, log_level=logging.DEBUG):
"""
Spawn a server process for this manager object.
cwd: string
Directory to start in.
log_level: int
Initial root logging level for the server process.
This version retrieves the server's public key.
"""
assert self._state.value == State.INITIAL
# Pipe over which we will retrieve address of server.
reader, writer = connection.Pipe(duplex=False)
if sys.platform == 'win32': #pragma no cover
# Make registry pickleable.
registry = {}
for typeid, info in self._registry.items():
callable, exposed, method_to_typeid, proxytype = info
if proxytype and proxytype != _auto_proxy:
registry[typeid] = \
(callable, exposed, method_to_typeid, 'rebuild')
else:
registry = self._registry
# Flush logs before cloning.
for handler in logging._handlerList:
try:
handler.flush()
except AttributeError:
h = handler() # WeakRef
if h:
h.flush()
# Spawn process which runs a server.
credentials = get_credentials()
log_port = logging_port('localhost', 'localhost')
self._process = Process(
target=type(self)._run_server,
args=(registry, self._address, self._authkey,
self._serializer, self._name, self._allowed_hosts,
self._allowed_users, self._allow_tunneling,
writer, credentials, cwd, log_port, log_level),
)
ident = ':'.join(str(i) for i in self._process._identity)
self._process.name = type(self).__name__ + '-' + ident
self._process.start()
pid = self._process.pid
# Get address of server.
if self._authkey == 'PublicKey':
if sys.platform == 'win32' and not HAVE_PYWIN32: #pragma no cover
timeout = 120
else:
timeout = 10
else:
timeout = 10
writer.close()
start = time.time()
error_msg = None
for retry in range(timeout):
if reader.poll(1):
break
if not self._process.is_alive():
error_msg = 'Server process %d exited: %s' \
% (pid, self._process.exitcode)
break
# Hard to cause a timeout.
else: #pragma no cover
if error_msg is None:
et = time.time() - start
self._process.terminate()
error_msg = 'Server process %d startup timed-out in %.2f' \
% (pid, et)
if error_msg is None:
try:
reply = reader.recv()
except Exception as exc: # str(Exception()) is null, repr() isn't.
error_msg = 'Server process %d read failed: %s' \
% (pid, str(exc) or repr(exc))
else:
if isinstance(reply, Exception):
error_msg = 'Server process %d startup failed: %s' \
% (pid, str(reply) or repr(reply))
if error_msg:
logging.error(error_msg)
if cwd:
logging.error(' in dir %r', cwd)
for name in ('stdout', 'stderr', 'openmdao_log*.txt'):
for path in glob.glob(os.path.join(cwd, name)):
name = os.path.basename(path)
with open(path, 'r') as inp:
logging.error(' %s:\n%s', name, inp.read())
raise RuntimeError(error_msg)
self._address = reply
if self._authkey == 'PublicKey':
self._pubkey = reader.recv()
reader.close()
et = time.time() - start
logging.debug('Server process %d startup in %.2f', pid, et)
# Register a finalizer.
self._state.value = State.STARTED
self.shutdown = util.Finalize(
self, type(self)._finalize_manager,
args=(self._process, self._address, self._authkey,
self._state, self._Client),
exitpriority=0
)
# This happens on the remote server side and we'll check when using it.
@classmethod
def _run_server(cls, registry, address, authkey, serializer, name,
allowed_hosts, allowed_users, allow_tunneling,
writer, credentials, cwd, log_port, log_level): #pragma no cover
"""
Create a server, report its address and public key, and run it.
"""
try:
if sys.platform == 'win32':
set_credentials(credentials)
# Recreate registry proxytypes.
for typeid, info in registry.items():
callable, exposed, method_to_typeid, proxytype = info
if proxytype == 'rebuild':
registry[typeid] = (callable, exposed, method_to_typeid,
_make_proxy_type(typeid, exposed))
# If specified, move to new directory.
if cwd is not None:
os.chdir(cwd)
# Cleanup cloned logging environment.
del logging.root.handlers[:]
del logging._handlerList[:]
logging._handlers.clear()
# Reset stdout & stderr.
sys.stdout.flush()
sys.stderr.flush()
sys.stdout = open('stdout', 'w')
sys.stderr = open('stderr', 'w')
# Reset logging.
logging.basicConfig(level=log_level,
datefmt='%b %d %H:%M:%S',
format='%(asctime)s %(levelname)s %(name)s: %(message)s',
filename='openmdao_log.txt', filemode='w')
# Connect to remote logging server.
if log_port:
install_remote_handler('localhost', log_port, name)
# Create server.
server = cls._Server(registry, address, authkey, serializer, name,
allowed_hosts, allowed_users, allow_tunneling)
except Exception as exc:
traceback.print_exc()
writer.send(exc)
return
else:
# Inform parent process of the server's address.
writer.send(server.address)
if authkey == 'PublicKey':
writer.send(server.public_key)
finally:
writer.close()
# Run the manager.
util.info('manager serving at %r', server.address)
server.serve_forever()
@staticmethod
def _finalize_manager(process, address, authkey, state, _Client):
"""
Shutdown the manager process; will be registered as a finalizer.
This version uses relaxed timing.
"""
if process.is_alive():
logging.debug('sending shutdown message to manager')
try:
conn = _get_connection(_Client, address, authkey)
try:
dispatch(conn, None, 'shutdown')
finally:
conn.close()
# Just being defensive here.
except Exception: #pragma no cover
pass
process.join(timeout=2)
# No good way to cause process to not shut down.
if process.is_alive(): #pragma no cover
logging.warning('manager still alive after shutdown request')
if hasattr(process, 'terminate'):
logging.warning('trying to `terminate()` manager process')
process.terminate()
process.join(timeout=1)
if process.is_alive():
logging.warning('manager still alive after terminate')
if process.is_alive():
if sys.platform != 'win32':
                        os.kill(process.pid, signal.SIGKILL)
process.join(timeout=1)
if process.is_alive():
logging.warning('manager still alive after kill')
state.value = State.SHUTDOWN
try:
del BaseProxy._address_to_local[address]
# Just being defensive here.
except KeyError: #pragma no cover
pass
class ObjectManager(object):
"""
Provides a multiprocessing interface for an existing object.
obj: object
Object to provide remote access to.
address: tuple or string
A :mod:`multiprocessing` address specifying an Internet address or
a pipe.
serializer: string
Which serialization method to use.
authkey: string
Authorization key. Inherited from the current :class:`Process`
object if not specified.
name: string
Name for server, used in log files, etc.
allowed_hosts: list(string)
Host address patterns to check against.
allowed_users: dict
Dictionary of users and corresponding public keys allowed access.
If None, any user may access. If empty, no user may access.
"""
def __init__(self, obj, address=None, serializer='pickle', authkey=None,
name=None, allowed_hosts=None, allowed_users=None):
self._typeid = make_typeid(obj)
self._ident = '%x' % id(obj)
logging.debug('ObjectManager address %s, %r, name %r, ident %r',
address, keytype(authkey), name, self._ident)
self._manager = OpenMDAO_Manager(address=address, serializer=serializer,
authkey=authkey, name=name,
allowed_hosts=allowed_hosts,
allowed_users=allowed_users)
self._server = self._manager.get_server()
self._exposed = public_methods(obj)
with self._server.mutex:
self._server.id_to_obj[self._ident] = (obj, self._exposed, None)
self._server.id_to_refcount[self._ident] = 1
self._server_thread = threading.Thread(target=self._run_server)
self._server_thread.daemon = True
self._server_thread.start()
self._proxy = None
@property
def proxy(self):
""" Proxy for our object server. """
if self._proxy is None:
token = Token(self._typeid, self._server.address, self._ident)
authkey = self._server._authkey
pubkey = self._server.public_key if authkey == 'PublicKey' else None
self._proxy = _auto_proxy(token, self._manager._serializer,
authkey=authkey, exposed=self._exposed,
pubkey=pubkey)
self._server.incref(None, self._ident)
return self._proxy
def _run_server(self):
""" Run a manager server (in a separate thread!). """
self._server.serve_forever()
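# Usage sketch: ObjectManager wraps an already-instantiated object in an in-process
# server thread and hands back a proxy for it (object and user names below are
# placeholders):
#
#     mgr = ObjectManager(my_obj, authkey='PublicKey', name='MyObjServer',
#                         allowed_users=allowed_users)
#     prox = mgr.proxy                 # OpenMDAO_Proxy bound to the server thread
#     prox.some_public_method()        # dispatched through OpenMDAO_Proxy._callmethod()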
class OpenMDAO_Proxy(BaseProxy):
"""
Proxy for a remote object.
args: tuple
Passed to :class:`BaseProxy`.
kwds: dict
If it contains `pubkey`, then that value is used for the remote public
key, and the entry is removed before being passed to :class:`BaseProxy`.
This version sends credentials and provides dynamic result proxy generation.
.. note::
:meth:`BaseProxy.__str__` (used by :meth:`str` and the ``%s`` format)
will return :meth:`__repr__` of the remote object. To avoid the
network round-trip delay, use :meth:`repr` or the ``%r`` format.
"""
implements(IContainerProxy)
def __init__(self, *args, **kwds):
try:
pubkey = kwds['pubkey']
except KeyError:
pubkey = None
else:
del kwds['pubkey']
super(OpenMDAO_Proxy, self).__init__(*args, **kwds)
if self._manager is None:
self._pubkey = pubkey
else:
self._pubkey = self._manager._pubkey
def _connect(self):
""" This version translates tunneled addresses. """
util.debug('making connection to manager')
name = current_process().name
if threading.current_thread().name != 'MainThread':
name += '|' + threading.current_thread().name
address = tunnel_address(self._token.address)
conn = self._Client(address, authkey=self._authkey)
dispatch(conn, None, 'accept_connection', (name,))
self._tls.connection = conn
def _callmethod(self, methodname, args=None, kwds=None):
"""
        Try to call a method of the referent and return a copy of the result.
This version optionally encrypts the channel and sends the current
thread's credentials with method arguments.
"""
args = args or ()
kwds = kwds or {}
try:
conn = self._tls.connection
except AttributeError:
curr_thread = threading.current_thread()
util.debug('thread %r does not own a connection', curr_thread.name)
try:
self._connect()
except Exception as exc:
msg = "Can't connect to server at %r for %r: %r" \
% (self._token.address, methodname, exc)
logging.error(msg)
raise RuntimeError(msg)
conn = self._tls.connection
if self._authkey == 'PublicKey':
self._init_session(conn)
else:
self._tls.session_key = ''
session_key = self._tls.session_key
# FIXME: Bizarre problem evidenced by test_extcode.py (Python 2.6.1)
# For some reason pickling the env_vars dictionary causes:
# PicklingError: Can't pickle <class 'openmdao.main.mp_support.ObjServer'>:
# attribute lookup openmdao.main.mp_support.ObjServer failed
# The reported type is not in the (current) Dict items.
# Apparently this is some Traits 'feature'.
new_args = []
for arg in args:
if isinstance(arg, TraitDictObject):
new_args.append(dict(arg))
else:
new_args.append(arg)
try:
conn.send(encrypt((self._id, methodname, new_args, kwds,
get_credentials().encode()), session_key))
except IOError as exc:
msg = "Can't send to server at %r for %r: %r" \
% (self._token.address, methodname, exc)
logging.error(msg)
raise RuntimeError(msg)
kind, result = decrypt(conn.recv(), session_key)
if kind == '#RETURN':
return result
elif kind == '#PROXY':
exposed, token, pubkey = result
# Proxy passthru only happens remotely.
if self._manager is None: #pragma no cover
self._manager = OpenMDAO_Manager(token.address, self._authkey,
pubkey=pubkey)
try:
proxytype = self._manager._registry[token.typeid][-1]
except KeyError:
proxytype = None
if proxytype is None:
self._manager.register(token.typeid, None, _auto_proxy)
proxytype = self._manager._registry[token.typeid][-1]
if token.address != self._manager.address:
# Proxy to different server than request was sent to.
manager = OpenMDAO_Manager(token.address, self._authkey,
pubkey=pubkey)
else:
manager = self._manager
proxy = proxytype(token, self._serializer, manager=manager,
authkey=self._authkey, exposed=exposed,
pubkey=pubkey)
conn = _get_connection(self._Client, token.address, self._authkey)
dispatch(conn, None, 'decref', (token.id,))
return proxy
raise convert_to_error(kind, result)
def _init_session(self, conn):
""" Send client public key, receive session key. """
key_pair = get_key_pair(Credentials.user_host)
public_key = key_pair.publickey()
text = encode_public_key(public_key)
server_key = self._pubkey
encrypted = pk_encrypt(text, server_key)
client_version = 1
conn.send((client_version, server_key.n, server_key.e, encrypted))
server_data = conn.recv()
server_version = server_data[0]
# Just being defensive, this should never happen.
if server_version != 1: #pragma no cover
msg = 'Expecting server protocol version 1, got %r' % server_version
logging.error(msg)
if server_version == '#TRACEBACK':
try:
logging.error(''.join(server_data[1]))
except Exception:
pass
raise RuntimeError(msg)
self._tls.session_key = key_pair.decrypt(server_data[1])
def _incref(self):
"""
Tell server to increment its reference count.
This version avoids a hang in _Client if the server no longer exists.
"""
# Hard to cause this to happen.
if not OpenMDAO_Proxy.manager_is_alive(self._token.address): #pragma no cover
raise RuntimeError('Cannot connect to manager at %r'
% (self._token.address,))
conn = _get_connection(self._Client, self._token.address, self._authkey)
dispatch(conn, None, 'incref', (self._id,))
# Enable this with care. While testing CaseIteratorDriver it can cause a
# deadlock in logging (called via BaseProxy._after_fork()).
#util.debug('INCREF %r', self._token.id)
self._idset.add(self._id)
state = self._manager and self._manager._state
self._close = util.Finalize(
self, OpenMDAO_Proxy._decref,
args=(self._token, self._authkey, state,
self._tls, self._idset, self._Client),
exitpriority=10
)
@staticmethod
def _decref(token, authkey, state, tls, idset, _Client):
"""
Tell server to decrement its reference count.
This version avoids a hang in _Client if the server no longer exists.
"""
idset.discard(token.id)
# check whether manager is still alive
if state is None or state.value == State.STARTED:
# Avoid a hang in _Client() if the server isn't there anymore.
if OpenMDAO_Proxy.manager_is_alive(token.address):
# tell manager this process no longer cares about referent
try:
util.debug('DECREF %r', token.id)
conn = _get_connection(_Client, token.address, authkey)
dispatch(conn, None, 'decref', (token.id,))
# Hard to cause this to happen.
except Exception as exc: #pragma no cover
util.debug('... decref failed %s', exc)
else:
util.debug('DECREF %r -- manager already shutdown', token.id)
# check whether we can close this thread's connection because
# the process owns no more references to objects for this manager
if not idset and hasattr(tls, 'connection'):
util.debug('thread %r has no more %r proxies so closing conn',
threading.current_thread().name, token.typeid)
tls.connection.close()
del tls.connection
@staticmethod
def manager_is_alive(address):
"""
Check whether manager is still alive.
address: tuple or string
A :mod:`multiprocessing` address specifying an Internet address or
a pipe.
"""
addr_type = connection.address_type(address)
if addr_type == 'AF_PIPE': #pragma no cover
try:
win32.WaitNamedPipe(address, 10) # 0.01 sec
except WindowsError:
alive = False
else:
alive = True
else:
if addr_type == 'AF_INET':
sock = socket.socket(socket.AF_INET)
elif addr_type == 'AF_UNIX':
sock = socket.socket(socket.AF_UNIX)
address = tunnel_address(address)
try:
sock.connect(address)
except socket.error as exc:
if exc.args[0] == errno.ECONNREFUSED or \
exc.args[0] == errno.ENOENT:
alive = False
# Just being defensive.
else: #pragma no cover
raise
else:
sock.close()
alive = True
return alive
def __reduce__(self):
""" For unpickling. This version uses :func:`_auto_proxy`. """
kwds = {}
kwds['pubkey'] = self._pubkey
# Happens on other side of fork().
if Popen.thread_is_spawning(): #pragma no cover
kwds['authkey'] = self._authkey
elif self._pubkey:
# Can't pickle an AuthenticationString.
kwds['authkey'] = 'PublicKey'
kwds['exposed'] = self._exposed_
return (RebuildProxy,
(_auto_proxy, self._token, self._serializer, kwds))
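# As noted in the OpenMDAO_Proxy docstring, str()/%s on a proxy triggers a remote
# __repr__ call; prefer repr()/%r when logging proxies in hot paths, e.g.:
#
#     logging.debug('got proxy %r', prox)    # local, no network traffic
#     logging.debug('got proxy %s', prox)    # one round-trip to the remote object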
def register(cls, manager, module=None):
"""
Register class `cls` proxy info with `manager`. The class will be
    registered under its full path, with '.' replaced by '_'.
Not typically called by user code.
cls: class
Class to be registered.
manager: :class:`OpenMDAO_Manager`
Manager to register with.
module: string
Module name for registration. Necessary if `cls` might be defined
by module '__main__'.
"""
if module is None:
module = cls.__module__
typeid = '%s.%s' % (module, cls.__name__)
typeid = typeid.replace('.', '_')
exposed = public_methods(cls)
proxytype = _make_proxy_type(typeid, exposed)
manager.register(typeid, callable=cls, exposed=exposed, proxytype=proxytype)
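# Usage sketch for register() with an OpenMDAO_Manager (class and module names are
# placeholders): after registration the manager grows a factory method named after the
# typeid ('.' replaced by '_'), which creates a server-side instance and returns a proxy.
#
#     manager = OpenMDAO_Manager(authkey='PublicKey', allowed_hosts=['127.0.0.1'])
#     register(MyComponent, manager, module='my_module')   # typeid 'my_module_MyComponent'
#     manager.start()
#     prox = manager.my_module_MyComponent()               # OpenMDAO_Proxy to remote instance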
def _make_proxy_type(name, exposed):
"""
Return a proxy type whose methods are given by `exposed`.
This version supports special attribute access methods.
"""
exposed = tuple(exposed)
try:
return _PROXY_CACHE[(name, exposed)]
except KeyError:
pass
dic = {'_instance_names': {}, '_interface_names': {}}
for meth in exposed:
if meth == '__is_instance__':
# Call remote if we don't have the answer in cache.
exec """
def __is_instance__(self, *args, **kwds):
try:
return self._instance_names[args[0]]
except KeyError:
yes_no = self._callmethod('__is_instance__', args, kwds)
self._instance_names[args[0]] = yes_no
return yes_no
""" in dic
elif meth == '__has_interface__':
# Call remote if we don't have the answer in cache.
exec """
def __has_interface__(self, *args, **kwds):
try:
return self._interface_names[args[0]]
except KeyError:
yes_no = self._callmethod('__has_interface__', args, kwds)
self._interface_names[args[0]] = yes_no
return yes_no
""" in dic
elif meth == '__getattr__':
pass # Avoid recursion loop, but must be in 'exposed'.
elif meth == '__getattribute__':
# Call remote if not private or defined locally (proxied methods).
exec """
def __getattr__(self, *args, **kwds):
if args[0][0] == '_':
return object.__getattribute__(self, *args, **kwds)
try:
return object.__getattribute__(self, *args, **kwds)
except AttributeError:
try:
return self._callmethod('__getattribute__', args, kwds)
except AttributeError:
return self._callmethod('__getattr__', args, kwds)
""" in dic
elif meth == '__setattr__' or meth == '__delattr__':
# Call remote if not private.
exec """
def %s(self, *args, **kwds):
if args[0][0] == '_':
return object.%s(self, *args, **kwds)
return self._callmethod(%r, args, kwds)
""" % (meth, meth, meth) in dic
elif meth == '__exit__':
# Can't pickle traceback argument.
exec """
def __exit__(self, exc_type, exc_value, traceback):
return self._callmethod('__exit__', (exc_type, exc_value, None))
""" in dic
else:
# Normal method always calls remote.
exec """
def %s(self, *args, **kwds):
return self._callmethod(%r, args, kwds)
""" % (meth, meth) in dic
ProxyType = type(name, (OpenMDAO_Proxy,), dic)
ProxyType._exposed_ = exposed
_PROXY_CACHE[(name, exposed)] = ProxyType
return ProxyType
def _auto_proxy(token, serializer, manager=None, authkey=None,
exposed=None, incref=True, pubkey=None):
"""
Return an auto-proxy for `token`.
This version uses :func:`_make_proxy_type`.
"""
ProxyType = _make_proxy_type('OpenMDAO_AutoProxy[%s]' % token.typeid,
exposed)
try:
proxy = ProxyType(token, serializer, manager=manager, authkey=authkey,
incref=incref, pubkey=pubkey)
except Exception:
logging.exception('Auto proxy creation failed for %s at %s:',
token.typeid, token.address)
raise
proxy._isauto = True
return proxy
def _get_connection(_client, address, authkey):
"""
Get client connection to `address` using `authkey`.
Avoids dying on 'Interrupted system call'. (Should be in lower layer)
"""
address = tunnel_address(address)
for retry in range(3):
try:
conn = _client(address, authkey=authkey)
except IOError as exc:
            if exc.errno != errno.EINTR or retry >= 2:
raise
except Exception:
raise
else:
return conn
raise RuntimeError('Too many connection attempts')
|
DatasetLoader_imbalance.py
|
#! /usr/bin/python
# -*- encoding: utf-8 -*-
import torch
import numpy
import random
import pdb
import os
import threading
import time
import math
from scipy.io import wavfile
from queue import Queue
import random
def round_down(num, divisor):
return num - (num%divisor)
def loadWAV(filename, max_frames, evalmode=True, num_eval=20):
# Maximum audio length
max_audio = max_frames * 160 + 240
# Read wav file and convert to torch tensor
sample_rate, audio = wavfile.read(filename)
audiosize = audio.shape[0]
if audiosize <= max_audio:
shortage = math.floor( ( max_audio - audiosize + 1 ) / 2 )
audio = numpy.pad(audio, (shortage, shortage), 'constant', constant_values=0)
audiosize = audio.shape[0]
if evalmode:
startframe = numpy.linspace(0,audiosize-max_audio,num=num_eval)
else:
startframe = numpy.array([numpy.int64(random.random()*(audiosize-max_audio))])
feats = []
if evalmode and max_frames == 0:
feats.append(audio)
else:
for asf in startframe:
feats.append(audio[int(asf):int(asf)+max_audio])
feat = numpy.stack(feats,axis=0)
feat = torch.FloatTensor(feat)
return feat;
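# Usage sketch (the wav path is a placeholder): with max_frames=200 each crop is
# 200*160 + 240 = 32240 samples; evalmode=True stacks num_eval evenly spaced crops,
# evalmode=False takes a single random crop.
#
#     feat = loadWAV('spk1/utt1.wav', max_frames=200, evalmode=False)
#     # feat.shape -> torch.Size([1, 32240])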
class DatasetLoader_imbalance(object):
def __init__(self, dataset_file_name, batch_size, max_frames, max_seg_per_spk, nDataLoaderThread, gSize, train_path, maxQueueSize = 10, **kwargs):
self.dataset_file_name = dataset_file_name;
self.nWorkers = nDataLoaderThread;
self.max_frames = max_frames;
self.max_seg_per_spk = max_seg_per_spk;
self.batch_size = batch_size;
self.maxQueueSize = maxQueueSize;
self.data_dict = {};
self.data_list = [];
self.nFiles = 0;
self.gSize = gSize; ## number of clips per sample (e.g. 1 for softmax, 2 for triplet or pm)
self.dataLoaders = [];
### Read Training Files...
with open(dataset_file_name) as dataset_file:
while True:
line = dataset_file.readline();
if not line:
break;
data = line.split();
speaker_name = data[0];
filename = os.path.join(train_path,data[1]);
if not (speaker_name in self.data_dict):
self.data_dict[speaker_name] = [];
self.data_dict[speaker_name].append(filename);
### Initialize Workers...
self.datasetQueue = Queue(self.maxQueueSize);
def dataLoaderThread(self, nThreadIndex):
index = nThreadIndex*self.batch_size;
if(index >= self.nFiles):
return;
while(True):
if(self.datasetQueue.full() == True):
time.sleep(1.0);
continue;
in_data = [];
feat = []
feat_label = []
for ij in range(index,index+self.batch_size):
for ii in range(len(self.data_list[ij])):
feat.append(loadWAV(self.data_list[ij][ii], self.max_frames, evalmode=False));
feat_label.append(self.data_label[ij])
#print(feat[0].shape) #(1,32240)
in_data.append(torch.cat(feat, dim=0));
#print(in_data[0].shape) #(batch, 32240)
#in_label = numpy.asarray(self.data_label[index:index+self.batch_size]);
in_label = numpy.asarray(feat_label)
self.datasetQueue.put([in_data, in_label]);
#print(len(in_data)) 2 for gSize=2
index += self.batch_size*self.nWorkers; #here is how thread works
if(index+self.batch_size > self.nFiles):
break;
def __iter__(self): #first: update dataloader(give whole dataset at one time) (iterator typically returns itself)
dictkeys = list(self.data_dict.keys());
dictkeys.sort()
lol = lambda lst, sz: [lst[i:i+sz] for i in range(0, len(lst), sz)]
flattened_list = []
flattened_label = []
## Data for each class
########################################
#here! gSize needs to be randomly generated
#######################################
gsize_arr = [2,3]
for findex, key in enumerate(dictkeys):
g_size = random.sample(gsize_arr,1)[0]
data = self.data_dict[key] #e.g. len(data) = 103
numSeg = round_down(min(len(data),self.max_seg_per_spk),g_size) #numSeg = 102
rp = lol(numpy.random.permutation(len(data))[:numSeg],g_size) #(51, 2)
flattened_label.extend([findex] * (len(rp)))
for indices in rp:
flattened_list.append([data[i] for i in indices])
#flattened_list: [(51,2),(23,2),(40,2),...,(19,2),(13,2)] len(flattened_list) = 900000
#flattened_label: [(0,0,0,0,...),(1,1,1,1,...),(2,2,2,2,2....),......,(5992,5992,5992,5992,....,5992),(5993,5993,5993,...,5993)]
#another manner:
#flattened_list: [(34,3),(23,2),(27,3),...,(13,3),(13,2)] len(flattened_list) = 900000
#flattened_label: [(0,0,0,0,...),(1,1,1,1,...),(2,2,2,2,2....),......,(5992,5992,5992,5992,....,5992),(5993,5993,5993,...,5993)]
## Data in random order
mixid = numpy.random.permutation(len(flattened_label))
mixlabel = []
mixmap = []
#mixid = [55,107,200,500,2,10,17777,29999,1232,8,.......,....] (len(mixid) = 900000)
#mixlabel = [1,2,5,10,0,...]
#mixmap = [55,107,200,500,...]
## Prevent two pairs of the same speaker in the same batch
#e.g. batch_size = 128
#len(mixlabel) <= 128
#len(mixlabel) = 133: startbatch = (133-5=128)
#only focus on the current batch
for ii in mixid:
startbatch = len(mixlabel) - len(mixlabel) % self.batch_size
if flattened_label[ii] not in mixlabel[startbatch:]:
mixlabel.append(flattened_label[ii])
mixmap.append(ii)
## This is for MP and MMP
#for ii in mixid:
# startbatch = len(mixlabel) - len(mixlabel) % self.batch_size
# mixlabel.append(flattened_label[ii])
# mixmap.append(ii)
self.data_list = [flattened_list[i] for i in mixmap] #this is final data that does not have same labels in the same batch
self.data_label = [flattened_label[i] for i in mixmap]
## Iteration size
self.nFiles = len(self.data_label); #900000
### Make and Execute Threads...
for index in range(0, self.nWorkers): #so that num_workers * batch?
self.dataLoaders.append(threading.Thread(target = self.dataLoaderThread, args = [index]));
self.dataLoaders[-1].start(); #put batch data and label into queue
return self;
def __next__(self):
while(True):
isFinished = True;
if(self.datasetQueue.empty() == False):
return self.datasetQueue.get(); #one batch
for index in range(0, self.nWorkers):
if(self.dataLoaders[index].is_alive() == True):
isFinished = False;
break;
if(isFinished == False):
time.sleep(1.0);
continue;
for index in range(0, self.nWorkers):
self.dataLoaders[index].join();
self.dataLoaders = [];
raise StopIteration;
def __call__(self):
pass;
def getDatasetName(self):
return self.dataset_file_name;
def qsize(self):
return self.datasetQueue.qsize();
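# Usage sketch (file names, paths and sizes are placeholders): the loader is its own
# iterator; each item pulled from the queue is [in_data, in_label] for one batch.
#
#     loader = DatasetLoader_imbalance('train_list.txt', batch_size=128, max_frames=200,
#                                      max_seg_per_spk=100, nDataLoaderThread=5,
#                                      gSize=2, train_path='/data/voxceleb2')
#     for in_data, in_label in loader:
#         pass   # in_data: list of waveform tensors per group, in_label: numpy label array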
|
solr_helpers.py
|
__author__= "Claire L Mattoon, Johnny Edward, Eric Ziecker"
import xml.etree.ElementTree as etree
import aristotle.settings as settings
import sunburnt
from app_helpers import repository
from multiprocessing import Process, Queue
print("AFTER IMPORT")
SOLR_QUEUE = Queue(maxsize=5)
MODS_NS = 'http://www.loc.gov/mods/v3'
solr_server = sunburnt.SolrInterface(settings.SOLR_URL)
FIELDNAMES = [
'access', # Should have a constant value of "Online"
'author', #namePart
'bib_num', # Pid
'contents', # Should be all of the text of a transcription (if present)
'format', #! Incorrect this should not be a subject
'full_title', #title
'id', #system generated, should be the PID
'location', #! Incorrect, this should be a constant of dacc
'notes', #! Incorrect, only include public access notes (not record notes), abstract
'personal_name', #namePart
'summary', # abstract
'title', # title
'topic', #subject
'url', # Should be the URL in the location
]
def get_title(mods):
"""
    Function takes the object's MODS and extracts and returns the text of the title.
"""
title = mods.find("{{{0}}}titleInfo/{{{0}}}title".format(MODS_NS))
if title is not None:
return title.text
def get_topics(mods):
"""
Function takes the objects MODS and returns the text of the topics.
"""
output = []
topics = mods.findall("{{{0}}}subject/{{{0}}}topic".format(MODS_NS))
for topic in topics:
output.append(topic.text)
return output
def get_creators(mods):
"""
Function takes the object's MODS and extracts and returns the text of the
author or creator.
:param mods: Etree XML of MODS datastream
:rtype: List of creator names
"""
output = []
all_names = mods.findall("{{{0}}}name".format(MODS_NS))
for name in all_names:
roleTerm = name.find("{{{0}}}role/{{{0}}}roleTerm".format(MODS_NS))
if roleTerm.text == 'creator':
namePart = name.find("{{{0}}}namePart".format(MODS_NS))
output.append(namePart.text)
return output
def get_description(mods):
"""
Extracts a description from various MODS elements
:param mods: Etree XML of MODS datastream
:rtype: A list of description strings
"""
output = []
physical_desc = mods.find("{{{0}}}physicalDescription".format(MODS_NS))
if physical_desc is not None:
extent = physical_desc.find("{{{0}}}extent".format(MODS_NS))
if extent is not None:
output.append(extent.text)
origin = physical_desc.find("{{{0}}}digitalOrigin".format(MODS_NS))
if origin is not None:
output.append(origin.text)
return output
def get_format(mods):
"""
Extracts format from the genre field
:param mods: Etree XML of MODS datastream
"""
genre = mods.find("{{{0}}}genre".format(MODS_NS))
if genre is not None:
return genre.text
def get_mods(pid):
"""
Function attempts to extract the MODS datastream from the digital
repository
:param pid: PID of the object
:rtype: Etree of the MODS datastream
"""
    # Save results of attempting to retrieve the MODS datastream from the
# repository
mods_result = repository.api.getDatastreamDissemination(pid=pid,
dsID="MODS")
# Gets the raw XML from the result
mods_xml = mods_result[0]
# Returns the etree MODS xml object from the raw XML
return etree.XML(mods_xml)
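# Usage sketch ('coccc:1234' is a placeholder PID): pull the MODS datastream once and
# feed it to the field extractors above.
#
#     mods = get_mods('coccc:1234')
#     title = get_title(mods)
#     topics = get_topics(mods)
#     creators = get_creators(mods)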
def get_notes(mods):
"""
Function extracts all notes fields from MODS
:param mods: Etree of the MODS datastream
"""
notes = []
    all_notes = mods.findall("{{{0}}}note".format(MODS_NS))
    if not all_notes:
return notes
for note in all_notes:
        displayLabel = note.attrib.get('displayLabel')
if displayLabel is not None:
text = "{0} {1}".format(displayLabel, note.text)
else:
text = note.text
notes.append(text)
return notes
def get_publisher(mods):
"""
Function extracts publisher from MODS
:param mods: Etree of the MODS datastream
"""
    publisher = mods.find("{{{0}}}originInfo/{{{0}}}publisher".format(MODS_NS))
if publisher is not None:
return publisher.text
def get_published_year(mods):
"""
    Function extracts the publication year (dateCreated) from MODS
:param mods: Etree of the MODS datastream
"""
    dateCreated = mods.find("{{{0}}}originInfo/{{{0}}}dateCreated".format(MODS_NS))
if dateCreated is not None:
return dateCreated.text
def get_summary(mods):
"""
Function extracts abstract from MODS and returns text.
"""
summary = mods.find("{{{0}}}abstract".format(MODS_NS))
if summary is not None:
return summary.text
def get_text(solr_doc,mods):
"""
Function adds most of MODS record into general text field for
searching
:param solr_doc: Solr document dictionary
:param mods: Etree of the MODS datastream
"""
output = []
for key,value in solr_doc.iteritems():
if ['access','bib_num','id'].count(key) < 1:
output.append(value)
return output
def get_url(mods):
"""
Function extracts URL location from MODS and returns text.
"""
url = mods.find("{{{0}}}location/{{{0}}}url".format(MODS_NS))
if url is not None:
return url.text
def index_collection(collection_pid='coccc:top',recursive=True):
"""
Method indexes all child elements in a Fedora Collection, if
recursive is True, any collections in the children will call
index_collection function for that child pid.A
:param collection_pid: Collection of PID, default is top-level collection
object for the repository
:param recursive: Boolean, if True will call the index_collection on any
subcollections in the collection
"""
get_collection_sparql = '''PREFIX fedora: <info:fedora/fedora-system:def/relations-external#>
SELECT ?a
FROM <#ri>
WHERE
{
?a fedora:isMemberOfCollection <info:fedora/%s>
}
''' % collection_pid
csv_reader = repository.risearch.sparql_query(get_collection_sparql)
for row in csv_reader:
result = row.get('a')
pid = result.split("/")[1]
relationship = etree.XML(repository.api.getRelationship(pid)[0])
index_digital_object(pid=pid)
def index_digital_object(**kwargs):
pid = kwargs.get('pid')
mods = get_mods(pid)
    if 'format' in kwargs:
        formatOf = kwargs.get('format')
    else:
        formatOf = get_format(mods)
    if formatOf is None:
        formatOf = 'Unknown'
solr_doc = {'access':'Online',
'bib_num':pid,
'format':formatOf.title(),
'location':'Digital Archives of Colorado College (DACC)',
'id':pid}
solr_doc['author'] = get_creators(mods)
solr_doc['description'] = get_description(mods)
solr_doc['title'] = get_title(mods)
solr_doc['full_title'] = solr_doc['title']
solr_doc['topic'] = get_topics(mods)
solr_doc['summary'] = get_summary(mods)
solr_doc['notes'] = get_notes(mods)
solr_doc['personal_name'] = solr_doc['author']
solr_doc['publisher'] = get_publisher(mods)
solr_doc['pubyear'] = get_published_year(mods)
solr_doc['text'] = get_text(solr_doc,mods)
solr_doc['url'] = get_url(mods)
print("Adding {0} with format {1} to Solr index".format(solr_doc['id'],
solr_doc['format']))
solr_server.add(solr_doc)
solr_server.commit()
def index_manuscript(pid):
"""
Function takes PID, extracts MODS, creates Solr document and attempts to ingest into Solr.
"""
index_digital_object(pid=pid,format='Manuscript')
def index_process(dig_obj,queue):
"""
Function adds result of indexing fedora digital object into
Solr index.
:param dig_obj: Digital Object
"""
print("In index_process")
index_digital_object(pid=dig_obj.pid)
queue.put("Indexed {0} with PID={1} into Solr Index".format(dig_obj.label,dig_obj.pid))
def start_indexing(pid_prefix='coccc'):
"""
Function starts Solr indexing queue for all objects in
the repository.
:param pid_prefix: PID prefix to search, defaults to CC
"""
query = "{0}*".format(pid_prefix)
print("Before get pid generator {0}".format(query))
all_pids_generator = repository.find_objects(query = "{0}*".format(pid_prefix))
print("after get pid generator {0}".format(all_pids_generator))
while 1:
try:
print("Before extracting next digital object")
digital_object = next(all_pids_generator)
print("Digital object PID={0}".format(digital_object.pid))
process = Process(target=index_process, args=(digital_object,SOLR_QUEUE))
process.start()
#process.join()
        except StopIteration:
break
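# Usage sketch: start_indexing() walks every object matching the PID prefix and spawns
# an index_process per object; each process pushes a status string onto SOLR_QUEUE
# (maxsize 5), so drain the queue while indexing runs.
#
#     start_indexing('coccc')
#     while not SOLR_QUEUE.empty():
#         print(SOLR_QUEUE.get())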
|
registrar_common.py
|
'''
SPDX-License-Identifier: Apache-2.0
Copyright 2017 Massachusetts Institute of Technology.
'''
import base64
import threading
import sys
import signal
import time
import http.server
from http.server import HTTPServer, BaseHTTPRequestHandler
from socketserver import ThreadingMixIn
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.orm.exc import NoResultFound
from cryptography.hazmat.backends import default_backend
from cryptography.x509 import load_der_x509_certificate
import simplejson as json
from keylime.db.registrar_db import RegistrarMain
from keylime.db.keylime_db import DBEngineManager, SessionManager
from keylime import cloud_verifier_common
from keylime import config
from keylime import crypto
from keylime.tpm import tpm2_objects
from keylime import keylime_logging
from keylime.tpm.tpm_main import tpm
logger = keylime_logging.init_logging('registrar')
try:
engine = DBEngineManager().make_engine('registrar')
except SQLAlchemyError as e:
logger.error('Error creating SQL engine: %s', e)
sys.exit(1)
class ProtectedHandler(BaseHTTPRequestHandler, SessionManager):
def do_HEAD(self):
"""HEAD not supported"""
config.echo_json_response(self, 405, "HEAD not supported")
def do_PATCH(self):
"""PATCH not supported"""
config.echo_json_response(self, 405, "PATCH not supported")
def do_GET(self):
"""This method handles the GET requests to retrieve status on agents from the Registrar Server.
Currently, only agents resources are available for GETing, i.e. /agents. All other GET uri's
will return errors. agents requests require a single agent_id parameter which identifies the
agent to be returned. If the agent_id is not found, a 404 response is returned.
"""
session = SessionManager().make_session(engine)
rest_params = config.get_restful_params(self.path)
if rest_params is None:
config.echo_json_response(
self, 405, "Not Implemented: Use /agents/ interface")
return
if "agents" not in rest_params:
config.echo_json_response(self, 400, "uri not supported")
logger.warning('GET returning 400 response. uri not supported: %s', self.path)
return
agent_id = rest_params["agents"]
if agent_id is not None:
            agent = None
            try:
                agent = session.query(RegistrarMain).filter_by(
                    agent_id=agent_id).first()
            except SQLAlchemyError as e:
                logger.error('SQLAlchemy Error: %s', e)
if agent is None:
config.echo_json_response(self, 404, "agent_id not found")
logger.warning('GET returning 404 response. agent_id %s not found.', agent_id)
return
if not agent.active:
config.echo_json_response(self, 404, "agent_id not yet active")
logger.warning('GET returning 404 response. agent_id %s not yet active.', agent_id)
return
response = {
'aik_tpm': agent.aik_tpm,
'ek_tpm': agent.ek_tpm,
'ekcert': agent.ekcert,
'regcount': agent.regcount,
}
if agent.virtual:
response['provider_keys'] = agent.provider_keys
config.echo_json_response(self, 200, "Success", response)
logger.info('GET returning 200 response for agent_id: %s', agent_id)
else:
# return the available registered uuids from the DB
json_response = session.query(RegistrarMain.agent_id).all()
return_response = [item[0] for item in json_response]
config.echo_json_response(self, 200, "Success", {
'uuids': return_response})
logger.info('GET returning 200 response for agent_id list')
return
def do_POST(self):
"""POST not supported"""
config.echo_json_response(
self, 405, "POST not supported via TLS interface")
def do_PUT(self):
"""PUT not supported"""
config.echo_json_response(
self, 405, "PUT not supported via TLS interface")
def do_DELETE(self):
"""This method handles the DELETE requests to remove agents from the Registrar Server.
Currently, only agents resources are available for DELETEing, i.e. /agents. All other DELETE uri's will return errors.
agents requests require a single agent_id parameter which identifies the agent to be deleted.
"""
session = SessionManager().make_session(engine)
rest_params = config.get_restful_params(self.path)
if rest_params is None:
config.echo_json_response(
self, 405, "Not Implemented: Use /agents/ interface")
return
if "agents" not in rest_params:
config.echo_json_response(self, 400, "uri not supported")
logger.warning('DELETE agent returning 400 response. uri not supported: %s', self.path)
return
agent_id = rest_params["agents"]
if agent_id is not None:
if session.query(RegistrarMain).filter_by(agent_id=agent_id).delete():
# send response
try:
session.commit()
                except SQLAlchemyError as e:
                    logger.error('SQLAlchemy Error: %s', e)
                    raise
config.echo_json_response(self, 200, "Success")
return
# send response
config.echo_json_response(self, 404)
return
config.echo_json_response(self, 404)
# pylint: disable=W0622
def log_message(self, format, *args):
return
class UnprotectedHandler(BaseHTTPRequestHandler, SessionManager):
def do_HEAD(self):
"""HEAD not supported"""
config.echo_json_response(self, 405, "HEAD not supported")
def do_PATCH(self):
"""PATCH not supported"""
config.echo_json_response(self, 405, "PATCH not supported")
def do_GET(self):
"""GET not supported"""
config.echo_json_response(self, 405, "GET not supported")
def do_POST(self):
"""This method handles the POST requests to add agents to the Registrar Server.
Currently, only agents resources are available for POSTing, i.e. /agents. All other POST uri's
will return errors. POST requests require an an agent_id identifying the agent to add, and json
block sent in the body with 2 entries: ek and aik.
"""
session = SessionManager().make_session(engine)
rest_params = config.get_restful_params(self.path)
if rest_params is None:
config.echo_json_response(
self, 405, "Not Implemented: Use /agents/ interface")
return
if "agents" not in rest_params:
config.echo_json_response(self, 400, "uri not supported")
logger.warning('POST agent returning 400 response. uri not supported: %s', self.path)
return
agent_id = rest_params["agents"]
if agent_id is None:
config.echo_json_response(self, 400, "agent id not found in uri")
logger.warning('POST agent returning 400 response. agent id not found in uri %s', self.path)
return
try:
content_length = int(self.headers.get('Content-Length', 0))
if content_length == 0:
config.echo_json_response(
self, 400, "Expected non zero content length")
logger.warning('POST for %s returning 400 response. Expected non zero content length.', agent_id)
return
post_body = self.rfile.read(content_length)
json_body = json.loads(post_body)
ekcert = json_body['ekcert']
aik_tpm = json_body['aik_tpm']
initialize_tpm = tpm()
if ekcert is None or ekcert == 'emulator':
                logger.warning('Agent %s did not submit an ekcert', agent_id)
ek_tpm = json_body['ek_tpm']
else:
if 'ek_tpm' in json_body:
# This would mean the agent submitted both a non-None ekcert, *and*
# an ek_tpm... We can deal with it by just ignoring the ek_tpm they sent
                    logger.warning('Overriding ek_tpm for agent %s from ekcert', agent_id)
# If there's an EKCert, we just overwrite their ek_tpm
# Note, we don't validate the EKCert here, other than the implicit
# "is it a valid x509 cert" check. So it's still untrusted.
# This will be validated by the tenant.
ek509 = load_der_x509_certificate(
base64.b64decode(ekcert),
backend=default_backend(),
)
ek_tpm = base64.b64encode(
tpm2_objects.ek_low_tpm2b_public_from_pubkey(
ek509.public_key(),
)
)
aik_attrs = tpm2_objects.get_tpm2b_public_object_attributes(
base64.b64decode(aik_tpm),
)
if aik_attrs != tpm2_objects.AK_EXPECTED_ATTRS:
config.echo_json_response(
self, 400, "Invalid AK attributes")
logger.warning(
"Agent %s submitted AIK with invalid attributes! %s (provided) != %s (expected)",
agent_id,
tpm2_objects.object_attributes_description(aik_attrs),
tpm2_objects.object_attributes_description(tpm2_objects.AK_EXPECTED_ATTRS),
)
return
# try to encrypt the AIK
(blob, key) = initialize_tpm.encryptAIK(
agent_id,
base64.b64decode(ek_tpm),
base64.b64decode(aik_tpm),
)
# special behavior if we've registered this uuid before
regcount = 1
try:
agent = session.query(RegistrarMain).filter_by(
agent_id=agent_id).first()
except NoResultFound:
agent = None
except SQLAlchemyError as e:
logger.error('SQLAlchemy Error: %s', e)
raise
if agent is not None:
# keep track of how many ek-ekcerts have registered on this uuid
regcount = agent.regcount
if agent.ek_tpm != ek_tpm or agent.ekcert != ekcert:
logger.warning('WARNING: Overwriting previous registration for this UUID with new ek-ekcert pair!')
regcount += 1
# force overwrite
logger.info('Overwriting previous registration for this UUID.')
try:
session.query(RegistrarMain).filter_by(
agent_id=agent_id).delete()
session.commit()
except SQLAlchemyError as e:
logger.error('SQLAlchemy Error: %s', e)
raise
# Add values to database
d = {}
d['agent_id'] = agent_id
d['ek_tpm'] = ek_tpm
d['aik_tpm'] = aik_tpm
d['ekcert'] = ekcert
d['virtual'] = int(ekcert == 'virtual')
d['active'] = int(False)
d['key'] = key
d['provider_keys'] = {}
d['regcount'] = regcount
try:
session.add(RegistrarMain(**d))
session.commit()
except SQLAlchemyError as e:
logger.error('SQLAlchemy Error: %s', e)
raise
response = {
'blob': blob,
}
config.echo_json_response(self, 200, "Success", response)
logger.info('POST returning key blob for agent_id: %s', agent_id)
except Exception as e:
config.echo_json_response(self, 400, "Error: %s" % e)
logger.warning("POST for %s returning 400 response. Error: %s", agent_id, e)
logger.exception(e)
def do_PUT(self):
"""This method handles the PUT requests to add agents to the Registrar Server.
Currently, only agents resources are available for PUTing, i.e. /agents. All other PUT uri's
will return errors.
"""
session = SessionManager().make_session(engine)
rest_params = config.get_restful_params(self.path)
if rest_params is None:
config.echo_json_response(
self, 405, "Not Implemented: Use /agents/ interface")
return
if "agents" not in rest_params:
config.echo_json_response(self, 400, "uri not supported")
logger.warning('PUT agent returning 400 response. uri not supported: %s', self.path)
return
agent_id = rest_params["agents"]
if agent_id is None:
config.echo_json_response(self, 400, "agent id not found in uri")
logger.warning('PUT agent returning 400 response. agent id not found in uri %s', self.path)
return
try:
content_length = int(self.headers.get('Content-Length', 0))
if content_length == 0:
config.echo_json_response(
self, 400, "Expected non zero content length")
logger.warning('PUT for %s returning 400 response. Expected non zero content length.', agent_id)
return
post_body = self.rfile.read(content_length)
json_body = json.loads(post_body)
auth_tag = json_body['auth_tag']
try:
agent = session.query(RegistrarMain).filter_by(
agent_id=agent_id).first()
except NoResultFound as e:
raise Exception(
"attempting to activate agent before requesting "
"registrar for %s" % agent_id) from e
except SQLAlchemyError as e:
logger.error('SQLAlchemy Error: %s', e)
raise
if config.STUB_TPM:
try:
session.query(RegistrarMain).filter(RegistrarMain.agent_id == agent_id).update(
{'active': True})
session.commit()
except SQLAlchemyError as e:
logger.error('SQLAlchemy Error: %s', e)
raise
else:
# TODO(kaifeng) Special handling should be removed
if engine.dialect.name == "mysql":
agent.key = agent.key.encode('utf-8')
ex_mac = crypto.do_hmac(agent.key, agent_id)
if ex_mac == auth_tag:
try:
session.query(RegistrarMain).filter(RegistrarMain.agent_id == agent_id).update(
{'active': True})
session.commit()
except SQLAlchemyError as e:
logger.error('SQLAlchemy Error: %s', e)
raise
else:
raise Exception(
"Auth tag %s does not match expected value %s" % (auth_tag, ex_mac))
config.echo_json_response(self, 200, "Success")
logger.info('PUT activated: %s', agent_id)
except Exception as e:
config.echo_json_response(self, 400, "Error: %s" % e)
logger.warning("PUT for %s returning 400 response. Error: %s", agent_id, e)
logger.exception(e)
return
def do_DELETE(self):
"""DELETE not supported"""
config.echo_json_response(self, 405, "DELETE not supported")
# pylint: disable=W0622
def log_message(self, format, *args):
return
# consider using PooledProcessMixIn
# https://github.com/muayyad-alsadi/python-PooledProcessMixIn
class RegistrarServer(ThreadingMixIn, HTTPServer):
"""Handle requests in a separate thread."""
def __init__(self, server_address, RequestHandlerClass):
"""Constructor overridden to provide ability to read file"""
http.server.HTTPServer.__init__(
self, server_address, RequestHandlerClass)
def shutdown(self):
http.server.HTTPServer.shutdown(self)
def do_shutdown(servers):
for server in servers:
server.shutdown()
def start(host, tlsport, port):
"""Main method of the Registrar Server. This method is encapsulated in a function for packaging to allow it to be
called as a function by an external program."""
threads = []
servers = []
serveraddr = (host, tlsport)
RegistrarMain.metadata.create_all(engine, checkfirst=True)
session = SessionManager().make_session(engine)
try:
count = session.query(RegistrarMain.agent_id).count()
    except SQLAlchemyError as e:
        logger.error('SQLAlchemy Error: %s', e)
        raise
if count > 0:
logger.info("Loaded %d public keys from database", count)
server = RegistrarServer(serveraddr, ProtectedHandler)
context = cloud_verifier_common.init_mtls(section='registrar',
generatedir='reg_ca')
if context is not None:
server.socket = context.wrap_socket(server.socket, server_side=True)
thread = threading.Thread(target=server.serve_forever)
threads.append(thread)
# start up the unprotected registrar server
serveraddr2 = (host, port)
server2 = RegistrarServer(serveraddr2, UnprotectedHandler)
thread2 = threading.Thread(target=server2.serve_forever)
threads.append(thread2)
servers.append(server)
servers.append(server2)
    logger.info('Starting Cloud Registrar Server on ports %s and %s (TLS); use <Ctrl-C> to stop', port, tlsport)
for thread in threads:
thread.start()
def signal_handler(signum, frame):
del signum, frame
do_shutdown(servers)
sys.exit(0)
# Catch these signals. Note that a SIGKILL cannot be caught, so
# killing this process with "kill -9" may result in improper shutdown
signal.signal(signal.SIGTERM, signal_handler)
signal.signal(signal.SIGQUIT, signal_handler)
signal.signal(signal.SIGINT, signal_handler)
# keep the main thread active, so it can process the signals and gracefully shutdown
while True:
if not any([thread.is_alive() for thread in threads]):
# All threads have stopped
break
# Some threads are still going
time.sleep(1)
for thread in threads:
thread.join()
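# Illustrative client-side sketch (not part of keylime): how an agent might
# register itself against the unprotected interface served by do_POST above.
# The host/port defaults and the `requests` dependency are assumptions; the
# JSON fields mirror what do_POST reads from the request body.
def _example_register_agent(agent_id, aik_tpm, ekcert='emulator', ek_tpm=None,
                            host='127.0.0.1', port=8890):
    import requests
    body = {'ekcert': ekcert, 'aik_tpm': aik_tpm}
    if ekcert is None or ekcert == 'emulator':
        # when no EK certificate is supplied, do_POST expects an explicit ek_tpm
        body['ek_tpm'] = ek_tpm
    response = requests.post(
        'http://{}:{}/agents/{}'.format(host, port, agent_id), json=body)
    # on success the JSON response carries the encrypted AIK challenge ('blob')
    return response.json()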
|
loca_navi_demo.py
|
import argparse, copy
import multiprocessing, time
from lib.navigation import Navigation
from Map.map_plotter import Plotter
from distutils.util import strtobool
parser = argparse.ArgumentParser()
parser.add_argument("--scene_type", type=int, default=1, help="Choose scene type for simulation, 1 for Kitchens, 2 for Living rooms, 3 for Bedrooms, 4 for Bathrooms")
parser.add_argument("--scene_num", type=int, default=30, help="Choose scene num for simulation, from 1 - 30")
parser.add_argument("--grid_size", type=float, default=0.25, help="Grid size of AI2THOR simulation")
parser.add_argument("--rotation_step", type=float, default=10, help="Rotation step of AI2THOR simulation")
parser.add_argument("--sleep_time", type=float, default=0.005, help="Sleep time between two actions")
parser.add_argument("--save_directory", type=str, default='./data', help="Data saving directory")
parser.add_argument("--overwrite_data", type=lambda x: bool(strtobool(x)), default=False, help="overwrite the existing data or not")
parser.add_argument("--log_level", type=int, default=2, help="Level of showing log 1-5 where 5 is most detailed")
parser.add_argument("--debug", type=lambda x: bool(strtobool(x)), default=False, help="Output debug info if True")
parser.add_argument("--test_data", type=lambda x: bool(strtobool(x)), default=False, help="True for collecting test dataset")
parser.add_argument("--special", type=lambda x: bool(strtobool(x)), default=False, help="True for collecting special long range dataset")
parser.add_argument("--AI2THOR", type=lambda x: bool(strtobool(x)), default=False, help="True for RobotTHOR false for ITHOR")
args = parser.parse_args()
def navigation_fcn(server, comfirmed, initialized):
navigation = Navigation(netName='rnet', scene_type=args.scene_type, scene_num=args.scene_num, save_directory=args.save_directory, AI2THOR=args.AI2THOR, server=server, comfirmed=comfirmed)
navigation.Update_node_generator()
navigation.Update_topo_map_env()
navigation.Update_planner_env()
# Send information to initialize plot map
scene_info = navigation.Robot._AI2THOR_controller.get_scene_info()
server.send(scene_info)
# Navigation task
navigation.node_generator.Shuffle_scene()
navigation.Closed_loop_nav(current_node_index=10, current_orientation=0, goal_node_index=5, goal_orientation=0)
# navigation.Closed_loop_nav(current_node_index=10, current_orientation=180, goal_node_index=9, goal_orientation=180)
# navigation.Closed_loop_nav(current_node_index=9, current_orientation=180, goal_node_index=3, goal_orientation=0)
# navigation.Closed_loop_nav(current_node_index=1, current_orientation=0, goal_node_index=16, goal_orientation=0)
# navigation.Closed_loop_nav(current_node_index=16, current_orientation=0, goal_node_index=3, goal_orientation=0)
# navigation.Closed_loop_nav(current_node_index=3, current_orientation=0, goal_node_index=4, goal_orientation=0)
# navigation.Closed_loop_nav(current_node_index=4, current_orientation=0, goal_node_index=4, goal_orientation=90)
# navigation.nav_test_simplified()
# while True:
# if initialized.value:
# navigation.Closed_loop_nav(goal_node_index=3, goal_orientation=270)
# navigation.Closed_loop_nav(goal_node_index=2, goal_orientation=270)
# break
def visualization_fcn(client, comfirmed, initialized):
scene_info = client.recv()
visualization_panel = Plotter(*scene_info, client=client, comfirmed=comfirmed)
initialized.value = 1
while True:
visualization_panel.show_map()
if __name__ == '__main__':
    comfirmed = multiprocessing.Value('i')  # Int flag: 1 means the current task is confirmed complete and the other process may proceed; 0 otherwise
comfirmed.value = 0
    initialized = multiprocessing.Value('i')  # Int flag: set to 1 once the visualization panel has been initialized
initialized.value = 0
    server, client = multiprocessing.Pipe()  # the server end sends data and the client end receives it
navi_node = multiprocessing.Process(target=navigation_fcn, args=(server, comfirmed, initialized))
visual_node = multiprocessing.Process(target=visualization_fcn, args=(client, comfirmed, initialized))
navi_node.start()
visual_node.start()
navi_node.join()
visual_node.join()
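# Minimal standalone sketch of the Pipe + shared-Value handshake used above
# (names and payload are illustrative):
#
#   import multiprocessing
#
#   def producer(conn, ready):
#       conn.send({'scene': 'FloorPlan1'})
#       ready.value = 1
#
#   server, client = multiprocessing.Pipe()
#   ready = multiprocessing.Value('i', 0)
#   p = multiprocessing.Process(target=producer, args=(server, ready))
#   p.start()
#   print(client.recv())
#   p.join()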
|
cli.py
|
# -*- coding: utf-8 -*-
"""
flask.cli
~~~~~~~~~
A simple command line application to run flask apps.
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
import traceback
from threading import Lock, Thread
from functools import update_wrapper
import click
from ._compat import iteritems, reraise
from .helpers import get_debug_flag
from . import __version__
class NoAppException(click.UsageError):
"""Raised if an application cannot be found or loaded."""
def find_best_app(module):
"""Given a module instance this tries to find the best possible
application in the module or raises an exception.
"""
from . import Flask
# Search for the most common names first.
for attr_name in 'app', 'application':
app = getattr(module, attr_name, None)
if app is not None and isinstance(app, Flask):
return app
# Otherwise find the only object that is a Flask instance.
matches = [v for k, v in iteritems(module.__dict__)
if isinstance(v, Flask)]
if len(matches) == 1:
return matches[0]
raise NoAppException('Failed to find application in module "%s". Are '
'you sure it contains a Flask application? Maybe '
'you wrapped it in a WSGI middleware or you are '
'using a factory function.' % module.__name__)
def prepare_exec_for_file(filename):
"""Given a filename this will try to calculate the python path, add it
to the search path and return the actual module name that is expected.
"""
module = []
# Chop off file extensions or package markers
if os.path.split(filename)[1] == '__init__.py':
filename = os.path.dirname(filename)
elif filename.endswith('.py'):
filename = filename[:-3]
else:
        raise NoAppException('The file provided (%s) does exist but is not a '
                             'valid Python file. This means that it cannot '
                             'be used as an application. Please change the '
                             'extension to .py' % filename)
filename = os.path.realpath(filename)
dirpath = filename
while 1:
dirpath, extra = os.path.split(dirpath)
module.append(extra)
if not os.path.isfile(os.path.join(dirpath, '__init__.py')):
break
sys.path.insert(0, dirpath)
return '.'.join(module[::-1])
def locate_app(app_id):
"""Attempts to locate the application."""
__traceback_hide__ = True
if ':' in app_id:
module, app_obj = app_id.split(':', 1)
else:
module = app_id
app_obj = None
try:
__import__(module)
except ImportError:
# Reraise the ImportError if it occurred within the imported module.
# Determine this by checking whether the trace has a depth > 1.
if sys.exc_info()[-1].tb_next:
raise
else:
raise NoAppException('The file/path provided (%s) does not appear'
' to exist. Please verify the path is '
'correct. If app is not on PYTHONPATH, '
'ensure the extension is .py' % module)
mod = sys.modules[module]
if app_obj is None:
app = find_best_app(mod)
else:
app = getattr(mod, app_obj, None)
if app is None:
raise RuntimeError('Failed to find application in module "%s"'
% module)
return app
def find_default_import_path():
app = os.environ.get('FLASK_APP')
if app is None:
return
if os.path.isfile(app):
return prepare_exec_for_file(app)
return app
def get_version(ctx, param, value):
if not value or ctx.resilient_parsing:
return
message = 'Flask %(version)s\nPython %(python_version)s'
click.echo(message % {
'version': __version__,
'python_version': sys.version,
}, color=ctx.color)
ctx.exit()
version_option = click.Option(['--version'],
help='Show the flask version',
expose_value=False,
callback=get_version,
is_flag=True, is_eager=True)
class DispatchingApp(object):
"""Special application that dispatches to a Flask application which
is imported by name in a background thread. If an error happens
it is recorded and shown as part of the WSGI handling which in case
of the Werkzeug debugger means that it shows up in the browser.
"""
def __init__(self, loader, use_eager_loading=False):
self.loader = loader
self._app = None
self._lock = Lock()
self._bg_loading_exc_info = None
if use_eager_loading:
self._load_unlocked()
else:
self._load_in_background()
def _load_in_background(self):
def _load_app():
__traceback_hide__ = True
with self._lock:
try:
self._load_unlocked()
except Exception:
self._bg_loading_exc_info = sys.exc_info()
t = Thread(target=_load_app, args=())
t.start()
def _flush_bg_loading_exception(self):
__traceback_hide__ = True
exc_info = self._bg_loading_exc_info
if exc_info is not None:
self._bg_loading_exc_info = None
reraise(*exc_info)
def _load_unlocked(self):
__traceback_hide__ = True
self._app = rv = self.loader()
self._bg_loading_exc_info = None
return rv
def __call__(self, environ, start_response):
__traceback_hide__ = True
if self._app is not None:
return self._app(environ, start_response)
self._flush_bg_loading_exception()
with self._lock:
if self._app is not None:
rv = self._app
else:
rv = self._load_unlocked()
return rv(environ, start_response)
class ScriptInfo(object):
"""Help object to deal with Flask applications. This is usually not
necessary to interface with as it's used internally in the dispatching
to click. In future versions of Flask this object will most likely play
a bigger role. Typically it's created automatically by the
:class:`FlaskGroup` but you can also manually create it and pass it
onwards as click object.
"""
def __init__(self, app_import_path=None, create_app=None):
if create_app is None:
if app_import_path is None:
app_import_path = find_default_import_path()
self.app_import_path = app_import_path
else:
app_import_path = None
#: Optionally the import path for the Flask application.
self.app_import_path = app_import_path
#: Optionally a function that is passed the script info to create
#: the instance of the application.
self.create_app = create_app
#: A dictionary with arbitrary data that can be associated with
#: this script info.
self.data = {}
self._loaded_app = None
def load_app(self):
"""Loads the Flask app (if not yet loaded) and returns it. Calling
this multiple times will just result in the already loaded app to
be returned.
"""
__traceback_hide__ = True
if self._loaded_app is not None:
return self._loaded_app
if self.create_app is not None:
rv = self.create_app(self)
else:
if not self.app_import_path:
raise NoAppException(
'Could not locate Flask application. You did not provide '
'the FLASK_APP environment variable.\n\nFor more '
'information see '
'http://flask.pocoo.org/docs/latest/quickstart/')
rv = locate_app(self.app_import_path)
debug = get_debug_flag()
if debug is not None:
rv.debug = debug
self._loaded_app = rv
return rv
pass_script_info = click.make_pass_decorator(ScriptInfo, ensure=True)
def with_appcontext(f):
"""Wraps a callback so that it's guaranteed to be executed with the
script's application context. If callbacks are registered directly
to the ``app.cli`` object then they are wrapped with this function
by default unless it's disabled.
"""
@click.pass_context
def decorator(__ctx, *args, **kwargs):
with __ctx.ensure_object(ScriptInfo).load_app().app_context():
return __ctx.invoke(f, *args, **kwargs)
return update_wrapper(decorator, f)
class AppGroup(click.Group):
"""This works similar to a regular click :class:`~click.Group` but it
changes the behavior of the :meth:`command` decorator so that it
automatically wraps the functions in :func:`with_appcontext`.
Not to be confused with :class:`FlaskGroup`.
"""
def command(self, *args, **kwargs):
"""This works exactly like the method of the same name on a regular
:class:`click.Group` but it wraps callbacks in :func:`with_appcontext`
unless it's disabled by passing ``with_appcontext=False``.
"""
wrap_for_ctx = kwargs.pop('with_appcontext', True)
def decorator(f):
if wrap_for_ctx:
f = with_appcontext(f)
return click.Group.command(self, *args, **kwargs)(f)
return decorator
def group(self, *args, **kwargs):
"""This works exactly like the method of the same name on a regular
:class:`click.Group` but it defaults the group class to
:class:`AppGroup`.
"""
kwargs.setdefault('cls', AppGroup)
return click.Group.group(self, *args, **kwargs)
class FlaskGroup(AppGroup):
"""Special subclass of the :class:`AppGroup` group that supports
loading more commands from the configured Flask app. Normally a
developer does not have to interface with this class but there are
some very advanced use cases for which it makes sense to create an
instance of this.
For information as of why this is useful see :ref:`custom-scripts`.
:param add_default_commands: if this is True then the default run and
        shell commands will be added.
:param add_version_option: adds the ``--version`` option.
:param create_app: an optional callback that is passed the script info
and returns the loaded app.
"""
def __init__(self, add_default_commands=True, create_app=None,
add_version_option=True, **extra):
params = list(extra.pop('params', None) or ())
if add_version_option:
params.append(version_option)
AppGroup.__init__(self, params=params, **extra)
self.create_app = create_app
if add_default_commands:
self.add_command(run_command)
self.add_command(shell_command)
self._loaded_plugin_commands = False
def _load_plugin_commands(self):
if self._loaded_plugin_commands:
return
try:
import pkg_resources
except ImportError:
self._loaded_plugin_commands = True
return
for ep in pkg_resources.iter_entry_points('flask.commands'):
self.add_command(ep.load(), ep.name)
self._loaded_plugin_commands = True
def get_command(self, ctx, name):
self._load_plugin_commands()
# We load built-in commands first as these should always be the
# same no matter what the app does. If the app does want to
# override this it needs to make a custom instance of this group
# and not attach the default commands.
#
# This also means that the script stays functional in case the
# application completely fails.
rv = AppGroup.get_command(self, ctx, name)
if rv is not None:
return rv
info = ctx.ensure_object(ScriptInfo)
try:
rv = info.load_app().cli.get_command(ctx, name)
if rv is not None:
return rv
except NoAppException:
pass
def list_commands(self, ctx):
self._load_plugin_commands()
# The commands available is the list of both the application (if
# available) plus the builtin commands.
rv = set(click.Group.list_commands(self, ctx))
info = ctx.ensure_object(ScriptInfo)
try:
rv.update(info.load_app().cli.list_commands(ctx))
except Exception:
# Here we intentionally swallow all exceptions as we don't
# want the help page to break if the app does not exist.
# If someone attempts to use the command we try to create
# the app again and this will give us the error.
# However, we will not do so silently because that would confuse
# users.
traceback.print_exc()
return sorted(rv)
def main(self, *args, **kwargs):
obj = kwargs.get('obj')
if obj is None:
obj = ScriptInfo(create_app=self.create_app)
kwargs['obj'] = obj
kwargs.setdefault('auto_envvar_prefix', 'FLASK')
return AppGroup.main(self, *args, **kwargs)
@click.command('run', short_help='Runs a development server.')
@click.option('--host', '-h', default='127.0.0.1',
help='The interface to bind to.')
@click.option('--port', '-p', default=5000,
help='The port to bind to.')
@click.option('--reload/--no-reload', default=None,
help='Enable or disable the reloader. By default the reloader '
'is active if debug is enabled.')
@click.option('--debugger/--no-debugger', default=None,
help='Enable or disable the debugger. By default the debugger '
'is active if debug is enabled.')
@click.option('--eager-loading/--lazy-loader', default=None,
help='Enable or disable eager loading. By default eager '
'loading is enabled if the reloader is disabled.')
@click.option('--with-threads/--without-threads', default=False,
help='Enable or disable multithreading.')
@pass_script_info
def run_command(info, host, port, reload, debugger, eager_loading,
with_threads):
"""Runs a local development server for the Flask application.
This local server is recommended for development purposes only but it
can also be used for simple intranet deployments. By default it will
not support any sort of concurrency at all to simplify debugging. This
can be changed with the --with-threads option which will enable basic
multithreading.
The reloader and debugger are by default enabled if the debug flag of
Flask is enabled and disabled otherwise.
"""
from werkzeug.serving import run_simple
# Set a global flag that indicates that we were invoked from the
# command line interface provided server command. This is detected
# by Flask.run to make the call into a no-op. This is necessary to
# avoid ugly errors when the script that is loaded here also attempts
# to start a server.
os.environ['FLASK_RUN_FROM_CLI_SERVER'] = '1'
debug = get_debug_flag()
if reload is None:
reload = bool(debug)
if debugger is None:
debugger = bool(debug)
if eager_loading is None:
eager_loading = not reload
app = DispatchingApp(info.load_app, use_eager_loading=eager_loading)
# Extra startup messages. This depends a bit on Werkzeug internals to
# not double execute when the reloader kicks in.
if os.environ.get('WERKZEUG_RUN_MAIN') != 'true':
# If we have an import path we can print it out now which can help
# people understand what's being served. If we do not have an
# import path because the app was loaded through a callback then
# we won't print anything.
if info.app_import_path is not None:
print(' * Serving Flask app "%s"' % info.app_import_path)
if debug is not None:
print(' * Forcing debug mode %s' % (debug and 'on' or 'off'))
run_simple(host, port, app, use_reloader=reload,
use_debugger=debugger, threaded=with_threads)
@click.command('shell', short_help='Runs a shell in the app context.')
@with_appcontext
def shell_command():
"""Runs an interactive Python shell in the context of a given
Flask application. The application will populate the default
    namespace of this shell according to its configuration.
    This is useful for executing small snippets of management code
    without having to manually configure the application.
"""
import code
from flask.globals import _app_ctx_stack
app = _app_ctx_stack.top.app
banner = 'Python %s on %s\nApp: %s%s\nInstance: %s' % (
sys.version,
sys.platform,
app.import_name,
app.debug and ' [debug]' or '',
app.instance_path,
)
ctx = {}
# Support the regular Python interpreter startup script if someone
# is using it.
startup = os.environ.get('PYTHONSTARTUP')
if startup and os.path.isfile(startup):
with open(startup, 'r') as f:
eval(compile(f.read(), startup, 'exec'), ctx)
ctx.update(app.make_shell_context())
code.interact(banner=banner, local=ctx)
cli = FlaskGroup(help="""\
This shell command acts as a general utility script for Flask applications.
It loads the application configured (through the FLASK_APP environment
variable) and then provides commands either provided by the application or
Flask itself.
The most useful commands are the "run" and "shell" commands.
Example usage:
\b
%(prefix)s%(cmd)s FLASK_APP=hello.py
%(prefix)s%(cmd)s FLASK_DEBUG=1
%(prefix)sflask run
""" % {
'cmd': os.name == 'posix' and 'export' or 'set',
'prefix': os.name == 'posix' and '$ ' or '',
})
def main(as_module=False):
this_module = __package__ + '.cli'
args = sys.argv[1:]
if as_module:
if sys.version_info >= (2, 7):
name = 'python -m ' + this_module.rsplit('.', 1)[0]
else:
name = 'python -m ' + this_module
# This module is always executed as "python -m flask.run" and as such
# we need to ensure that we restore the actual command line so that
# the reloader can properly operate.
sys.argv = ['-m', this_module] + sys.argv[1:]
else:
name = None
cli.main(args=args, prog_name=name)
if __name__ == '__main__':
main(as_module=True)
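# Hedged sketch of the "custom scripts" pattern that FlaskGroup supports via
# the create_app callback (module name and factory below are hypothetical):
#
#   import click
#   from flask.cli import FlaskGroup
#
#   def create_my_app(script_info):
#       from myproject import create_app   # hypothetical application factory
#       return create_app()
#
#   @click.group(cls=FlaskGroup, create_app=create_my_app)
#   def my_cli():
#       """Management script for myproject."""
#
#   if __name__ == '__main__':
#       my_cli()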
|
real_time.py
|
#done changes
import os
import sys
import json
import numpy as np
import torch
from torch import nn
from torch import optim
from torch.optim import lr_scheduler
from threading import Thread
from threading import Lock
import time
import cv2
from torch.autograd import Variable
import torch.nn.functional as F
from opts import parse_opts
from model import generate_model
from mean import get_mean, get_std
from spatial_transforms import (
Compose, Normalize, Scale, CenterCrop, CornerCrop, MultiScaleCornerCrop,
MultiScaleRandomCrop, RandomHorizontalFlip, ToTensor)
from temporal_transforms import LoopPadding, TemporalRandomCrop
from target_transforms import ClassLabel, VideoID
from target_transforms import Compose as TargetCompose
from dataset import get_training_set, get_validation_set, get_test_set
from utils import Logger
from train import train_epoch
from validation import val_epoch
import test
def readwebcam():
cap = cv2.VideoCapture(0)
global frames
while (True):
# Capture frame-by-frame
ret, frame = cap.read()
# Our operations on the frame come here
#gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
print(frame.shape)
# Display the resulting frame
cv2.imshow('frame', frame)
frame = np.transpose(frame, (2, 0, 1))
if cv2.waitKey(1) & 0xFF == ord('q'):
break
lock.acquire()
frames = np.delete(frames, (0), axis=0)
frames = np.append(frames, frame.reshape(1, 3, 480, -1), axis=0)
lock.release()
#time.sleep(0.1)
# When everything done, release the capture
cap.release()
if __name__ == '__main__':
opt = parse_opts()
if opt.root_path != '':
opt.video_path = os.path.join(opt.root_path, opt.video_path)
opt.annotation_path = os.path.join(opt.root_path, opt.annotation_path)
opt.result_path = os.path.join(opt.root_path, opt.result_path)
if opt.resume_path:
opt.resume_path = os.path.join(opt.root_path, opt.resume_path)
if opt.pretrain_path:
opt.pretrain_path = os.path.join(opt.root_path, opt.pretrain_path)
opt.scales = [opt.initial_scale]
for i in range(1, opt.n_scales):
opt.scales.append(opt.scales[-1] * opt.scale_step)
opt.arch = '{}-{}'.format(opt.model, opt.model_depth)
opt.mean = get_mean(opt.norm_value, dataset=opt.mean_dataset)
opt.std = get_std(opt.norm_value)
print(opt)
with open(os.path.join(opt.result_path, 'opts.json'), 'w') as opt_file:
json.dump(vars(opt), opt_file)
torch.manual_seed(opt.manual_seed)
model, parameters = generate_model(opt)
print(model)
criterion = nn.CrossEntropyLoss()
if not opt.no_cuda:
criterion = criterion.cuda()
if opt.no_mean_norm and not opt.std_norm:
norm_method = Normalize([0, 0, 0], [1, 1, 1])
elif not opt.std_norm:
norm_method = Normalize(opt.mean, [1, 1, 1])
else:
norm_method = Normalize(opt.mean, opt.std)
"""if opt.test:
spatial_transform = Compose([
Scale(int(opt.sample_size / opt.scale_in_test)),
CornerCrop(opt.sample_size, opt.crop_position_in_test),
ToTensor(opt.norm_value), norm_method
])
temporal_transform = LoopPadding(opt.sample_duration)
target_transform = VideoID()
test_data = get_test_set(opt, spatial_transform, temporal_transform,
target_transform)
test_loader = torch.utils.data.DataLoader(
test_data,
batch_size=opt.batch_size,
shuffle=False,
num_workers=opt.n_threads,
pin_memory=True)
test.test(test_loader, model, opt, test_data.class_names)
"""
    lock = Lock()
    frames = np.random.rand(16, 3, 480, 640)  # seed buffer so the capture thread has frames to rotate
    t = Thread(target=readwebcam)
    t.start()
model.eval()
classes = ['abort', 'circle', 'hello', 'no', 'stop', 'turn', 'turn_left', 'turn_right', 'warn']
while (True):
lock.acquire()
print(frames[1])
lock.release()
# inputs type and shape
#<class 'torch.Tensor'>
#torch.Size([10, 3, 16, 112, 112])
#TODO add transformations
inputs = torch.unsqueeze(torch.from_numpy(frames), 0).permute(0, 2, 1, 3, 4)
print(inputs.size())
inputs = Variable(inputs, volatile=True)
outputs = model(inputs)
        outputs = F.softmax(outputs, dim=1)
        # torch.max needs an explicit dimension to return (values, indices)
        _, ind = torch.max(outputs, dim=1)
        print(classes[int(ind)])
#time.sleep(1)
t.join()
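# Hedged sketch for the "TODO add transformations" above: the same spatial
# transforms used in the commented test block could be applied per frame
# before stacking, e.g.
#
#   spatial_transform = Compose([
#       Scale(opt.sample_size),
#       CenterCrop(opt.sample_size),
#       ToTensor(opt.norm_value), norm_method
#   ])
#
# with each captured frame converted to a PIL image first, so the resulting
# clip matches the [batch, channels, frames, height, width] layout expected
# by the model.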
|
utils.py
|
#!/usr/bin/env python3
# encoding: utf-8
import os
import sys
import comm.global_var as gl
try:
import ConfigParser
except:
try:
import configparser as ConfigParser
except:
from six.moves import configparser as ConfigParser
if sys.version_info.major == 2:
import commands
else:
import subprocess
from . import log as deployLog
import socket
import fcntl
import struct
import telnetlib
import platform
import shutil
import json
from urllib import request
from threading import Thread
from distutils.dir_util import copy_tree
log = deployLog.getLocalLogger()
platformStr = platform.platform()
unameStr = platform.uname()[1]
def getIpAddress(ifname):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(
s.fileno(),
0x8915, # SIOCGIFADDR
        struct.pack('256s', ifname[:15].encode('utf-8'))  # interface name must be bytes under Python 3
)[20:24])
def getLocalIp():
return getIpAddress("eth0")
def net_if_used(ip,port):
s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.settimeout(0.5)
try:
result=s.connect_ex((ip, int(port)))
if result==0:
print (" error! port {} has been used. please check.".format(port))
return True
else:
return False
finally:
s.close()
def net_if_used_no_msg(ip,port):
s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.settimeout(0.5)
try:
result=s.connect_ex((ip, int(port)))
if result==0:
return True
else:
return False
finally:
s.close()
def isUbuntu():
return platformStr.lower().find("ubuntu") > -1 or unameStr.lower().find("ubuntu") > -1
def isCentos():
# support redhat
return platformStr.lower().find("centos") > -1 or unameStr.lower().find("centos") > -1 or unameStr.lower().find("redhat") > -1
def isSuse():
return platformStr.lower().find("suse") > -1 or unameStr.lower().find("suse") > -1
def getBaseDir():
cwd = os.getcwd()
log.info(" os.getcwd() is {}".format(cwd))
path = os.path.abspath(os.path.join(os.getcwd(), ".."))
return path
def getCurrentBaseDir():
cwd = os.getcwd()
log.info(" os.getcwd() is {}".format(cwd))
path = os.path.abspath(os.path.join(os.getcwd(), "."))
return path
def copytree(src, dst):
copy_tree(src,dst)
return
def doCmd(cmd):
log.info(" execute cmd start ,cmd : {}".format(cmd))
result = dict()
if sys.version_info.major == 2:
(status, output) = commands.getstatusoutput(cmd)
else:
(status, output) = subprocess.getstatusoutput(cmd)
result["status"] = status
result["output"] = output
log.info(" execute cmd end ,cmd : {},status :{} , output: {}".format(cmd,status,output))
if (0 != status):
raise Exception("execute cmd error ,cmd : {}, status is {} ,output is {}".format(cmd,status, output))
return result
def doCmdIgnoreException(cmd):
log.info(" execute cmd start ,cmd : {}".format(cmd))
result = dict()
if sys.version_info.major == 2:
(status, output) = commands.getstatusoutput(cmd)
else:
(status, output) = subprocess.getstatusoutput(cmd)
result["status"] = status
result["output"] = output
log.info(" execute cmd end ,cmd : {},status :{} , output: {}".format(cmd, status, output))
return result
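# Illustrative usage (the commands shown are assumptions):
#
#   result = doCmd("echo hello")            # raises if the exit status is non-zero
#   print(result["output"])                 # -> hello
#   result = doCmdIgnoreException("false")  # returns a non-zero status without raising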
def getCommProperties(paramsKey):
current_dir = getCurrentBaseDir()
cf = ConfigParser.ConfigParser()
propertiesDir =current_dir + '/' + gl.get_file()
cf.read(propertiesDir)
log.info(" commProperties is {} ".format(propertiesDir))
cf.sections()
value = cf.get('common', paramsKey,fallback=None)
return value
def replaceConf(fileName,oldStr,newStr):
if not os.path.isfile(fileName):
print ("{} is not a file ".format(fileName))
return
oldData =""
with open(fileName, "r") as f:
for line in f:
if oldStr in line:
line = line.replace(oldStr, newStr)
oldData += line
with open(fileName, "w") as f:
f.write(oldData)
return
def replaceConfDir(filePath,oldStr,newStr):
if not os.path.isdir(filePath):
print ("{} is not a dir ".format(filePath))
return
for root, dirs, files in os.walk(filePath):
for file in files:
replaceConf(os.path.join(root,file),oldStr,newStr)
return
def copyFiles(sourceDir, targetDir):
log.info(" copyFiles sourceDir: {} ".format(sourceDir))
for f in os.listdir(sourceDir):
sourceF = os.path.join(sourceDir, f)
targetF = os.path.join(targetDir, f)
if os.path.isfile(sourceF):
# check dir
if not os.path.exists(targetDir):
os.makedirs(targetDir)
# copy file
shutil.copy(sourceF,targetF)
# check sub folder
if os.path.isdir(sourceF):
copyFiles(sourceF, targetF)
def do_telnet(host,port):
try:
tn = telnetlib.Telnet(host, port, timeout=5)
tn.close()
except:
return False
return True
def pullDockerImage(gitComm,fileName,repo_name):
if not os.path.exists("{}/{}".format(getCurrentBaseDir(),fileName)):
print (gitComm)
os.system(gitComm)
else:
info = "n"
if sys.version_info.major == 2:
info = raw_input("{} already exists. Download again or not?[y/n]:".format(fileName))
else:
info = input("{} already exists. Download again or not?[y/n]:".format(fileName))
if info == "y" or info == "Y":
doCmd("rm -rf {}".format(fileName))
print (gitComm)
os.system(gitComm)
doCmd("sudo docker load -i {}".format(fileName))
result = doCmd("docker image ls {} | wc -l".format(repo_name))
print ("Uzip image result {} ".format(result))
if int(result["output"]) <= 1 :
print ("Unzip docker image from file {} failed!".format(fileName))
sys.exit(0)
def pullSourceExtract(gitComm,fileName):
if not os.path.exists("{}/{}.zip".format(getCurrentBaseDir(),fileName)):
print (gitComm)
os.system(gitComm)
else:
info = "n"
if sys.version_info.major == 2:
info = raw_input("{}.zip already exists. Download again or not?[y/n]:".format(fileName))
else:
info = input("{}.zip already exists. Download again or not?[y/n]:".format(fileName))
if info == "y" or info == "Y":
doCmd("rm -rf {}.zip".format(fileName))
doCmd("rm -rf {}".format(fileName))
print (gitComm)
os.system(gitComm)
if not os.path.exists("{}/{}".format(getCurrentBaseDir(),fileName)):
doCmd("unzip -o {}.zip".format(fileName))
if not os.path.exists("{}/{}".format(getCurrentBaseDir(),fileName)):
print ("{}.zip extract failed!".format(fileName))
sys.exit(0)
else:
info1 = "n"
if sys.version_info.major == 2:
info1 = raw_input("{}.zip has been unzipped. Whether to re-unzip?[y/n]:".format(fileName))
else:
info1 = input("{}.zip has been unzipped. Whether to re-unzip?[y/n]:".format(fileName))
if info1 == "y" or info1 == "Y":
doCmd("rm -rf {}".format(fileName))
doCmd("unzip -o {}.zip".format(fileName))
if not os.path.exists("{}/{}".format(getCurrentBaseDir(),fileName)):
print ("{}.zip extract failed!".format(fileName))
sys.exit(0)
def checkFileName(dir,fileName):
Files=os.listdir(dir)
for k in range(len(Files)):
Files[k]=os.path.splitext(Files[k])[0]
fileName = fileName + ".mv"
if fileName in Files:
return True
else:
return False
def get_str_btw(s, f, b):
par = s.partition(f)
return (par[2].partition(b))[0][:]
def rest_get(url):
log.info("rest_get url: {}".format(url))
res = request.urlopen(url)
log.info(res.read())
# NOTE: 'async' became a reserved keyword in Python 3.7, so this decorator
# (and its call sites) must be renamed when targeting newer interpreters.
def async(f):
def wrapper(*args, **kwargs):
thr = Thread(target=f, args=args, kwargs=kwargs)
thr.start()
return wrapper
if __name__ == '__main__':
print(getIpAddress("eth0"))
pass
|
get_errors.py
|
#! /usr/bin/env python3
import os
import re
import json
import logging
from multiprocessing import Process, Queue, current_process
from collections import OrderedDict
import numpy as np
import click
from data_iterator import TextIterator
from params import load_params
logging.basicConfig(level=logging.WARN,
format="%(asctime)s - %(levelname)s %(module)s - %(message)s",
datefmt="%Y-%m-%d %H:%M:%S")
import multiprocessing_logging
multiprocessing_logging.install_mp_handler()
def error_process(params, device, **model_options):
import theano
import theano.sandbox.cuda
from build_model import build_model
theano.sandbox.cuda.use(device)
tparams = OrderedDict()
for param_name, param in params.items():
tparams[param_name] = theano.shared(param, name=param_name)
process_name = current_process().name
logging.info("building and compiling theano functions ({})".format(process_name))
inputs, cost, _ = build_model(tparams, **model_options)
f_cost = theano.function(inputs, cost)
while True:
cur_data = in_queue.get()
if cur_data == "STOP":
break
out_queue.put(f_cost(*cur_data))
def get_error(model_files, dicts, source_file, target_file, devices):
logging.info("Loading model options from {}".format(model_files[0]))
with open(model_files[0], "r") as f:
model_options = json.load(f)
global dictionaries
logging.info("loading dictionaries from {}, {}".format(*dicts))
with open(dicts[0], "r") as f1, open(dicts[1], "r") as f2:
dictionaries = [json.load(f1), json.load(f2)]
logging.info("loading parameters from {}".format(model_files[1]))
params = load_params(model_files[1])
global in_queue
global out_queue
in_queue = Queue()
out_queue = Queue()
processes = [Process(target=error_process, name="process_{}".format(device),
args=(params, device), kwargs=model_options)
for device in devices.split(",")]
for p in processes:
p.daemon = True
p.start()
ti = TextIterator(source_file=source_file, target_file=target_file,
source_dict=dictionaries[0], target_dict=dictionaries[1],
maxlen=model_options["maxlen"],
n_words_source=model_options["n_words_source"],
n_words_target=model_options["n_words_target"],
raw_characters=model_options["characters"])
num_batches = 0
for batch in ti:
in_queue.put(batch)
num_batches += 1
for _ in processes:
in_queue.put("STOP")
costs = []
for num_processed in range(num_batches):
costs.append(out_queue.get())
        percentage_done = ((num_processed + 1) / num_batches) * 100
print("{}: {:.2f}% of input processed".format(model_files[1], percentage_done),
end="\r", flush=True)
print()
mean_cost = np.mean(costs)
print(model_files[1], mean_cost)
return mean_cost
command_group = click.Group()
@command_group.command()
@click.argument("model-files", type=click.Path(exists=True, dir_okay=False), nargs=2)
@click.argument("dicts", type=click.Path(exists=True, dir_okay=False), nargs=2)
@click.argument("source-file", type=click.Path(exists=True, dir_okay=False))
@click.argument("target-file", type=click.Path(exists=True, dir_okay=False))
@click.option("--devices", default="cpu,cpu,cpu,cpu",
help="comma separated list of devices to run training with the asynchronous "
"algorithms; see `'theano.sandbox.cuda.run'`for more information; "
"only the first one is used in case a sequential optimization algorithm is used")
def eval_one_model(model_files, dicts, source_file, target_file, devices):
get_error(model_files, dicts, source_file, target_file, devices)
@command_group.command()
@click.argument("model-dir", type=click.Path(exists=True, dir_okay=True))
@click.argument("dicts", type=click.Path(exists=True, dir_okay=False), nargs=2)
@click.argument("source-file", type=click.Path(exists=True, dir_okay=False))
@click.argument("target-file", type=click.Path(exists=True, dir_okay=False))
@click.option("--devices", default="cpu,cpu,cpu,cpu",
help="comma separated list of devices to run training with the asynchronous "
"algorithms; see `'theano.sandbox.cuda.run'`for more information; "
"only the first one is used in case a sequential optimization algorithm is used")
@click.option("--out-file", type=click.Path(exists=False, dir_okay=False),
help="writes output to this file additional to stdout")
@click.option("--name-format", default=r"epoch_(.+?)_update_(.+?)\.npz",
help="format of model names as regex to parse number of updates (first mathcing group)"
"and number of epochs (second matching group) from")
def eval_multiple_models(model_dir, dicts, source_file, target_file, devices, out_file, name_format):
"""requires a directory containing npz files and *one* json file with model
options that is valid for all these files
npz files should be named XXX_epoch_EPOCH_update_UPDATE.npz"""
# this needs to recompile the model for every model file and each process
# but otherwise this would require more complicated handling of subprocesses...
files = [os.path.join(model_dir, f) for f in os.listdir(model_dir)
if os.path.isfile(os.path.join(model_dir, f))]
model_npzs = [f for f in files if os.path.splitext(f)[1] == ".npz"]
model_option_file = [f for f in files if os.path.splitext(f)[1] == ".json"][0]
nf = re.compile(name_format)
m_infos = []
for i, m in enumerate(model_npzs, 1):
re_match = re.search(nf, m)
if re_match:
epoch = int(re_match.group(1))
update = int(re_match.group(2))
time = os.path.getmtime(m)
cost = get_error((model_option_file, m), dicts, source_file, target_file, devices)
m_infos.append((time, epoch, update, cost, m))
print("processed {}/{} models".format(i, len(model_npzs)))
else:
print("{} did not match name format!".format(m))
m_infos = sorted(m_infos, key=lambda x: x[0])
for m_info in m_infos:
print("\t".join(map(str, m_info)))
if out_file:
with open(out_file, "w") as f:
f.write("time,epoch,update,cost,model\n")
for m_info in m_infos:
f.write(",".join(map(str, m_info)) + "\n")
if __name__ == '__main__':
command_group()
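# Illustrative direct use of get_error() (file names below are assumptions):
#
#   mean_cost = get_error(("model_options.json", "model.npz"),
#                         ("src_dict.json", "tgt_dict.json"),
#                         "dev.src", "dev.tgt", devices="cpu,cpu")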
|
mp.py
|
import os
import pickle
import struct
import sys
from functools import partial
from multiprocessing import Process, Lock, Event as ProcessEvent
from multiprocessing.pool import ThreadPool
from threading import Thread, Event as TrEvent
from time import sleep, time
from typing import List, Dict
import psutil
from six.moves.queue import Empty, Queue as TrQueue
from ..py3_interop import AbstractContextManager
try:
from multiprocessing import SimpleQueue
except ImportError: # noqa
from multiprocessing.queues import SimpleQueue
class SingletonThreadPool(object):
__thread_pool = None
__thread_pool_pid = None
@classmethod
def get(cls):
if os.getpid() != cls.__thread_pool_pid:
cls.__thread_pool = ThreadPool(1)
cls.__thread_pool_pid = os.getpid()
return cls.__thread_pool
@classmethod
def clear(cls):
if cls.__thread_pool:
cls.__thread_pool.close()
cls.__thread_pool = None
cls.__thread_pool_pid = None
class SafeQueue(object):
"""
    Many-writer, single-reader multiprocessing-safe queue
"""
__thread_pool = SingletonThreadPool()
def __init__(self, *args, **kwargs):
self._reader_thread = None
self._q = SimpleQueue(*args, **kwargs)
# Fix the simple queue write so it uses a single OS write, making it atomic message passing
# noinspection PyBroadException
try:
self._q._writer._send_bytes = partial(SafeQueue._pipe_override_send_bytes, self._q._writer)
except Exception:
pass
self._internal_q = None
self._q_size = 0
def empty(self):
return self._q.empty() and (not self._internal_q or self._internal_q.empty())
def is_pending(self):
# only call from main put process
return self._q_size > 0 or not self.empty()
def close(self, event):
# wait until all pending requests pushed
while self.is_pending():
if event:
event.set()
sleep(0.1)
def get(self, *args, **kwargs):
return self._get_internal_queue(*args, **kwargs)
def batch_get(self, max_items=1000, timeout=0.2, throttle_sleep=0.1):
buffer = []
timeout_count = int(timeout/throttle_sleep)
empty_count = timeout_count
while len(buffer) < max_items:
while not self.empty() and len(buffer) < max_items:
try:
buffer.append(self._get_internal_queue(block=False))
empty_count = 0
except Empty:
break
empty_count += 1
if empty_count > timeout_count or len(buffer) >= max_items:
break
sleep(throttle_sleep)
return buffer
def put(self, obj):
# GIL will make sure it is atomic
self._q_size += 1
# make sure the block put is done in the thread pool i.e. in the background
obj = pickle.dumps(obj)
self.__thread_pool.get().apply_async(self._q_put, args=(obj, ))
def _q_put(self, obj):
self._q.put(obj)
# GIL will make sure it is atomic
self._q_size -= 1
def _get_internal_queue(self, *args, **kwargs):
if not self._internal_q:
self._internal_q = TrQueue()
if not self._reader_thread:
self._reader_thread = Thread(target=self._reader_daemon)
self._reader_thread.daemon = True
self._reader_thread.start()
obj = self._internal_q.get(*args, **kwargs)
# deserialize
return pickle.loads(obj)
def _reader_daemon(self):
# pull from process queue and push into thread queue
while True:
# noinspection PyBroadException
try:
obj = self._q.get()
if obj is None:
break
except Exception:
break
self._internal_q.put(obj)
@staticmethod
def _pipe_override_send_bytes(self, buf):
n = len(buf)
# For wire compatibility with 3.2 and lower
header = struct.pack("!i", n)
# Issue #20540: concatenate before sending, to avoid delays due
# to Nagle's algorithm on a TCP socket.
# Also note we want to avoid sending a 0-length buffer separately,
# to avoid "broken pipe" errors if the other end closed the pipe.
self._send(header + buf)
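# Hedged usage sketch (illustrative only): SafeQueue is a many-writer /
# single-reader queue, so writers call put() from any process and a single
# reader drains with get() or batch_get().
def _example_safe_queue_round_trip():
    q = SafeQueue()
    q.put({'metric': 'loss', 'value': 0.1})  # pickled and pushed by the background thread pool
    # the reader side collects whatever arrived within the timeout window
    return q.batch_get(max_items=10, timeout=0.5)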
class SafeEvent(object):
__thread_pool = SingletonThreadPool()
def __init__(self):
self._event = ProcessEvent()
def is_set(self):
return self._event.is_set()
def set(self):
if not BackgroundMonitor.is_subprocess_enabled() or BackgroundMonitor.is_subprocess_alive():
self._event.set()
# SafeEvent.__thread_pool.get().apply_async(func=self._event.set, args=())
def clear(self):
return self._event.clear()
def wait(self, timeout=None):
return self._event.wait(timeout=timeout)
class SingletonLock(AbstractContextManager):
_instances = []
def __init__(self):
self._lock = None
SingletonLock._instances.append(self)
def acquire(self, *args, **kwargs):
self.create()
return self._lock.acquire(*args, **kwargs)
def release(self, *args, **kwargs):
if self._lock is None:
return None
return self._lock.release(*args, **kwargs)
def create(self):
if self._lock is None:
self._lock = Lock()
@classmethod
def instantiate(cls):
for i in cls._instances:
i.create()
def __enter__(self):
"""Return `self` upon entering the runtime context."""
self.acquire()
return self
    def __exit__(self, exc_type, exc_value, traceback):
        """Release the lock and re-raise any exception triggered within the runtime context."""
        # Do whatever cleanup.
        self.release()
        if exc_value is not None:
            raise exc_value
class BackgroundMonitor(object):
# If we will need multiple monitoring contexts (i.e. subprocesses) this will become a dict
_main_process = None
_parent_pid = None
_sub_process_started = None
_instances = {} # type: Dict[int, List[BackgroundMonitor]]
def __init__(self, task, wait_period):
self._event = TrEvent()
self._done_ev = TrEvent()
self._start_ev = TrEvent()
self._task_pid = os.getpid()
self._thread = None
self._wait_timeout = wait_period
self._subprocess = None if task.is_main_task() else False
self._task_obj_id = id(task)
def start(self):
if not self._thread:
self._thread = True
self._event.clear()
self._done_ev.clear()
if self._subprocess is False:
# start the thread we are in threading mode.
self._start()
else:
# append to instances
if self not in self._get_instances():
self._get_instances().append(self)
def wait(self, timeout=None):
if not self._done_ev:
return
self._done_ev.wait(timeout=timeout)
def _start(self):
# if we already started do nothing
if isinstance(self._thread, Thread):
return
self._thread = Thread(target=self._daemon)
self._thread.daemon = True
self._thread.start()
def stop(self):
if not self._thread:
return
if not self.is_subprocess() or self.is_subprocess_alive():
self._event.set()
if isinstance(self._thread, Thread):
try:
self._get_instances().remove(self)
except ValueError:
pass
self._thread = None
def daemon(self):
while True:
if self._event.wait(self._wait_timeout):
break
self._daemon_step()
def _daemon(self):
self._start_ev.set()
self.daemon()
self.post_execution()
self._thread = None
def post_execution(self):
self._done_ev.set()
def set_subprocess_mode(self):
# called just before launching the daemon in a subprocess
if not self._subprocess:
self._subprocess = True
if not isinstance(self._done_ev, SafeEvent):
self._done_ev = SafeEvent()
if not isinstance(self._start_ev, SafeEvent):
self._start_ev = SafeEvent()
if not isinstance(self._event, SafeEvent):
self._event = SafeEvent()
def _daemon_step(self):
pass
@classmethod
def start_all(cls, task, wait_for_subprocess=False):
# noinspection PyProtectedMember
execute_in_subprocess = task._report_subprocess_enabled
if not execute_in_subprocess:
for d in BackgroundMonitor._instances.get(id(task), []):
d._start()
elif not BackgroundMonitor._main_process:
cls._parent_pid = os.getpid()
cls._sub_process_started = SafeEvent()
cls._sub_process_started.clear()
# setup
for d in BackgroundMonitor._instances.get(id(task), []):
d.set_subprocess_mode()
BackgroundMonitor._main_process = Process(target=cls._background_process_start, args=(id(task), ))
BackgroundMonitor._main_process.daemon = True
            # Hack: allow creating daemon subprocesses (even though Python doesn't like it)
un_daemonize = False
# noinspection PyBroadException
try:
from multiprocessing import current_process
if current_process()._config.get('daemon'): # noqa
un_daemonize = current_process()._config.get('daemon') # noqa
current_process()._config['daemon'] = False # noqa
except BaseException:
pass
# try to start the background process, if we fail retry again, or crash
for i in range(4):
try:
BackgroundMonitor._main_process.start()
break
except BaseException:
if i < 3:
sleep(1)
continue
raise
if un_daemonize:
# noinspection PyBroadException
try:
from multiprocessing import current_process
current_process()._config['daemon'] = un_daemonize # noqa
except BaseException:
pass
# wait until subprocess is up
if wait_for_subprocess:
cls._sub_process_started.wait()
@classmethod
def _background_process_start(cls, task_obj_id):
is_debugger_running = bool(getattr(sys, 'gettrace', None) and sys.gettrace())
# restore original signal, this will prevent any deadlocks
# Do not change the exception we need to catch base exception as well
# noinspection PyBroadException
try:
from ... import Task
# noinspection PyProtectedMember
Task.current_task()._remove_at_exit_callbacks()
except: # noqa
pass
# if a debugger is running, wait for it to attach to the subprocess
if is_debugger_running:
sleep(3)
# launch all the threads
for d in cls._instances.get(task_obj_id, []):
d._start()
if cls._sub_process_started:
cls._sub_process_started.set()
# wait until we are signaled
for i in BackgroundMonitor._instances.get(task_obj_id, []):
# noinspection PyBroadException
try:
if i._thread and i._thread.is_alive():
                    # Do NOT change: we need to catch BaseException in case the process gets killed
try:
i._thread.join()
except: # noqa
break
else:
pass
except: # noqa
pass
# we are done, leave process
return
def is_alive(self):
if self.is_subprocess():
return self.is_subprocess_alive() and self._thread \
and self._start_ev.is_set() and not self._done_ev.is_set()
else:
return isinstance(self._thread, Thread) and self._thread.is_alive()
@classmethod
def is_subprocess_alive(cls):
if not cls._main_process:
return False
# noinspection PyBroadException
try:
return \
cls._main_process.is_alive() and \
psutil.Process(cls._main_process.pid).status() != psutil.STATUS_ZOMBIE
except Exception:
current_pid = cls._main_process.pid
if not current_pid:
return False
try:
parent = psutil.Process(cls._parent_pid)
except psutil.Error:
# could not find parent process id
                return False
for child in parent.children(recursive=True):
# kill ourselves last (if we need to)
if child.pid == current_pid:
return child.status() != psutil.STATUS_ZOMBIE
return False
def is_subprocess(self):
return self._subprocess is not False and bool(self._main_process)
def _get_instances(self):
return self._instances.setdefault(self._task_obj_id, [])
@classmethod
def is_subprocess_enabled(cls):
return bool(cls._main_process)
@classmethod
def clear_main_process(cls):
cls.wait_for_sub_process()
BackgroundMonitor._main_process = None
BackgroundMonitor._parent_pid = None
BackgroundMonitor._sub_process_started = None
BackgroundMonitor._instances = {}
SingletonThreadPool.clear()
@classmethod
def wait_for_sub_process(cls, timeout=None):
if not cls.is_subprocess_enabled():
return
tic = time()
while cls.is_subprocess_alive() and (not timeout or time()-tic < timeout):
sleep(0.03)
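# Hedged usage sketch (not part of the original module; constructor signature assumed
# from the attributes set in __init__): a concrete monitor subclasses BackgroundMonitor
# and overrides _daemon_step(); the names below are illustrative only.
#
#     class HeartbeatMonitor(BackgroundMonitor):
#         def _daemon_step(self):
#             print("still alive")          # runs once per wait_period
#
#     monitor = HeartbeatMonitor(task, wait_period=5.0)
#     monitor.start()                       # thread now, or queued for the report subprocess
#     BackgroundMonitor.start_all(task)     # launches queued monitors (possibly in a subprocess)
#     monitor.stop()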
|
player.py
|
import logging
import threading
import time
from collections import defaultdict
from sound_player.common import StatusObject, STATUS
logger = logging.getLogger(__name__)
class Playlist(StatusObject):
def __init__(self, concurency=1, replace=False, loop=1):
super().__init__()
self._concurency = concurency
self._replace_on_add = replace
self._queue_waiting = []
self._queue_current = []
self._thread = None
        self._loop = loop
self._lock = threading.Lock()
def set_concurency(self, concurency):
self._concurency = concurency
def set_replace(self, replace):
self._replace_on_add = replace
def set_loop(self, loop):
self._loop = loop
def enqueue(self, sound):
with self._lock:
logger.debug("enqueue %s" % sound)
loop = sound._loop or self._loop
if loop is not None:
sound.set_loop(loop)
self._queue_waiting.append(sound)
def clear(self):
with self._lock:
self._queue_waiting.clear()
self._queue_current.clear()
def pause(self):
super().pause()
with self._lock:
for sound in self._queue_current:
sound.pause()
def stop(self):
super().stop()
with self._lock:
for sound in self._queue_current:
sound.stop()
self.clear()
def play(self):
super().play()
if self._thread is None:
logger.debug("Create playlist Thread")
self._thread = threading.Thread(target=self._thread_task, daemon=True)
logger.debug("Start playlist Thread")
self._thread.start()
with self._lock:
for sound in self._queue_current:
sound.play()
def _thread_task(self):
logger.debug("In playlist Thread")
while self._status != STATUS.STOPPED:
logger.debug("Thread loop")
if self._status == STATUS.PLAYING:
with self._lock:
# remove stopped sound
i = 0
while i < len(self._queue_current):
sound_status = self._queue_current[i].poll()
                        if sound_status == STATUS.STOPPED:
                            sound = self._queue_current.pop(i)
                            logger.debug("sound %s has stopped. Remove it", sound)
else:
i += 1
if self._replace_on_add and len(self._queue_waiting):
# remove a sound to make a place for a new one
if len(self._queue_current) == self._concurency:
sound = self._queue_current.pop(0)
sound.stop()
# add new if needed
while self._concurency > len(self._queue_current) and len(self._queue_waiting):
sound = self._queue_waiting.pop(0)
logger.debug("Add sound %s", sound)
sound.play()
self._queue_current.append(sound)
time.sleep(0.1)
self._thread = None
class SoundPlayer(StatusObject):
def __init__(self):
super().__init__()
self._playlists = defaultdict(Playlist)
def enqueue(self, sound, playlist):
        if playlist not in self._playlists:
if self._status == STATUS.PLAYING:
self._playlists[playlist].play()
elif self._status == STATUS.PAUSED:
self._playlists[playlist].pause()
self._playlists[playlist].enqueue(sound)
def status(self, playlist=None):
if playlist is not None:
return self._playlists[playlist].status()
return super().status()
def get_playlists(self):
return self._playlists.keys()
def delete_playlist(self, playlist):
self._playlists[playlist].stop()
del self._playlists[playlist]
def play(self, playlist=None):
if playlist is not None:
return self._playlists[playlist].play()
else:
for pl in self._playlists.values():
if pl.status() != STATUS.PLAYING:
pl.play()
super().play()
def pause(self, playlist=None):
if playlist is not None:
return self._playlists[playlist].pause()
else:
for pl in self._playlists.values():
if pl.status() != STATUS.PAUSED:
pl.pause()
super().pause()
def stop(self, playlist=None):
if playlist is not None:
return self._playlists[playlist].stop()
else:
for pl in self._playlists.values():
if pl.status() != STATUS.STOPPED:
pl.stop()
super().stop()
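# Hedged usage sketch (not part of the original module): assuming a Sound class from
# this package that exposes play()/pause()/stop()/poll() and set_loop(); the file name
# and playlist name are illustrative only.
#
#     player = SoundPlayer()
#     player.enqueue(Sound("ding.wav"), "effects")
#     player.play()            # starts every playlist that is not already playing
#     player.pause("effects")  # pauses just the "effects" playlist
#     player.stop()            # stops all playlists and clears their queues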
|
systeminfo.py
|
import csv
import datetime
import logging
import os
import platform
import subprocess
import threading
import time
from typing import Any, Dict, List
import psutil
from calchas.common import base
class Sensor(base.Publisher):
def __init__(self, options: Dict[str, Any]):
super().__init__(options)
self.impl = None
self.read_thread = None
self.request_stop = False
def offer(self) -> List[str]:
# TODO: support topics
return ["all"]
def _start_impl(self) -> None:
if not self.impl:
if platform.system() == "Linux" and os.uname()[4][:3] == "arm": # TODO: identify raspberry pi
self.impl = SensorImplRaspi(self.out_dir)
else:
# Generic implementation
self.impl = SensorImpl(self.out_dir)
self.request_stop = False
if not self.read_thread:
logging.info("Starting system info thread...")
self.read_thread = threading.Thread(target=self._read_thread_fn)
self.read_thread.start()
logging.info(f"System info thread started.")
def _stop_impl(self) -> None:
self.request_stop = True
if self.read_thread:
self.read_thread.join()
self.read_thread = None
def _read_thread_fn(self) -> None:
frequency_sleep_sec = 1. / self.options.get("frequency", 1.)
while not self.request_stop:
data = {}
data.update(self.impl.read_system())
data.update(self.impl.read_process())
data.update(self.impl.read_disk())
# TODO: create classes for payload-types
self.publish("all", data)
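            # Sleep until the next multiple of the sampling period so successive
            # readings stay roughly aligned to wall-clock boundaries.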
time.sleep(frequency_sleep_sec - time.time() % frequency_sleep_sec)
class SensorImpl:
def __init__(self, out_dir: str):
self.process = psutil.Process(os.getpid())
self.out_dir = out_dir
def read_system(self) -> Dict[str, Any]:
cpu_times_percent = psutil.cpu_times_percent()
loadavg = psutil.getloadavg()
return {
"system_cpu_percent": psutil.cpu_percent(),
"system_cpu_times_percent_system": cpu_times_percent.system,
"system_cpu_times_percent_user": cpu_times_percent.user,
"system_cpu_times_percent_idle": cpu_times_percent.idle,
"system_cpu_temp": 0,
"system_loadavg_1": loadavg[0],
"system_loadavg_5": loadavg[1],
"system_loadavg_15": loadavg[2],
"system_virtual_memory_percent": psutil.virtual_memory().percent,
}
def read_process(self) -> Dict[str, Any]:
with self.process.oneshot():
return {
"process_cpu_percent": self.process.cpu_percent(),
"process_cpu_time_system": self.process.cpu_times().system,
"process_cpu_time_user": self.process.cpu_times().user,
"process_mem_rss_percent": self.process.memory_percent(memtype="rss"),
"process_mem_vms_percent": self.process.memory_percent(memtype="vms"),
}
def read_disk(self) -> Dict[str, Any]:
output_part = self._find_mount_point(self.out_dir)
try:
_, _, _, percent = psutil.disk_usage(output_part)
return {"disk_percent": percent}
except PermissionError as e:
logging.debug(f"Exception accessing {output_part}: {e}")
return {"disk_percent": 0}
def _find_mount_point(self, path: str) -> str:
"""Based on https://stackoverflow.com/a/4453715."""
mount_point = os.path.abspath(path)
while not os.path.ismount(mount_point):
mount_point = os.path.dirname(mount_point)
return mount_point
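    # For example (paths are illustrative), _find_mount_point("/media/usb0/run1/logs")
    # walks up the tree until os.path.ismount() is true and returns "/media/usb0",
    # so read_disk() reports usage for the partition that actually holds the output.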
class SensorImplRaspi(SensorImpl):
def __init__(self, out_dir: str):
super().__init__(out_dir)
def read_system(self) -> Dict[str, Any]:
def get_cpu_temp():
# TODO: better error handling
# TODO: handle SIGNINT, e.g.: subprocess.CalledProcessError: Command 'vcgencmd measure_temp' died with <Signals.SIGINT: 2>
return float(subprocess.check_output("vcgencmd measure_temp", shell=True).decode("utf-8").split("=")[1].split("\'")[0])
state = super().read_system()
state["system_cpu_temp"] = get_cpu_temp()
return state
class Output(base.Subscriber):
def __init__(self, options: Dict[str, Any]):
super().__init__(options)
self.fpath = os.path.join(self.out_dir, self.options["output"])
self.fd = None
self.header_written = False
self.data = []
def _start_impl(self):
self.fd = open(self.fpath, "w", newline="")
self.header_written = False
self.data = []
def _stop_impl(self):
self.flush()
if self.fd:
self.fd.close()
self.fd = None
def on_process_message(self, msg: base.Message):
new_data = {"timestamp": msg.timestamp}
new_data.update(msg.data)
self.data.append(new_data)
# Write data to disk every X entries
if len(self.data) % self.options["output_write_threshold"] == 0:
self.flush()
def flush(self):
if self.fd and self.data:
writer = csv.DictWriter(self.fd, fieldnames=self.data[0].keys())
if not self.header_written:
writer.writeheader()
self.header_written = True
writer.writerows(self.data)
self.data.clear()
logging.info("System info output flushed")
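# Hedged sketch (not part of the original module) of the flushed CSV layout: the header
# comes from the first buffered entry, i.e. "timestamp" followed by the keys published by
# the sensor implementation, for example
#     timestamp,system_cpu_percent,...,process_cpu_percent,...,disk_percent
# (the exact timestamp format depends on base.Message).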
|
recursiveFractalSearch.py
|
import math
import numpy as np
import random
import timeit
from threading import Thread
import functools
dist_ar = []  # distance table (global)
cities_count = 0  # number of cities (global)
dots_list = []  # list of cities (global)
# Hyper Parameter
limits = 60 * 12  # time limit in seconds
Fractal_size = 5  # recursion depth
# timeout decorator
def timeout(seconds_before_timeout):
def deco(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
res = [Exception('function [%s] timeout [%s seconds] exceeded!' % (func.__name__, seconds_before_timeout))]
def newFunc():
try:
res[0] = func(*args, **kwargs)
except Exception as e:
res[0] = e
t = Thread(target=newFunc)
t.daemon = True
try:
t.start()
t.join(seconds_before_timeout)
except Exception as e:
print('error starting thread')
raise e
ret = res[0]
if isinstance(ret, BaseException):
raise ret
return ret
return wrapper
return deco
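# Hedged usage sketch: the decorator above runs the wrapped call in a daemon thread and
# re-raises the pre-built Exception if the call does not finish within the budget
# (`slow` is illustrative only and would need `import time`):
#
#     @timeout(2)
#     def slow():
#         time.sleep(5)
#
#     slow()  # raises Exception('function [slow] timeout [2 seconds] exceeded!')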
# build the distance table (param: path to the problem file)
def make_distArray(path):
    global dist_ar
    global cities_count
    global dots_list
    with open(path, mode='rt', encoding='utf-8') as reader:
        dots_list = reader.read().split("\n")  # ['x1 y1', 'x2 y2', 'x3 y3' ... 'xn yn']
    cities_count = int(dots_list.pop(0))
x_list = [] # ['x1', 'x2', 'x3' ... 'xn']
y_list = [] # ['y1', 'y2', 'y3' ... 'yn']
for i in range(cities_count):
temp = dots_list[i].split(" ")
x_list.append(float(temp[0]))
y_list.append(float(temp[1]))
dist_ar = []
for n in range(cities_count):
temp = []
for m in range(cities_count):
temp.append((math.sqrt(((x_list[m] - x_list[n]) ** 2) + ((y_list[m] - y_list[n]) ** 2))))
dist_ar.append(temp)
dist_ar = np.array(dist_ar)
print(dist_ar)
# fitness function based on the distance table
def cal_fit(stri):
fit = 0
for steps in range(len(stri) - 1):
fit += dist_ar[stri[steps], stri[steps + 1]]
return fit
def optFunc(stri):
    route = stri
    fitness = cal_fit(route)
    while True:
        improved = False
        for i in range(len(route)):
            for j in range(len(route)):
                new_route = optSwap(route, i, j)
                new_fitness = cal_fit(new_route)
                if new_fitness < fitness:
                    route = new_route
                    fitness = new_fitness
                    improved = True
                    break
            if improved:
                break
        if not improved:
            break
    return route
def optSwap(route,head,tail):
new_route = []
new_route += route[0:head]
new_route += reversed(route[head:tail+1])
new_route += route[tail+1:len(route)]
return new_route
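# For example, optSwap([0, 1, 2, 3, 4], 1, 3) reverses the middle segment and returns
# [0, 3, 2, 1, 4]; optFunc() keeps such a reversal only when cal_fit() says the tour got shorter.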
def randomTwo(ranges) :
randomList = []
randomList += random.sample(range(0,ranges), 2)
randomList.sort()
return randomList
def randomFour(ranges) :
randomList = []
randomList += random.sample(range(0,ranges), 4)
randomList.sort()
return randomList
def twoOptMove(nest, pointList) :
nest = nest[:]
new_nest = optSwap(nest, pointList[0], pointList[1])
return new_nest
def doublebridgeMove(nest, pointList) :
nest = nest[:]
new_nest = optSwap(nest, pointList[0], pointList[1])
new_nest = optSwap(new_nest, pointList[1], pointList[3])
return new_nest
def makeFractal(route, calls) :
global population
if not calls > Fractal_size :
calls += 1
small = twoOptMove(route, sorted(randomTwo(cities_count)))
large = doublebridgeMove(route, sorted(randomFour(cities_count)))
population.append(small)
population.append(large)
makeFractal(small, calls)
makeFractal(large, calls)
def makeArr(population) :
fits = []
for i in range(len(population)) :
fits.append(cal_fit(population[i]))
arr = np.array([population, fits])
return arr.T
@timeout(limits)
def run() :
global population
generation = 0
optSol = random.sample(range(0, cities_count), cities_count)
population.append(optSol)
calls = 0
while 1 :
makeFractal(optSol, calls)
population = makeArr(population)
        population = population[np.argsort(population[:, 1])]  # sort by fitness
optSol = population[0,0]
        if generation % 5000 == 0:
            print(generation, "generation best fitness", population[0, 1])
population = []
population.append(optSol)
generation += 1
calls = 0
population = []  # declared as a global variable
try :
make_distArray("dots/opt_cycle200.in")
start = timeit.default_timer()
run()
stop = timeit.default_timer()
print(stop - start)
except :
stop = timeit.default_timer()
print(stop - start)
|
sample_part1.py
|
import socket
import time
UDP_IP = '127.0.0.1'
UDP_PORT = 8883
def run_client(ip, port):
time.sleep(2)
MESSAGE = u"""Эхо-служба (€) «Hello World!»"""
sock = socket.socket(socket.AF_INET, # IPv4
socket.SOCK_DGRAM) # UDP
server = (ip, port)
for line in MESSAGE.split(' '):
data = line.encode('utf-8')
sock.sendto(data, server)
        print(f'Client|| Sent to server {server}: {repr(data)}')
        response, address = sock.recvfrom(1024)  # buffer size: 1024
        print(f"Client|| Received: {repr(response.decode('utf-8'))}, "
              f"from {address}")
    print("Client shutting down")
def run_server(ip, port):
sock = socket.socket(socket.AF_INET, # IPv4
socket.SOCK_DGRAM) # UDP
server = (ip, port)
sock.bind(server)
    print(f'Starting echo server: {server}')
while True:
        data, address = sock.recvfrom(1024)  # buffer size: 1024
        print(f"Server|| Received: {repr(data.decode('utf-8'))}, "
              f"from {address}")
        sock.sendto(data, address)
        print(f'Server|| Sent: {repr(data)}, '
              f'to: {address}')
if __name__ == "__main__":
from multiprocessing import Process
client = Process(target=run_client, args=(UDP_IP, UDP_PORT,))
server = Process(target=run_server, args=(UDP_IP, UDP_PORT,))
server.start()
client.start()
client.join()
server.terminate()
|
test_player.py
|
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Tests for BPD's implementation of the MPD protocol.
"""
from __future__ import division, absolute_import, print_function
import unittest
from test.helper import TestHelper
import os
import sys
import multiprocessing as mp
import threading
import socket
import time
import yaml
import tempfile
from contextlib import contextmanager
from beets.util import confit, py3_path
from beetsplug import bpd
# Mock GstPlayer so that the forked process doesn't attempt to import gi:
import mock
import imp
gstplayer = imp.new_module("beetsplug.bpd.gstplayer")
def _gstplayer_play(*_): # noqa: 42
bpd.gstplayer._GstPlayer.playing = True
return mock.DEFAULT
gstplayer._GstPlayer = mock.MagicMock(
spec_set=[
"time", "volume", "playing", "run", "play_file", "pause", "stop",
"seek", "play"
], **{
'playing': False,
'volume': 0,
'time.return_value': (0, 0),
'play_file.side_effect': _gstplayer_play,
'play.side_effect': _gstplayer_play,
})
gstplayer.GstPlayer = lambda _: gstplayer._GstPlayer
sys.modules["beetsplug.bpd.gstplayer"] = gstplayer
bpd.gstplayer = gstplayer
class CommandParseTest(unittest.TestCase):
def test_no_args(self):
s = r'command'
c = bpd.Command(s)
self.assertEqual(c.name, u'command')
self.assertEqual(c.args, [])
def test_one_unquoted_arg(self):
s = r'command hello'
c = bpd.Command(s)
self.assertEqual(c.name, u'command')
self.assertEqual(c.args, [u'hello'])
def test_two_unquoted_args(self):
s = r'command hello there'
c = bpd.Command(s)
self.assertEqual(c.name, u'command')
self.assertEqual(c.args, [u'hello', u'there'])
def test_one_quoted_arg(self):
s = r'command "hello there"'
c = bpd.Command(s)
self.assertEqual(c.name, u'command')
self.assertEqual(c.args, [u'hello there'])
def test_heterogenous_args(self):
s = r'command "hello there" sir'
c = bpd.Command(s)
self.assertEqual(c.name, u'command')
self.assertEqual(c.args, [u'hello there', u'sir'])
def test_quote_in_arg(self):
s = r'command "hello \" there"'
c = bpd.Command(s)
self.assertEqual(c.args, [u'hello " there'])
def test_backslash_in_arg(self):
s = r'command "hello \\ there"'
c = bpd.Command(s)
self.assertEqual(c.args, [u'hello \\ there'])
class MPCResponse(object):
def __init__(self, raw_response):
body = b'\n'.join(raw_response.split(b'\n')[:-2]).decode('utf-8')
self.data = self._parse_body(body)
status = raw_response.split(b'\n')[-2].decode('utf-8')
self.ok, self.err_data = self._parse_status(status)
def _parse_status(self, status):
""" Parses the first response line, which contains the status.
"""
if status.startswith('OK') or status.startswith('list_OK'):
return True, None
elif status.startswith('ACK'):
code, rest = status[5:].split('@', 1)
pos, rest = rest.split(']', 1)
cmd, rest = rest[2:].split('}')
return False, (int(code), int(pos), cmd, rest[1:])
else:
raise RuntimeError('Unexpected status: {!r}'.format(status))
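        # For example, the error line "ACK [2@0] {play} incorrect arguments"
        # parses to ok=False with err_data == (2, 0, 'play', 'incorrect arguments').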
def _parse_body(self, body):
""" Messages are generally in the format "header: content".
Convert them into a dict, storing the values for repeated headers as
lists of strings, and non-repeated ones as string.
"""
data = {}
repeated_headers = set()
for line in body.split('\n'):
if not line:
continue
if ':' not in line:
raise RuntimeError('Unexpected line: {!r}'.format(line))
header, content = line.split(':', 1)
content = content.lstrip()
if header in repeated_headers:
data[header].append(content)
elif header in data:
data[header] = [data[header], content]
repeated_headers.add(header)
else:
data[header] = content
return data
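        # For example, the body "Artist: Foo\nTrack: 1\nTrack: 2" parses to
        # {'Artist': 'Foo', 'Track': ['1', '2']}: repeated headers become lists.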
class MPCClient(object):
def __init__(self, sock, do_hello=True):
self.sock = sock
self.buf = b''
if do_hello:
hello = self.get_response()
if not hello.ok:
raise RuntimeError('Bad hello')
def get_response(self, force_multi=None):
""" Wait for a full server response and wrap it in a helper class.
If the request was a batch request then this will return a list of
`MPCResponse`s, one for each processed subcommand.
"""
response = b''
responses = []
while True:
line = self.readline()
response += line
if line.startswith(b'OK') or line.startswith(b'ACK'):
if force_multi or any(responses):
if line.startswith(b'ACK'):
responses.append(MPCResponse(response))
n_remaining = force_multi - len(responses)
responses.extend([None] * n_remaining)
return responses
else:
return MPCResponse(response)
if line.startswith(b'list_OK'):
responses.append(MPCResponse(response))
response = b''
elif not line:
raise RuntimeError('Unexpected response: {!r}'.format(line))
def serialise_command(self, command, *args):
cmd = [command.encode('utf-8')]
for arg in [a.encode('utf-8') for a in args]:
if b' ' in arg:
cmd.append(b'"' + arg + b'"')
else:
cmd.append(arg)
return b' '.join(cmd) + b'\n'
def send_command(self, command, *args):
request = self.serialise_command(command, *args)
self.sock.sendall(request)
return self.get_response()
def send_commands(self, *commands):
""" Use MPD command batching to send multiple commands at once.
Each item of commands is a tuple containing a command followed by
any arguments.
"""
requests = []
for command_and_args in commands:
command = command_and_args[0]
args = command_and_args[1:]
requests.append(self.serialise_command(command, *args))
requests.insert(0, b'command_list_ok_begin\n')
requests.append(b'command_list_end\n')
request = b''.join(requests)
self.sock.sendall(request)
return self.get_response(force_multi=len(commands))
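        # For example, send_commands(('play',), ('status',)) sends the single batch
        #     b'command_list_ok_begin\nplay\nstatus\ncommand_list_end\n'
        # and returns one MPCResponse per subcommand.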
def readline(self, terminator=b'\n', bufsize=1024):
""" Reads a line of data from the socket.
"""
while True:
if terminator in self.buf:
line, self.buf = self.buf.split(terminator, 1)
line += terminator
return line
self.sock.settimeout(1)
data = self.sock.recv(bufsize)
if data:
self.buf += data
else:
line = self.buf
self.buf = b''
return line
def start_beets(*args):
import beets.ui
beets.ui.main(list(args))
def implements(commands, expectedFailure=False): # noqa: N803
def _test(self):
with self.run_bpd() as client:
response = client.send_command('commands')
self._assert_ok(response)
implemented = response.data['command']
self.assertEqual(commands.intersection(implemented), commands)
return unittest.expectedFailure(_test) if expectedFailure else _test
class BPDTestHelper(unittest.TestCase, TestHelper):
def setUp(self):
self.setup_beets(disk=True)
self.load_plugins('bpd')
self.item1 = self.add_item(
title='Track One Title', track=1,
album='Album Title', artist='Artist Name')
self.item2 = self.add_item(
title='Track Two Title', track=2,
album='Album Title', artist='Artist Name')
self.lib.add_album([self.item1, self.item2])
def tearDown(self):
self.teardown_beets()
self.unload_plugins()
@contextmanager
def run_bpd(self, host='localhost', port=9876, password=None,
do_hello=True, second_client=False):
""" Runs BPD in another process, configured with the same library
database as we created in the setUp method. Exposes a client that is
connected to the server, and kills the server at the end.
"""
# Create a config file:
config = {
'pluginpath': [py3_path(self.temp_dir)],
'plugins': 'bpd',
'bpd': {'host': host, 'port': port, 'control_port': port + 1},
}
if password:
config['bpd']['password'] = password
config_file = tempfile.NamedTemporaryFile(
mode='wb', dir=py3_path(self.temp_dir), suffix='.yaml',
delete=False)
config_file.write(
yaml.dump(config, Dumper=confit.Dumper, encoding='utf-8'))
config_file.close()
# Fork and launch BPD in the new process:
args = (
'--library', self.config['library'].as_filename(),
'--directory', py3_path(self.libdir),
'--config', py3_path(config_file.name),
'bpd'
)
server = mp.Process(target=start_beets, args=args)
server.start()
# Wait until the socket is connected:
sock, sock2 = None, None
for _ in range(20):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if sock.connect_ex((host, port)) == 0:
break
else:
sock.close()
time.sleep(0.01)
else:
raise RuntimeError('Timed out waiting for the BPD server')
try:
if second_client:
sock2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock2.connect((host, port))
yield MPCClient(sock, do_hello), MPCClient(sock2, do_hello)
else:
yield MPCClient(sock, do_hello)
finally:
sock.close()
if sock2:
sock2.close()
server.terminate()
server.join(timeout=0.2)
def _assert_ok(self, *responses):
for response in responses:
self.assertTrue(response is not None)
self.assertTrue(response.ok, 'Response failed: {}'.format(
response.err_data))
def _assert_failed(self, response, code, pos=None):
""" Check that a command failed with a specific error code. If this
is a list of responses, first check all preceding commands were OK.
"""
if pos is not None:
previous_commands = response[0:pos]
self._assert_ok(*previous_commands)
response = response[pos]
self.assertEqual(pos, response.err_data[1])
self.assertFalse(response.ok)
if code is not None:
self.assertEqual(code, response.err_data[0])
def _bpd_add(self, client, *items, **kwargs):
""" Add the given item to the BPD playlist or queue.
"""
paths = ['/'.join([
item.artist, item.album,
py3_path(os.path.basename(item.path))]) for item in items]
playlist = kwargs.get('playlist')
if playlist:
commands = [('playlistadd', playlist, path) for path in paths]
else:
commands = [('add', path) for path in paths]
responses = client.send_commands(*commands)
self._assert_ok(*responses)
class BPDTest(BPDTestHelper):
def test_server_hello(self):
with self.run_bpd(do_hello=False) as client:
self.assertEqual(client.readline(), b'OK MPD 0.14.0\n')
def test_unknown_cmd(self):
with self.run_bpd() as client:
response = client.send_command('notacommand')
self._assert_failed(response, bpd.ERROR_UNKNOWN)
def test_unexpected_argument(self):
with self.run_bpd() as client:
response = client.send_command('ping', 'extra argument')
self._assert_failed(response, bpd.ERROR_ARG)
def test_missing_argument(self):
with self.run_bpd() as client:
response = client.send_command('add')
self._assert_failed(response, bpd.ERROR_ARG)
def test_system_error(self):
with self.run_bpd() as client:
response = client.send_command('crash_TypeError')
self._assert_failed(response, bpd.ERROR_SYSTEM)
def test_empty_request(self):
with self.run_bpd() as client:
response = client.send_command('')
self._assert_failed(response, bpd.ERROR_UNKNOWN)
class BPDQueryTest(BPDTestHelper):
test_implements_query = implements({
'clearerror', 'currentsong', 'stats',
})
def test_cmd_status(self):
with self.run_bpd() as client:
self._bpd_add(client, self.item1, self.item2)
responses = client.send_commands(
('status',),
('play',),
('status',))
self._assert_ok(*responses)
fields_not_playing = {
'repeat', 'random', 'single', 'consume', 'playlist',
'playlistlength', 'mixrampdb', 'state',
'volume'
}
self.assertEqual(fields_not_playing, set(responses[0].data.keys()))
fields_playing = fields_not_playing | {
'song', 'songid', 'time', 'elapsed', 'bitrate', 'duration', 'audio'
}
self.assertEqual(fields_playing, set(responses[2].data.keys()))
def test_cmd_idle(self):
def _toggle(c):
for _ in range(3):
rs = c.send_commands(('play',), ('pause',))
# time.sleep(0.05) # uncomment if test is flaky
if any(not r.ok for r in rs):
raise RuntimeError('Toggler failed')
with self.run_bpd(second_client=True) as (client, client2):
self._bpd_add(client, self.item1, self.item2)
toggler = threading.Thread(target=_toggle, args=(client2,))
toggler.start()
            # Idling will hang until the toggler thread changes the play state.
            # The client sockets have a 1s timeout set, so at worst this will
            # raise a socket.timeout and fail the test if the toggler thread
            # manages to finish before the idle command is sent here.
response = client.send_command('idle', 'player')
toggler.join()
self._assert_ok(response)
def test_cmd_idle_with_pending(self):
with self.run_bpd(second_client=True) as (client, client2):
response1 = client.send_command('random', '1')
response2 = client2.send_command('idle')
self._assert_ok(response1, response2)
self.assertEqual('options', response2.data['changed'])
def test_cmd_noidle(self):
with self.run_bpd() as client:
# Manually send a command without reading a response.
request = client.serialise_command('idle')
client.sock.sendall(request)
time.sleep(0.01)
response = client.send_command('noidle')
self._assert_ok(response)
class BPDPlaybackTest(BPDTestHelper):
test_implements_playback = implements({
'random',
})
def test_cmd_consume(self):
with self.run_bpd() as client:
self._bpd_add(client, self.item1, self.item2)
responses = client.send_commands(
('consume', '0'),
('playlistinfo',),
('next',),
('playlistinfo',),
('consume', '1'),
('playlistinfo',),
('play', '0'),
('next',),
('playlistinfo',),
('status',))
self._assert_ok(*responses)
self.assertEqual(responses[1].data['Id'], responses[3].data['Id'])
self.assertEqual(['1', '2'], responses[5].data['Id'])
self.assertEqual('2', responses[8].data['Id'])
self.assertEqual('1', responses[9].data['consume'])
self.assertEqual('play', responses[9].data['state'])
def test_cmd_consume_in_reverse(self):
with self.run_bpd() as client:
self._bpd_add(client, self.item1, self.item2)
responses = client.send_commands(
('consume', '1'),
('play', '1'),
('playlistinfo',),
('previous',),
('playlistinfo',),
('status',))
self._assert_ok(*responses)
self.assertEqual(['1', '2'], responses[2].data['Id'])
self.assertEqual('1', responses[4].data['Id'])
self.assertEqual('play', responses[5].data['state'])
def test_cmd_single(self):
with self.run_bpd() as client:
self._bpd_add(client, self.item1, self.item2)
responses = client.send_commands(
('status',),
('single', '1'),
('play',),
('status',),
('next',),
('status',))
self._assert_ok(*responses)
self.assertEqual('0', responses[0].data['single'])
self.assertEqual('1', responses[3].data['single'])
self.assertEqual('play', responses[3].data['state'])
self.assertEqual('stop', responses[5].data['state'])
def test_cmd_repeat(self):
with self.run_bpd() as client:
self._bpd_add(client, self.item1, self.item2)
responses = client.send_commands(
('repeat', '1'),
('play',),
('currentsong',),
('next',),
('currentsong',),
('next',),
('currentsong',))
self._assert_ok(*responses)
self.assertEqual('1', responses[2].data['Id'])
self.assertEqual('2', responses[4].data['Id'])
self.assertEqual('1', responses[6].data['Id'])
def test_cmd_repeat_with_single(self):
with self.run_bpd() as client:
self._bpd_add(client, self.item1, self.item2)
responses = client.send_commands(
('repeat', '1'),
('single', '1'),
('play',),
('currentsong',),
('next',),
('status',),
('currentsong',))
self._assert_ok(*responses)
self.assertEqual('1', responses[3].data['Id'])
self.assertEqual('play', responses[5].data['state'])
self.assertEqual('1', responses[6].data['Id'])
def test_cmd_repeat_in_reverse(self):
with self.run_bpd() as client:
self._bpd_add(client, self.item1, self.item2)
responses = client.send_commands(
('repeat', '1'),
('play',),
('currentsong',),
('previous',),
('currentsong',))
self._assert_ok(*responses)
self.assertEqual('1', responses[2].data['Id'])
self.assertEqual('2', responses[4].data['Id'])
def test_cmd_repeat_with_single_in_reverse(self):
with self.run_bpd() as client:
self._bpd_add(client, self.item1, self.item2)
responses = client.send_commands(
('repeat', '1'),
('single', '1'),
('play',),
('currentsong',),
('previous',),
('status',),
('currentsong',))
self._assert_ok(*responses)
self.assertEqual('1', responses[3].data['Id'])
self.assertEqual('play', responses[5].data['state'])
self.assertEqual('1', responses[6].data['Id'])
def test_cmd_crossfade(self):
with self.run_bpd() as client:
responses = client.send_commands(
('status',),
('crossfade', '123'),
('status',),
('crossfade', '-2'))
response = client.send_command('crossfade', '0.5')
self._assert_failed(responses, bpd.ERROR_ARG, pos=3)
self._assert_failed(response, bpd.ERROR_ARG)
self.assertNotIn('xfade', responses[0].data)
self.assertAlmostEqual(123, int(responses[2].data['xfade']))
def test_cmd_mixrampdb(self):
with self.run_bpd() as client:
responses = client.send_commands(
('mixrampdb', '-17'),
('status',))
self._assert_ok(*responses)
self.assertAlmostEqual(-17, float(responses[1].data['mixrampdb']))
def test_cmd_mixrampdelay(self):
with self.run_bpd() as client:
responses = client.send_commands(
('mixrampdelay', '2'),
('status',),
('mixrampdelay', 'nan'),
('status',),
('mixrampdelay', '-2'))
self._assert_failed(responses, bpd.ERROR_ARG, pos=4)
self.assertAlmostEqual(2, float(responses[1].data['mixrampdelay']))
self.assertNotIn('mixrampdelay', responses[3].data)
def test_cmd_setvol(self):
with self.run_bpd() as client:
responses = client.send_commands(
('setvol', '67'),
('status',),
('setvol', '32'),
('status',),
('setvol', '101'))
self._assert_failed(responses, bpd.ERROR_ARG, pos=4)
self.assertEqual('67', responses[1].data['volume'])
self.assertEqual('32', responses[3].data['volume'])
def test_cmd_volume(self):
with self.run_bpd() as client:
response = client.send_command('volume', '10')
self._assert_failed(response, bpd.ERROR_SYSTEM)
def test_cmd_replay_gain(self):
with self.run_bpd() as client:
responses = client.send_commands(
('replay_gain_mode', 'track'),
('replay_gain_status',),
('replay_gain_mode', 'notanoption'))
self._assert_failed(responses, bpd.ERROR_ARG, pos=2)
self.assertAlmostEqual('track', responses[1].data['replay_gain_mode'])
class BPDControlTest(BPDTestHelper):
test_implements_control = implements({
'pause', 'playid', 'seek',
'seekid', 'seekcur', 'stop',
}, expectedFailure=True)
def test_cmd_play(self):
with self.run_bpd() as client:
self._bpd_add(client, self.item1, self.item2)
responses = client.send_commands(
('status',),
('play',),
('status',),
('play', '1'),
('currentsong',))
self._assert_ok(*responses)
self.assertEqual('stop', responses[0].data['state'])
self.assertEqual('play', responses[2].data['state'])
self.assertEqual('2', responses[4].data['Id'])
def test_cmd_next(self):
with self.run_bpd() as client:
self._bpd_add(client, self.item1, self.item2)
responses = client.send_commands(
('play',),
('currentsong',),
('next',),
('currentsong',),
('next',),
('status',))
self._assert_ok(*responses)
self.assertEqual('1', responses[1].data['Id'])
self.assertEqual('2', responses[3].data['Id'])
self.assertEqual('stop', responses[5].data['state'])
def test_cmd_previous(self):
with self.run_bpd() as client:
self._bpd_add(client, self.item1, self.item2)
responses = client.send_commands(
('play', '1'),
('currentsong',),
('previous',),
('currentsong',),
('previous',),
('status',),
('currentsong',))
self._assert_ok(*responses)
self.assertEqual('2', responses[1].data['Id'])
self.assertEqual('1', responses[3].data['Id'])
self.assertEqual('play', responses[5].data['state'])
self.assertEqual('1', responses[6].data['Id'])
class BPDQueueTest(BPDTestHelper):
test_implements_queue = implements({
'addid', 'clear', 'delete', 'deleteid', 'move',
'moveid', 'playlist', 'playlistfind', 'playlistid',
'playlistsearch', 'plchanges',
'plchangesposid', 'prio', 'prioid', 'rangeid', 'shuffle',
'swap', 'swapid', 'addtagid', 'cleartagid',
}, expectedFailure=True)
def test_cmd_add(self):
with self.run_bpd() as client:
self._bpd_add(client, self.item1)
def test_cmd_playlistinfo(self):
with self.run_bpd() as client:
self._bpd_add(client, self.item1)
responses = client.send_commands(
('playlistinfo',),
('playlistinfo', '0'),
('playlistinfo', '200'))
self._assert_failed(responses, bpd.ERROR_ARG, pos=2)
class BPDPlaylistsTest(BPDTestHelper):
test_implements_playlists = implements({'playlistadd'})
def test_cmd_listplaylist(self):
with self.run_bpd() as client:
response = client.send_command('listplaylist', 'anything')
self._assert_failed(response, bpd.ERROR_NO_EXIST)
def test_cmd_listplaylistinfo(self):
with self.run_bpd() as client:
response = client.send_command('listplaylistinfo', 'anything')
self._assert_failed(response, bpd.ERROR_NO_EXIST)
def test_cmd_listplaylists(self):
with self.run_bpd() as client:
response = client.send_command('listplaylists')
self._assert_failed(response, bpd.ERROR_UNKNOWN)
def test_cmd_load(self):
with self.run_bpd() as client:
response = client.send_command('load', 'anything')
self._assert_failed(response, bpd.ERROR_NO_EXIST)
@unittest.skip
def test_cmd_playlistadd(self):
with self.run_bpd() as client:
self._bpd_add(client, self.item1, playlist='anything')
def test_cmd_playlistclear(self):
with self.run_bpd() as client:
response = client.send_command('playlistclear', 'anything')
self._assert_failed(response, bpd.ERROR_UNKNOWN)
def test_cmd_playlistdelete(self):
with self.run_bpd() as client:
response = client.send_command('playlistdelete', 'anything', '0')
self._assert_failed(response, bpd.ERROR_UNKNOWN)
def test_cmd_playlistmove(self):
with self.run_bpd() as client:
response = client.send_command(
'playlistmove', 'anything', '0', '1')
self._assert_failed(response, bpd.ERROR_UNKNOWN)
def test_cmd_rename(self):
with self.run_bpd() as client:
response = client.send_command('rename', 'anything', 'newname')
self._assert_failed(response, bpd.ERROR_UNKNOWN)
def test_cmd_rm(self):
with self.run_bpd() as client:
response = client.send_command('rm', 'anything')
self._assert_failed(response, bpd.ERROR_UNKNOWN)
def test_cmd_save(self):
with self.run_bpd() as client:
self._bpd_add(client, self.item1)
response = client.send_command('save', 'newplaylist')
self._assert_failed(response, bpd.ERROR_UNKNOWN)
class BPDDatabaseTest(BPDTestHelper):
test_implements_database = implements({
'albumart', 'find', 'findadd', 'listall',
'listallinfo', 'listfiles', 'readcomments',
'searchadd', 'searchaddpl', 'update', 'rescan',
}, expectedFailure=True)
def test_cmd_search(self):
with self.run_bpd() as client:
response = client.send_command('search', 'track', '1')
self._assert_ok(response)
self.assertEqual(self.item1.title, response.data['Title'])
def test_cmd_list(self):
with self.run_bpd() as client:
responses = client.send_commands(
('list', 'album'),
('list', 'track'),
('list', 'album', 'artist', 'Artist Name', 'track'))
self._assert_failed(responses, bpd.ERROR_ARG, pos=2)
self.assertEqual('Album Title', responses[0].data['Album'])
self.assertEqual(['1', '2'], responses[1].data['Track'])
def test_cmd_list_three_arg_form(self):
with self.run_bpd() as client:
responses = client.send_commands(
('list', 'album', 'artist', 'Artist Name'),
('list', 'album', 'Artist Name'),
('list', 'track', 'Artist Name'))
self._assert_failed(responses, bpd.ERROR_ARG, pos=2)
self.assertEqual(responses[0].data, responses[1].data)
def test_cmd_lsinfo(self):
with self.run_bpd() as client:
response1 = client.send_command('lsinfo')
self._assert_ok(response1)
response2 = client.send_command(
'lsinfo', response1.data['directory'])
self._assert_ok(response2)
response3 = client.send_command(
'lsinfo', response2.data['directory'])
self._assert_ok(response3)
self.assertIn(self.item1.title, response3.data['Title'])
def test_cmd_count(self):
with self.run_bpd() as client:
response = client.send_command('count', 'track', '1')
self._assert_ok(response)
self.assertEqual('1', response.data['songs'])
self.assertEqual('0', response.data['playtime'])
class BPDMountsTest(BPDTestHelper):
test_implements_mounts = implements({
'mount', 'unmount', 'listmounts', 'listneighbors',
}, expectedFailure=True)
class BPDStickerTest(BPDTestHelper):
test_implements_stickers = implements({
'sticker',
}, expectedFailure=True)
class BPDConnectionTest(BPDTestHelper):
test_implements_connection = implements({
'close', 'kill', 'tagtypes',
})
def test_cmd_password(self):
with self.run_bpd(password='abc123') as client:
response = client.send_command('status')
self._assert_failed(response, bpd.ERROR_PERMISSION)
response = client.send_command('password', 'wrong')
self._assert_failed(response, bpd.ERROR_PASSWORD)
responses = client.send_commands(
('password', 'abc123'),
('status',))
self._assert_ok(*responses)
def test_cmd_ping(self):
with self.run_bpd() as client:
response = client.send_command('ping')
self._assert_ok(response)
@unittest.skip
def test_cmd_tagtypes(self):
with self.run_bpd() as client:
response = client.send_command('tagtypes')
self._assert_ok(response)
self.assertEqual({
'Artist', 'ArtistSort', 'Album', 'AlbumSort', 'AlbumArtist',
'AlbumArtistSort', 'Title', 'Track', 'Name', 'Genre', 'Date',
'Composer', 'Performer', 'Comment', 'Disc', 'Label',
'OriginalDate', 'MUSICBRAINZ_ARTISTID', 'MUSICBRAINZ_ALBUMID',
'MUSICBRAINZ_ALBUMARTISTID', 'MUSICBRAINZ_TRACKID',
'MUSICBRAINZ_RELEASETRACKID', 'MUSICBRAINZ_WORKID',
}, set(response.data['tag']))
@unittest.skip
def test_tagtypes_mask(self):
with self.run_bpd() as client:
response = client.send_command('tagtypes', 'clear')
self._assert_ok(response)
class BPDPartitionTest(BPDTestHelper):
test_implements_partitions = implements({
'partition', 'listpartitions', 'newpartition',
}, expectedFailure=True)
class BPDDeviceTest(BPDTestHelper):
test_implements_devices = implements({
'disableoutput', 'enableoutput', 'toggleoutput', 'outputs',
}, expectedFailure=True)
class BPDReflectionTest(BPDTestHelper):
test_implements_reflection = implements({
'config', 'commands', 'notcommands', 'urlhandlers',
'decoders',
}, expectedFailure=True)
class BPDPeersTest(BPDTestHelper):
test_implements_peers = implements({
'subscribe', 'unsubscribe', 'channels', 'readmessages',
'sendmessage',
}, expectedFailure=True)
def suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
test_socket.py
|
import unittest
from test import support
import errno
import io
import itertools
import socket
import select
import tempfile
import time
import traceback
import queue
import sys
import os
import platform
import array
import contextlib
from weakref import proxy
import signal
import math
import pickle
import struct
import random
import shutil
import string
import _thread as thread
import threading
try:
import multiprocessing
except ImportError:
multiprocessing = False
try:
import fcntl
except ImportError:
fcntl = None
HOST = support.HOST
# test unicode string and carriage return
MSG = 'Michael Gilfix was here\u1234\r\n'.encode('utf-8')
MAIN_TIMEOUT = 60.0
VSOCKPORT = 1234
AIX = platform.system() == "AIX"
try:
import _socket
except ImportError:
_socket = None
def get_cid():
if fcntl is None:
return None
if not hasattr(socket, 'IOCTL_VM_SOCKETS_GET_LOCAL_CID'):
return None
try:
with open("/dev/vsock", "rb") as f:
r = fcntl.ioctl(f, socket.IOCTL_VM_SOCKETS_GET_LOCAL_CID, " ")
except OSError:
return None
else:
return struct.unpack("I", r)[0]
def _have_socket_can():
"""Check whether CAN sockets are supported on this host."""
try:
s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_can_isotp():
"""Check whether CAN ISOTP sockets are supported on this host."""
try:
s = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_rds():
"""Check whether RDS sockets are supported on this host."""
try:
s = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_alg():
"""Check whether AF_ALG sockets are supported on this host."""
try:
s = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_qipcrtr():
"""Check whether AF_QIPCRTR sockets are supported on this host."""
try:
s = socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_vsock():
"""Check whether AF_VSOCK sockets are supported on this host."""
ret = get_cid() is not None
return ret
@contextlib.contextmanager
def socket_setdefaulttimeout(timeout):
old_timeout = socket.getdefaulttimeout()
try:
socket.setdefaulttimeout(timeout)
yield
finally:
socket.setdefaulttimeout(old_timeout)
HAVE_SOCKET_CAN = _have_socket_can()
HAVE_SOCKET_CAN_ISOTP = _have_socket_can_isotp()
HAVE_SOCKET_RDS = _have_socket_rds()
HAVE_SOCKET_ALG = _have_socket_alg()
HAVE_SOCKET_QIPCRTR = _have_socket_qipcrtr()
HAVE_SOCKET_VSOCK = _have_socket_vsock()
# Size in bytes of the int type
SIZEOF_INT = array.array("i").itemsize
class SocketTCPTest(unittest.TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = support.bind_port(self.serv)
self.serv.listen()
def tearDown(self):
self.serv.close()
self.serv = None
class SocketUDPTest(unittest.TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.port = support.bind_port(self.serv)
def tearDown(self):
self.serv.close()
self.serv = None
class ThreadSafeCleanupTestCase(unittest.TestCase):
"""Subclass of unittest.TestCase with thread-safe cleanup methods.
This subclass protects the addCleanup() and doCleanups() methods
with a recursive lock.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._cleanup_lock = threading.RLock()
def addCleanup(self, *args, **kwargs):
with self._cleanup_lock:
return super().addCleanup(*args, **kwargs)
def doCleanups(self, *args, **kwargs):
with self._cleanup_lock:
return super().doCleanups(*args, **kwargs)
class SocketCANTest(unittest.TestCase):
"""To be able to run this test, a `vcan0` CAN interface can be created with
the following commands:
# modprobe vcan
# ip link add dev vcan0 type vcan
# ip link set up vcan0
"""
interface = 'vcan0'
bufsize = 128
"""The CAN frame structure is defined in <linux/can.h>:
struct can_frame {
canid_t can_id; /* 32 bit CAN_ID + EFF/RTR/ERR flags */
__u8 can_dlc; /* data length code: 0 .. 8 */
__u8 data[8] __attribute__((aligned(8)));
};
"""
can_frame_fmt = "=IB3x8s"
can_frame_size = struct.calcsize(can_frame_fmt)
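    # For example, a frame with can_id 0x123 carrying the two data bytes b'\x01\x02'
    # could be packed as struct.pack(can_frame_fmt, 0x123, 2, b'\x01\x02'), matching
    # the "=IB3x8s" layout documented above.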
"""The Broadcast Management Command frame structure is defined
in <linux/can/bcm.h>:
struct bcm_msg_head {
__u32 opcode;
__u32 flags;
__u32 count;
struct timeval ival1, ival2;
canid_t can_id;
__u32 nframes;
struct can_frame frames[0];
}
`bcm_msg_head` must be 8 bytes aligned because of the `frames` member (see
`struct can_frame` definition). Must use native not standard types for packing.
"""
bcm_cmd_msg_fmt = "@3I4l2I"
bcm_cmd_msg_fmt += "x" * (struct.calcsize(bcm_cmd_msg_fmt) % 8)
def setUp(self):
self.s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
self.addCleanup(self.s.close)
try:
self.s.bind((self.interface,))
except OSError:
self.skipTest('network interface `%s` does not exist' %
self.interface)
class SocketRDSTest(unittest.TestCase):
"""To be able to run this test, the `rds` kernel module must be loaded:
# modprobe rds
"""
bufsize = 8192
def setUp(self):
self.serv = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
self.addCleanup(self.serv.close)
try:
self.port = support.bind_port(self.serv)
except OSError:
self.skipTest('unable to bind RDS socket')
class ThreadableTest:
"""Threadable Test class
The ThreadableTest class makes it easy to create a threaded
client/server pair from an existing unit test. To create a
new threaded class from an existing unit test, use multiple
inheritance:
class NewClass (OldClass, ThreadableTest):
pass
This class defines two new fixture functions with obvious
purposes for overriding:
clientSetUp ()
clientTearDown ()
Any new test functions within the class must then define
tests in pairs, where the test name is preceded with a
'_' to indicate the client portion of the test. Ex:
def testFoo(self):
# Server portion
def _testFoo(self):
# Client portion
Any exceptions raised by the clients during their tests
are caught and transferred to the main thread to alert
the testing framework.
Note, the server setup function cannot call any blocking
functions that rely on the client thread during setup,
unless serverExplicitReady() is called just before
the blocking call (such as in setting up a client/server
        connection and performing the accept() in setUp()).
"""
def __init__(self):
# Swap the true setup function
self.__setUp = self.setUp
self.__tearDown = self.tearDown
self.setUp = self._setUp
self.tearDown = self._tearDown
def serverExplicitReady(self):
"""This method allows the server to explicitly indicate that
it wants the client thread to proceed. This is useful if the
server is about to execute a blocking routine that is
dependent upon the client thread during its setup routine."""
self.server_ready.set()
def _setUp(self):
self.wait_threads = support.wait_threads_exit()
self.wait_threads.__enter__()
self.server_ready = threading.Event()
self.client_ready = threading.Event()
self.done = threading.Event()
self.queue = queue.Queue(1)
self.server_crashed = False
# Do some munging to start the client test.
methodname = self.id()
i = methodname.rfind('.')
methodname = methodname[i+1:]
test_method = getattr(self, '_' + methodname)
self.client_thread = thread.start_new_thread(
self.clientRun, (test_method,))
try:
self.__setUp()
except:
self.server_crashed = True
raise
finally:
self.server_ready.set()
self.client_ready.wait()
def _tearDown(self):
self.__tearDown()
self.done.wait()
self.wait_threads.__exit__(None, None, None)
if self.queue.qsize():
exc = self.queue.get()
raise exc
def clientRun(self, test_func):
self.server_ready.wait()
try:
self.clientSetUp()
except BaseException as e:
self.queue.put(e)
self.clientTearDown()
return
finally:
self.client_ready.set()
if self.server_crashed:
self.clientTearDown()
return
if not hasattr(test_func, '__call__'):
raise TypeError("test_func must be a callable function")
try:
test_func()
except BaseException as e:
self.queue.put(e)
finally:
self.clientTearDown()
def clientSetUp(self):
raise NotImplementedError("clientSetUp must be implemented.")
def clientTearDown(self):
self.done.set()
thread.exit()
class ThreadedTCPSocketTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedUDPSocketTest(SocketUDPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketUDPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedCANSocketTest(SocketCANTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketCANTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
try:
self.cli.bind((self.interface,))
except OSError:
# skipTest should not be called here, and will be called in the
# server instead
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedRDSSocketTest(SocketRDSTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketRDSTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
try:
# RDS sockets must be bound explicitly to send or receive data
self.cli.bind((HOST, 0))
self.cli_addr = self.cli.getsockname()
except OSError:
# skipTest should not be called here, and will be called in the
# server instead
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
@unittest.skipIf(fcntl is None, "need fcntl")
@unittest.skipUnless(HAVE_SOCKET_VSOCK,
'VSOCK sockets required for this test.')
@unittest.skipUnless(get_cid() != 2,
"This test can only be run on a virtual guest.")
class ThreadedVSOCKSocketStreamTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName='runTest'):
unittest.TestCase.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.serv = socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM)
self.addCleanup(self.serv.close)
self.serv.bind((socket.VMADDR_CID_ANY, VSOCKPORT))
self.serv.listen()
self.serverExplicitReady()
self.conn, self.connaddr = self.serv.accept()
self.addCleanup(self.conn.close)
def clientSetUp(self):
time.sleep(0.1)
self.cli = socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM)
self.addCleanup(self.cli.close)
cid = get_cid()
self.cli.connect((cid, VSOCKPORT))
def testStream(self):
msg = self.conn.recv(1024)
self.assertEqual(msg, MSG)
def _testStream(self):
self.cli.send(MSG)
self.cli.close()
class SocketConnectedTest(ThreadedTCPSocketTest):
"""Socket tests for client-server connection.
self.cli_conn is a client socket connected to the server. The
setUp() method guarantees that it is connected to the server.
"""
def __init__(self, methodName='runTest'):
ThreadedTCPSocketTest.__init__(self, methodName=methodName)
def setUp(self):
ThreadedTCPSocketTest.setUp(self)
# Indicate explicitly we're ready for the client thread to
# proceed and then perform the blocking call to accept
self.serverExplicitReady()
conn, addr = self.serv.accept()
self.cli_conn = conn
def tearDown(self):
self.cli_conn.close()
self.cli_conn = None
ThreadedTCPSocketTest.tearDown(self)
def clientSetUp(self):
ThreadedTCPSocketTest.clientSetUp(self)
self.cli.connect((HOST, self.port))
self.serv_conn = self.cli
def clientTearDown(self):
self.serv_conn.close()
self.serv_conn = None
ThreadedTCPSocketTest.clientTearDown(self)
class SocketPairTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName='runTest'):
unittest.TestCase.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.serv, self.cli = socket.socketpair()
def tearDown(self):
self.serv.close()
self.serv = None
def clientSetUp(self):
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
# The following classes are used by the sendmsg()/recvmsg() tests.
# Combining, for instance, ConnectedStreamTestMixin and TCPTestBase
# gives a drop-in replacement for SocketConnectedTest, but different
# address families can be used, and the attributes serv_addr and
# cli_addr will be set to the addresses of the endpoints.
class SocketTestBase(unittest.TestCase):
"""A base class for socket tests.
Subclasses must provide methods newSocket() to return a new socket
and bindSock(sock) to bind it to an unused address.
Creates a socket self.serv and sets self.serv_addr to its address.
"""
def setUp(self):
self.serv = self.newSocket()
self.bindServer()
def bindServer(self):
"""Bind server socket and set self.serv_addr to its address."""
self.bindSock(self.serv)
self.serv_addr = self.serv.getsockname()
def tearDown(self):
self.serv.close()
self.serv = None
class SocketListeningTestMixin(SocketTestBase):
"""Mixin to listen on the server socket."""
def setUp(self):
super().setUp()
self.serv.listen()
class ThreadedSocketTestMixin(ThreadSafeCleanupTestCase, SocketTestBase,
ThreadableTest):
"""Mixin to add client socket and allow client/server tests.
Client socket is self.cli and its address is self.cli_addr. See
ThreadableTest for usage information.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = self.newClientSocket()
self.bindClient()
def newClientSocket(self):
"""Return a new socket for use as client."""
return self.newSocket()
def bindClient(self):
"""Bind client socket and set self.cli_addr to its address."""
self.bindSock(self.cli)
self.cli_addr = self.cli.getsockname()
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ConnectedStreamTestMixin(SocketListeningTestMixin,
ThreadedSocketTestMixin):
"""Mixin to allow client/server stream tests with connected client.
Server's socket representing connection to client is self.cli_conn
and client's connection to server is self.serv_conn. (Based on
SocketConnectedTest.)
"""
def setUp(self):
super().setUp()
# Indicate explicitly we're ready for the client thread to
# proceed and then perform the blocking call to accept
self.serverExplicitReady()
conn, addr = self.serv.accept()
self.cli_conn = conn
def tearDown(self):
self.cli_conn.close()
self.cli_conn = None
super().tearDown()
def clientSetUp(self):
super().clientSetUp()
self.cli.connect(self.serv_addr)
self.serv_conn = self.cli
def clientTearDown(self):
try:
self.serv_conn.close()
self.serv_conn = None
except AttributeError:
pass
super().clientTearDown()
class UnixSocketTestBase(SocketTestBase):
"""Base class for Unix-domain socket tests."""
# This class is used for file descriptor passing tests, so we
# create the sockets in a private directory so that other users
# can't send anything that might be problematic for a privileged
# user running the tests.
def setUp(self):
self.dir_path = tempfile.mkdtemp()
self.addCleanup(os.rmdir, self.dir_path)
super().setUp()
def bindSock(self, sock):
path = tempfile.mktemp(dir=self.dir_path)
support.bind_unix_socket(sock, path)
self.addCleanup(support.unlink, path)
class UnixStreamBase(UnixSocketTestBase):
"""Base class for Unix-domain SOCK_STREAM tests."""
def newSocket(self):
return socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
class InetTestBase(SocketTestBase):
"""Base class for IPv4 socket tests."""
host = HOST
def setUp(self):
super().setUp()
self.port = self.serv_addr[1]
def bindSock(self, sock):
support.bind_port(sock, host=self.host)
class TCPTestBase(InetTestBase):
"""Base class for TCP-over-IPv4 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_STREAM)
class UDPTestBase(InetTestBase):
"""Base class for UDP-over-IPv4 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
class SCTPStreamBase(InetTestBase):
"""Base class for SCTP tests in one-to-one (SOCK_STREAM) mode."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_STREAM,
socket.IPPROTO_SCTP)
class Inet6TestBase(InetTestBase):
"""Base class for IPv6 socket tests."""
host = support.HOSTv6
class UDP6TestBase(Inet6TestBase):
"""Base class for UDP-over-IPv6 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
# Test-skipping decorators for use with ThreadableTest.
def skipWithClientIf(condition, reason):
"""Skip decorated test if condition is true, add client_skip decorator.
If the decorated object is not a class, sets its attribute
"client_skip" to a decorator which will return an empty function
if the test is to be skipped, or the original function if it is
not. This can be used to avoid running the client part of a
skipped test when using ThreadableTest.
"""
def client_pass(*args, **kwargs):
pass
def skipdec(obj):
retval = unittest.skip(reason)(obj)
if not isinstance(obj, type):
retval.client_skip = lambda f: client_pass
return retval
def noskipdec(obj):
if not (isinstance(obj, type) or hasattr(obj, "client_skip")):
obj.client_skip = lambda f: f
return obj
return skipdec if condition else noskipdec
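# Usage sketch (illustrative names only): decorate the server half of a
# threaded test, then wrap the matching _test* client half with the
# generated client_skip attribute so it is skipped as well:
#
#     @skipWithClientIf(sys.platform == 'win32', "not supported on Windows")
#     def testFeature(self):
#         ...                     # runs in the server thread
#     @testFeature.client_skip
#     def _testFeature(self):
#         ...                     # runs in the client thread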
def requireAttrs(obj, *attributes):
"""Skip decorated test if obj is missing any of the given attributes.
Sets client_skip attribute as skipWithClientIf() does.
"""
missing = [name for name in attributes if not hasattr(obj, name)]
return skipWithClientIf(
missing, "don't have " + ", ".join(name for name in missing))
def requireSocket(*args):
"""Skip decorated test if a socket cannot be created with given arguments.
When an argument is given as a string, will use the value of that
attribute of the socket module, or skip the test if it doesn't
exist. Sets client_skip attribute as skipWithClientIf() does.
"""
err = None
missing = [obj for obj in args if
isinstance(obj, str) and not hasattr(socket, obj)]
if missing:
err = "don't have " + ", ".join(name for name in missing)
else:
callargs = [getattr(socket, obj) if isinstance(obj, str) else obj
for obj in args]
try:
s = socket.socket(*callargs)
except OSError as e:
# XXX: check errno?
err = str(e)
else:
s.close()
return skipWithClientIf(
err is not None,
"can't create socket({0}): {1}".format(
", ".join(str(o) for o in args), err))
#######################################################################
## Begin Tests
class GeneralModuleTests(unittest.TestCase):
def test_SocketType_is_socketobject(self):
import _socket
self.assertTrue(socket.SocketType is _socket.socket)
s = socket.socket()
self.assertIsInstance(s, socket.SocketType)
s.close()
def test_repr(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
with s:
self.assertIn('fd=%i' % s.fileno(), repr(s))
self.assertIn('family=%s' % socket.AF_INET, repr(s))
self.assertIn('type=%s' % socket.SOCK_STREAM, repr(s))
self.assertIn('proto=0', repr(s))
self.assertNotIn('raddr', repr(s))
s.bind(('127.0.0.1', 0))
self.assertIn('laddr', repr(s))
self.assertIn(str(s.getsockname()), repr(s))
self.assertIn('[closed]', repr(s))
self.assertNotIn('laddr', repr(s))
@unittest.skipUnless(_socket is not None, 'need _socket module')
def test_csocket_repr(self):
s = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM)
try:
expected = ('<socket object, fd=%s, family=%s, type=%s, proto=%s>'
% (s.fileno(), s.family, s.type, s.proto))
self.assertEqual(repr(s), expected)
finally:
s.close()
expected = ('<socket object, fd=-1, family=%s, type=%s, proto=%s>'
% (s.family, s.type, s.proto))
self.assertEqual(repr(s), expected)
def test_weakref(self):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
p = proxy(s)
self.assertEqual(p.fileno(), s.fileno())
s = None
try:
p.fileno()
except ReferenceError:
pass
else:
self.fail('Socket proxy still exists')
def testSocketError(self):
# Testing socket module exceptions
msg = "Error raising socket exception (%s)."
with self.assertRaises(OSError, msg=msg % 'OSError'):
raise OSError
with self.assertRaises(OSError, msg=msg % 'socket.herror'):
raise socket.herror
with self.assertRaises(OSError, msg=msg % 'socket.gaierror'):
raise socket.gaierror
def testSendtoErrors(self):
# Testing that sendto doesn't mask failures. See #10169.
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
s.bind(('', 0))
sockname = s.getsockname()
# 2 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'str'")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'complex'")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None)
self.assertIn('not NoneType',str(cm.exception))
# 3 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', 0, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'str'")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, 0, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'complex'")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, None)
self.assertIn('not NoneType', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 'bar', sockname)
self.assertIn('an integer is required', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None, None)
self.assertIn('an integer is required', str(cm.exception))
# wrong number of args
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo')
self.assertIn('(1 given)', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, sockname, 4)
self.assertIn('(4 given)', str(cm.exception))
def testCrucialConstants(self):
# Testing for mission critical constants
socket.AF_INET
if socket.has_ipv6:
socket.AF_INET6
socket.SOCK_STREAM
socket.SOCK_DGRAM
socket.SOCK_RAW
socket.SOCK_RDM
socket.SOCK_SEQPACKET
socket.SOL_SOCKET
socket.SO_REUSEADDR
def testCrucialIpProtoConstants(self):
socket.IPPROTO_TCP
socket.IPPROTO_UDP
if socket.has_ipv6:
socket.IPPROTO_IPV6
@unittest.skipUnless(os.name == "nt", "Windows specific")
def testWindowsSpecificConstants(self):
socket.IPPROTO_ICLFXBM
socket.IPPROTO_ST
socket.IPPROTO_CBT
socket.IPPROTO_IGP
socket.IPPROTO_RDP
socket.IPPROTO_PGM
socket.IPPROTO_L2TP
socket.IPPROTO_SCTP
def testHostnameRes(self):
# Testing hostname resolution mechanisms
hostname = socket.gethostname()
try:
ip = socket.gethostbyname(hostname)
except OSError:
# Probably name lookup wasn't set up right; skip this test
self.skipTest('name lookup failure')
self.assertTrue(ip.find('.') >= 0, "Error resolving host to ip.")
try:
hname, aliases, ipaddrs = socket.gethostbyaddr(ip)
except OSError:
# Probably a similar problem as above; skip this test
self.skipTest('name lookup failure')
all_host_names = [hostname, hname] + aliases
fqhn = socket.getfqdn(ip)
if not fqhn in all_host_names:
self.fail("Error testing host resolution mechanisms. (fqdn: %s, all: %s)" % (fqhn, repr(all_host_names)))
def test_host_resolution(self):
for addr in [support.HOSTv4, '10.0.0.1', '255.255.255.255']:
self.assertEqual(socket.gethostbyname(addr), addr)
# we don't test support.HOSTv6 because there's a chance it doesn't have
# a matching name entry (e.g. 'ip6-localhost')
for host in [support.HOSTv4]:
self.assertIn(host, socket.gethostbyaddr(host)[2])
def test_host_resolution_bad_address(self):
# These are all malformed IP addresses and expected not to resolve to
# any result. But some ISPs, e.g. AWS, may successfully resolve these
# IPs.
explanation = (
"resolving an invalid IP address did not raise OSError; "
"can be caused by a broken DNS server"
)
for addr in ['0.1.1.~1', '1+.1.1.1', '::1q', '::1::2',
'1:1:1:1:1:1:1:1:1']:
with self.assertRaises(OSError, msg=addr):
socket.gethostbyname(addr)
with self.assertRaises(OSError, msg=explanation):
socket.gethostbyaddr(addr)
@unittest.skipUnless(hasattr(socket, 'sethostname'), "test needs socket.sethostname()")
@unittest.skipUnless(hasattr(socket, 'gethostname'), "test needs socket.gethostname()")
def test_sethostname(self):
oldhn = socket.gethostname()
try:
socket.sethostname('new')
except OSError as e:
if e.errno == errno.EPERM:
self.skipTest("test should be run as root")
else:
raise
try:
# running test as root!
self.assertEqual(socket.gethostname(), 'new')
# Should work with bytes objects too
socket.sethostname(b'bar')
self.assertEqual(socket.gethostname(), 'bar')
finally:
socket.sethostname(oldhn)
@unittest.skipUnless(hasattr(socket, 'if_nameindex'),
'socket.if_nameindex() not available.')
def testInterfaceNameIndex(self):
interfaces = socket.if_nameindex()
for index, name in interfaces:
self.assertIsInstance(index, int)
self.assertIsInstance(name, str)
# interface indices are non-zero integers
self.assertGreater(index, 0)
_index = socket.if_nametoindex(name)
self.assertIsInstance(_index, int)
self.assertEqual(index, _index)
_name = socket.if_indextoname(index)
self.assertIsInstance(_name, str)
self.assertEqual(name, _name)
@unittest.skipUnless(hasattr(socket, 'if_indextoname'),
'socket.if_indextoname() not available.')
def testInvalidInterfaceIndexToName(self):
self.assertRaises(OSError, socket.if_indextoname, 0)
self.assertRaises(TypeError, socket.if_indextoname, '_DEADBEEF')
@unittest.skipUnless(hasattr(socket, 'if_nametoindex'),
'socket.if_nametoindex() not available.')
def testInvalidInterfaceNameToIndex(self):
self.assertRaises(TypeError, socket.if_nametoindex, 0)
self.assertRaises(OSError, socket.if_nametoindex, '_DEADBEEF')
@unittest.skipUnless(hasattr(sys, 'getrefcount'),
'test needs sys.getrefcount()')
def testRefCountGetNameInfo(self):
# Testing reference count for getnameinfo
try:
# On some versions, this loses a reference
orig = sys.getrefcount(__name__)
socket.getnameinfo(__name__,0)
except TypeError:
if sys.getrefcount(__name__) != orig:
self.fail("socket.getnameinfo loses a reference")
def testInterpreterCrash(self):
# Making sure getnameinfo doesn't crash the interpreter
try:
# On some versions, this crashes the interpreter.
socket.getnameinfo(('x', 0, 0, 0), 0)
except OSError:
pass
def testNtoH(self):
# This just checks that htons etc. are their own inverse,
# when looking at the lower 16 or 32 bits.
sizes = {socket.htonl: 32, socket.ntohl: 32,
socket.htons: 16, socket.ntohs: 16}
for func, size in sizes.items():
mask = (1<<size) - 1
for i in (0, 1, 0xffff, ~0xffff, 2, 0x01234567, 0x76543210):
self.assertEqual(i & mask, func(func(i&mask)) & mask)
swapped = func(mask)
self.assertEqual(swapped & mask, mask)
self.assertRaises(OverflowError, func, 1<<34)
@support.cpython_only
def testNtoHErrors(self):
import _testcapi
s_good_values = [0, 1, 2, 0xffff]
l_good_values = s_good_values + [0xffffffff]
l_bad_values = [-1, -2, 1<<32, 1<<1000]
s_bad_values = l_bad_values + [_testcapi.INT_MIN - 1,
_testcapi.INT_MAX + 1]
s_deprecated_values = [1<<16, _testcapi.INT_MAX]
for k in s_good_values:
socket.ntohs(k)
socket.htons(k)
for k in l_good_values:
socket.ntohl(k)
socket.htonl(k)
for k in s_bad_values:
self.assertRaises(OverflowError, socket.ntohs, k)
self.assertRaises(OverflowError, socket.htons, k)
for k in l_bad_values:
self.assertRaises(OverflowError, socket.ntohl, k)
self.assertRaises(OverflowError, socket.htonl, k)
for k in s_deprecated_values:
self.assertWarns(DeprecationWarning, socket.ntohs, k)
self.assertWarns(DeprecationWarning, socket.htons, k)
def testGetServBy(self):
eq = self.assertEqual
# Find one service that exists, then check all the related interfaces.
# The services are ordered to prefer ones that exist for both tcp and
# udp, at least on modern Linuxes.
if (sys.platform.startswith(('freebsd', 'netbsd', 'gnukfreebsd'))
or sys.platform in ('linux', 'darwin')):
# avoid the 'echo' service on these platforms, as they may ship a
# non-standard port/protocol entry that breaks the test's assumptions
services = ('daytime', 'qotd', 'domain')
else:
services = ('echo', 'daytime', 'domain')
for service in services:
try:
port = socket.getservbyname(service, 'tcp')
break
except OSError:
pass
else:
raise OSError
# Try same call with optional protocol omitted
# Issue #26936: Android getservbyname() was broken before API 23.
if (not hasattr(sys, 'getandroidapilevel') or
sys.getandroidapilevel() >= 23):
port2 = socket.getservbyname(service)
eq(port, port2)
# Try udp, but don't barf if it doesn't exist
try:
udpport = socket.getservbyname(service, 'udp')
except OSError:
udpport = None
else:
eq(udpport, port)
# Now make sure the lookup by port returns the same service name
# Issue #26936: Android getservbyport() is broken.
if not support.is_android:
eq(socket.getservbyport(port2), service)
eq(socket.getservbyport(port, 'tcp'), service)
if udpport is not None:
eq(socket.getservbyport(udpport, 'udp'), service)
# Make sure getservbyport does not accept out of range ports.
self.assertRaises(OverflowError, socket.getservbyport, -1)
self.assertRaises(OverflowError, socket.getservbyport, 65536)
def testDefaultTimeout(self):
# Testing default timeout
# The default timeout should initially be None
self.assertEqual(socket.getdefaulttimeout(), None)
with socket.socket() as s:
self.assertEqual(s.gettimeout(), None)
# Set the default timeout to 10, and see if it propagates
with socket_setdefaulttimeout(10):
self.assertEqual(socket.getdefaulttimeout(), 10)
with socket.socket() as sock:
self.assertEqual(sock.gettimeout(), 10)
# Reset the default timeout to None, and see if it propagates
socket.setdefaulttimeout(None)
self.assertEqual(socket.getdefaulttimeout(), None)
with socket.socket() as sock:
self.assertEqual(sock.gettimeout(), None)
# Check that setting it to an invalid value raises ValueError
self.assertRaises(ValueError, socket.setdefaulttimeout, -1)
# Check that setting it to an invalid type raises TypeError
self.assertRaises(TypeError, socket.setdefaulttimeout, "spam")
@unittest.skipUnless(hasattr(socket, 'inet_aton'),
'test needs socket.inet_aton()')
def testIPv4_inet_aton_fourbytes(self):
# Test that issue1008086 and issue767150 are fixed.
# It must return 4 bytes.
self.assertEqual(b'\x00'*4, socket.inet_aton('0.0.0.0'))
self.assertEqual(b'\xff'*4, socket.inet_aton('255.255.255.255'))
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
'test needs socket.inet_pton()')
def testIPv4toString(self):
from socket import inet_aton as f, inet_pton, AF_INET
g = lambda a: inet_pton(AF_INET, a)
assertInvalid = lambda func,a: self.assertRaises(
(OSError, ValueError), func, a
)
self.assertEqual(b'\x00\x00\x00\x00', f('0.0.0.0'))
self.assertEqual(b'\xff\x00\xff\x00', f('255.0.255.0'))
self.assertEqual(b'\xaa\xaa\xaa\xaa', f('170.170.170.170'))
self.assertEqual(b'\x01\x02\x03\x04', f('1.2.3.4'))
self.assertEqual(b'\xff\xff\xff\xff', f('255.255.255.255'))
# bpo-29972: inet_pton() doesn't fail on AIX
if not AIX:
assertInvalid(f, '0.0.0.')
assertInvalid(f, '300.0.0.0')
assertInvalid(f, 'a.0.0.0')
assertInvalid(f, '1.2.3.4.5')
assertInvalid(f, '::1')
self.assertEqual(b'\x00\x00\x00\x00', g('0.0.0.0'))
self.assertEqual(b'\xff\x00\xff\x00', g('255.0.255.0'))
self.assertEqual(b'\xaa\xaa\xaa\xaa', g('170.170.170.170'))
self.assertEqual(b'\xff\xff\xff\xff', g('255.255.255.255'))
assertInvalid(g, '0.0.0.')
assertInvalid(g, '300.0.0.0')
assertInvalid(g, 'a.0.0.0')
assertInvalid(g, '1.2.3.4.5')
assertInvalid(g, '::1')
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
'test needs socket.inet_pton()')
def testIPv6toString(self):
try:
from socket import inet_pton, AF_INET6, has_ipv6
if not has_ipv6:
self.skipTest('IPv6 not available')
except ImportError:
self.skipTest('could not import needed symbols from socket')
if sys.platform == "win32":
try:
inet_pton(AF_INET6, '::')
except OSError as e:
if e.winerror == 10022:
self.skipTest('IPv6 might not be supported')
f = lambda a: inet_pton(AF_INET6, a)
assertInvalid = lambda a: self.assertRaises(
(OSError, ValueError), f, a
)
self.assertEqual(b'\x00' * 16, f('::'))
self.assertEqual(b'\x00' * 16, f('0::0'))
self.assertEqual(b'\x00\x01' + b'\x00' * 14, f('1::'))
self.assertEqual(
b'\x45\xef\x76\xcb\x00\x1a\x56\xef\xaf\xeb\x0b\xac\x19\x24\xae\xae',
f('45ef:76cb:1a:56ef:afeb:bac:1924:aeae')
)
self.assertEqual(
b'\xad\x42\x0a\xbc' + b'\x00' * 4 + b'\x01\x27\x00\x00\x02\x54\x00\x02',
f('ad42:abc::127:0:254:2')
)
self.assertEqual(b'\x00\x12\x00\x0a' + b'\x00' * 12, f('12:a::'))
assertInvalid('0x20::')
assertInvalid(':::')
assertInvalid('::0::')
assertInvalid('1::abc::')
assertInvalid('1::abc::def')
assertInvalid('1:2:3:4:5:6')
assertInvalid('1:2:3:4:5:6:')
assertInvalid('1:2:3:4:5:6:7:8:0')
# bpo-29972: inet_pton() doesn't fail on AIX
if not AIX:
assertInvalid('1:2:3:4:5:6:7:8:')
self.assertEqual(b'\x00' * 12 + b'\xfe\x2a\x17\x40',
f('::254.42.23.64')
)
self.assertEqual(
b'\x00\x42' + b'\x00' * 8 + b'\xa2\x9b\xfe\x2a\x17\x40',
f('42::a29b:254.42.23.64')
)
self.assertEqual(
b'\x00\x42\xa8\xb9\x00\x00\x00\x02\xff\xff\xa2\x9b\xfe\x2a\x17\x40',
f('42:a8b9:0:2:ffff:a29b:254.42.23.64')
)
assertInvalid('255.254.253.252')
assertInvalid('1::260.2.3.0')
assertInvalid('1::0.be.e.0')
assertInvalid('1:2:3:4:5:6:7:1.2.3.4')
assertInvalid('::1.2.3.4:0')
assertInvalid('0.100.200.0:3:4:5:6:7:8')
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
'test needs socket.inet_ntop()')
def testStringToIPv4(self):
from socket import inet_ntoa as f, inet_ntop, AF_INET
g = lambda a: inet_ntop(AF_INET, a)
assertInvalid = lambda func,a: self.assertRaises(
(OSError, ValueError), func, a
)
self.assertEqual('1.0.1.0', f(b'\x01\x00\x01\x00'))
self.assertEqual('170.85.170.85', f(b'\xaa\x55\xaa\x55'))
self.assertEqual('255.255.255.255', f(b'\xff\xff\xff\xff'))
self.assertEqual('1.2.3.4', f(b'\x01\x02\x03\x04'))
assertInvalid(f, b'\x00' * 3)
assertInvalid(f, b'\x00' * 5)
assertInvalid(f, b'\x00' * 16)
self.assertEqual('170.85.170.85', f(bytearray(b'\xaa\x55\xaa\x55')))
self.assertEqual('1.0.1.0', g(b'\x01\x00\x01\x00'))
self.assertEqual('170.85.170.85', g(b'\xaa\x55\xaa\x55'))
self.assertEqual('255.255.255.255', g(b'\xff\xff\xff\xff'))
assertInvalid(g, b'\x00' * 3)
assertInvalid(g, b'\x00' * 5)
assertInvalid(g, b'\x00' * 16)
self.assertEqual('170.85.170.85', g(bytearray(b'\xaa\x55\xaa\x55')))
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
'test needs socket.inet_ntop()')
def testStringToIPv6(self):
try:
from socket import inet_ntop, AF_INET6, has_ipv6
if not has_ipv6:
self.skipTest('IPv6 not available')
except ImportError:
self.skipTest('could not import needed symbols from socket')
if sys.platform == "win32":
try:
inet_ntop(AF_INET6, b'\x00' * 16)
except OSError as e:
if e.winerror == 10022:
self.skipTest('IPv6 might not be supported')
f = lambda a: inet_ntop(AF_INET6, a)
assertInvalid = lambda a: self.assertRaises(
(OSError, ValueError), f, a
)
self.assertEqual('::', f(b'\x00' * 16))
self.assertEqual('::1', f(b'\x00' * 15 + b'\x01'))
self.assertEqual(
'aef:b01:506:1001:ffff:9997:55:170',
f(b'\x0a\xef\x0b\x01\x05\x06\x10\x01\xff\xff\x99\x97\x00\x55\x01\x70')
)
self.assertEqual('::1', f(bytearray(b'\x00' * 15 + b'\x01')))
assertInvalid(b'\x12' * 15)
assertInvalid(b'\x12' * 17)
assertInvalid(b'\x12' * 4)
# XXX The following don't test module-level functionality...
def testSockName(self):
# Testing getsockname()
port = support.find_unused_port()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
sock.bind(("0.0.0.0", port))
name = sock.getsockname()
# XXX(nnorwitz): http://tinyurl.com/os5jz seems to indicate that it is
# reasonable to get the host's addr in addition to 0.0.0.0, at least
# for eCos. This is required for the S/390 to pass.
try:
my_ip_addr = socket.gethostbyname(socket.gethostname())
except OSError:
# Probably name lookup wasn't set up right; skip this test
self.skipTest('name lookup failure')
self.assertIn(name[0], ("0.0.0.0", my_ip_addr), '%s invalid' % name[0])
self.assertEqual(name[1], port)
def testGetSockOpt(self):
# Testing getsockopt()
# A new socket should start with SO_REUSEADDR unset (reuse == 0)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertFalse(reuse != 0, "initial mode is reuse")
def testSetSockOpt(self):
# Testing setsockopt()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertFalse(reuse == 0, "failed to set reuse mode")
def testSendAfterClose(self):
# testing send() after close() with timeout
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
sock.settimeout(1)
self.assertRaises(OSError, sock.send, b"spam")
def testCloseException(self):
sock = socket.socket()
sock.bind((socket._LOCALHOST, 0))
socket.socket(fileno=sock.fileno()).close()
try:
sock.close()
except OSError as err:
# Winsock apparently raises ENOTSOCK
self.assertIn(err.errno, (errno.EBADF, errno.ENOTSOCK))
else:
self.fail("close() should raise EBADF/ENOTSOCK")
def testNewAttributes(self):
# testing .family, .type and .protocol
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
self.assertEqual(sock.family, socket.AF_INET)
if hasattr(socket, 'SOCK_CLOEXEC'):
self.assertIn(sock.type,
(socket.SOCK_STREAM | socket.SOCK_CLOEXEC,
socket.SOCK_STREAM))
else:
self.assertEqual(sock.type, socket.SOCK_STREAM)
self.assertEqual(sock.proto, 0)
def test_getsockaddrarg(self):
sock = socket.socket()
self.addCleanup(sock.close)
port = support.find_unused_port()
big_port = port + 65536
neg_port = port - 65536
self.assertRaises(OverflowError, sock.bind, (HOST, big_port))
self.assertRaises(OverflowError, sock.bind, (HOST, neg_port))
# Since find_unused_port() is inherently subject to race conditions, we
# call it a couple of times if necessary.
for i in itertools.count():
port = support.find_unused_port()
try:
sock.bind((HOST, port))
except OSError as e:
if e.errno != errno.EADDRINUSE or i == 5:
raise
else:
break
@unittest.skipUnless(os.name == "nt", "Windows specific")
def test_sock_ioctl(self):
self.assertTrue(hasattr(socket.socket, 'ioctl'))
self.assertTrue(hasattr(socket, 'SIO_RCVALL'))
self.assertTrue(hasattr(socket, 'RCVALL_ON'))
self.assertTrue(hasattr(socket, 'RCVALL_OFF'))
self.assertTrue(hasattr(socket, 'SIO_KEEPALIVE_VALS'))
s = socket.socket()
self.addCleanup(s.close)
self.assertRaises(ValueError, s.ioctl, -1, None)
s.ioctl(socket.SIO_KEEPALIVE_VALS, (1, 100, 100))
@unittest.skipUnless(os.name == "nt", "Windows specific")
@unittest.skipUnless(hasattr(socket, 'SIO_LOOPBACK_FAST_PATH'),
'Loopback fast path support required for this test')
def test_sio_loopback_fast_path(self):
s = socket.socket()
self.addCleanup(s.close)
try:
s.ioctl(socket.SIO_LOOPBACK_FAST_PATH, True)
except OSError as exc:
WSAEOPNOTSUPP = 10045
if exc.winerror == WSAEOPNOTSUPP:
self.skipTest("SIO_LOOPBACK_FAST_PATH is defined but "
"doesn't implemented in this Windows version")
raise
self.assertRaises(TypeError, s.ioctl, socket.SIO_LOOPBACK_FAST_PATH, None)
def testGetaddrinfo(self):
try:
socket.getaddrinfo('localhost', 80)
except socket.gaierror as err:
if err.errno == socket.EAI_SERVICE:
# see http://bugs.python.org/issue1282647
self.skipTest("buggy libc version")
raise
# len of every sequence is supposed to be == 5
for info in socket.getaddrinfo(HOST, None):
self.assertEqual(len(info), 5)
# host can be a domain name, a string representation of an
# IPv4/v6 address or None
socket.getaddrinfo('localhost', 80)
socket.getaddrinfo('127.0.0.1', 80)
socket.getaddrinfo(None, 80)
if support.IPV6_ENABLED:
socket.getaddrinfo('::1', 80)
# port can be a string service name such as "http", a numeric
# port number or None
# Issue #26936: Android getaddrinfo() was broken before API level 23.
if (not hasattr(sys, 'getandroidapilevel') or
sys.getandroidapilevel() >= 23):
socket.getaddrinfo(HOST, "http")
socket.getaddrinfo(HOST, 80)
socket.getaddrinfo(HOST, None)
# test family and socktype filters
infos = socket.getaddrinfo(HOST, 80, socket.AF_INET, socket.SOCK_STREAM)
for family, type, _, _, _ in infos:
self.assertEqual(family, socket.AF_INET)
self.assertEqual(str(family), 'AddressFamily.AF_INET')
self.assertEqual(type, socket.SOCK_STREAM)
self.assertEqual(str(type), 'SocketKind.SOCK_STREAM')
infos = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
for _, socktype, _, _, _ in infos:
self.assertEqual(socktype, socket.SOCK_STREAM)
# test proto and flags arguments
socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
# a server willing to support both IPv4 and IPv6 will
# usually do this
socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
socket.AI_PASSIVE)
# test keyword arguments
a = socket.getaddrinfo(HOST, None)
b = socket.getaddrinfo(host=HOST, port=None)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, socket.AF_INET)
b = socket.getaddrinfo(HOST, None, family=socket.AF_INET)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
b = socket.getaddrinfo(HOST, None, type=socket.SOCK_STREAM)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
b = socket.getaddrinfo(HOST, None, proto=socket.SOL_TCP)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
b = socket.getaddrinfo(HOST, None, flags=socket.AI_PASSIVE)
self.assertEqual(a, b)
a = socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
socket.AI_PASSIVE)
b = socket.getaddrinfo(host=None, port=0, family=socket.AF_UNSPEC,
type=socket.SOCK_STREAM, proto=0,
flags=socket.AI_PASSIVE)
self.assertEqual(a, b)
# Issue #6697.
self.assertRaises(UnicodeEncodeError, socket.getaddrinfo, 'localhost', '\uD800')
# Issue 17269: test workaround for OS X platform bug segfault
if hasattr(socket, 'AI_NUMERICSERV'):
try:
# The arguments here are undefined and the call may succeed
# or fail. All we care about here is that it doesn't segfault.
socket.getaddrinfo("localhost", None, 0, 0, 0,
socket.AI_NUMERICSERV)
except socket.gaierror:
pass
def test_getnameinfo(self):
# only IP addresses are allowed
self.assertRaises(OSError, socket.getnameinfo, ('mail.python.org',0), 0)
@unittest.skipUnless(support.is_resource_enabled('network'),
'network is not enabled')
def test_idna(self):
# Check for internet access before running test
# (issue #12804, issue #25138).
with support.transient_internet('python.org'):
socket.gethostbyname('python.org')
# these should all be successful
domain = 'испытание.pythontest.net'
socket.gethostbyname(domain)
socket.gethostbyname_ex(domain)
socket.getaddrinfo(domain,0,socket.AF_UNSPEC,socket.SOCK_STREAM)
# this may not work if the forward lookup chooses the IPv6 address, as that doesn't
# have a reverse entry yet
# socket.gethostbyaddr('испытание.python.org')
def check_sendall_interrupted(self, with_timeout):
# socketpair() is not strictly required, but it makes things easier.
if not hasattr(signal, 'alarm') or not hasattr(socket, 'socketpair'):
self.skipTest("signal.alarm and socket.socketpair required for this test")
# Our signal handlers clobber the C errno by calling a math function
# with an invalid domain value.
def ok_handler(*args):
self.assertRaises(ValueError, math.acosh, 0)
def raising_handler(*args):
self.assertRaises(ValueError, math.acosh, 0)
1 // 0
c, s = socket.socketpair()
old_alarm = signal.signal(signal.SIGALRM, raising_handler)
try:
if with_timeout:
# Just above the one second minimum for signal.alarm
c.settimeout(1.5)
with self.assertRaises(ZeroDivisionError):
signal.alarm(1)
c.sendall(b"x" * support.SOCK_MAX_SIZE)
if with_timeout:
signal.signal(signal.SIGALRM, ok_handler)
signal.alarm(1)
self.assertRaises(socket.timeout, c.sendall,
b"x" * support.SOCK_MAX_SIZE)
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, old_alarm)
c.close()
s.close()
def test_sendall_interrupted(self):
self.check_sendall_interrupted(False)
def test_sendall_interrupted_with_timeout(self):
self.check_sendall_interrupted(True)
def test_dealloc_warn(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
r = repr(sock)
with self.assertWarns(ResourceWarning) as cm:
sock = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
# An open socket file object gets dereferenced after the socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
f = sock.makefile('rb')
r = repr(sock)
sock = None
support.gc_collect()
with self.assertWarns(ResourceWarning):
f = None
support.gc_collect()
def test_name_closed_socketio(self):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
fp = sock.makefile("rb")
fp.close()
self.assertEqual(repr(fp), "<_io.BufferedReader name=-1>")
def test_unusable_closed_socketio(self):
with socket.socket() as sock:
fp = sock.makefile("rb", buffering=0)
self.assertTrue(fp.readable())
self.assertFalse(fp.writable())
self.assertFalse(fp.seekable())
fp.close()
self.assertRaises(ValueError, fp.readable)
self.assertRaises(ValueError, fp.writable)
self.assertRaises(ValueError, fp.seekable)
def test_socket_close(self):
sock = socket.socket()
try:
sock.bind((HOST, 0))
socket.close(sock.fileno())
with self.assertRaises(OSError):
sock.listen(1)
finally:
with self.assertRaises(OSError):
# sock.close() fails with EBADF
sock.close()
with self.assertRaises(TypeError):
socket.close(None)
with self.assertRaises(OSError):
socket.close(-1)
def test_makefile_mode(self):
for mode in 'r', 'rb', 'rw', 'w', 'wb':
with self.subTest(mode=mode):
with socket.socket() as sock:
with sock.makefile(mode) as fp:
self.assertEqual(fp.mode, mode)
def test_makefile_invalid_mode(self):
for mode in 'rt', 'x', '+', 'a':
with self.subTest(mode=mode):
with socket.socket() as sock:
with self.assertRaisesRegex(ValueError, 'invalid mode'):
sock.makefile(mode)
def test_pickle(self):
sock = socket.socket()
with sock:
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
self.assertRaises(TypeError, pickle.dumps, sock, protocol)
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
family = pickle.loads(pickle.dumps(socket.AF_INET, protocol))
self.assertEqual(family, socket.AF_INET)
type = pickle.loads(pickle.dumps(socket.SOCK_STREAM, protocol))
self.assertEqual(type, socket.SOCK_STREAM)
def test_listen_backlog(self):
for backlog in 0, -1:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
srv.bind((HOST, 0))
srv.listen(backlog)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
srv.bind((HOST, 0))
srv.listen()
@support.cpython_only
def test_listen_backlog_overflow(self):
# Issue 15989
import _testcapi
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
srv.bind((HOST, 0))
self.assertRaises(OverflowError, srv.listen, _testcapi.INT_MAX + 1)
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
def test_flowinfo(self):
self.assertRaises(OverflowError, socket.getnameinfo,
(support.HOSTv6, 0, 0xffffffff), 0)
with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
self.assertRaises(OverflowError, s.bind, (support.HOSTv6, 0, -10))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
def test_getaddrinfo_ipv6_basic(self):
((*_, sockaddr),) = socket.getaddrinfo(
'ff02::1de:c0:face:8D', # Note capital letter `D`.
1234, socket.AF_INET6,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP
)
self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, 0))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipIf(sys.platform == 'win32', 'does not work on Windows')
@unittest.skipIf(AIX, 'Symbolic scope id does not work')
def test_getaddrinfo_ipv6_scopeid_symbolic(self):
# Just pick up any network interface (Linux, Mac OS X)
(ifindex, test_interface) = socket.if_nameindex()[0]
((*_, sockaddr),) = socket.getaddrinfo(
'ff02::1de:c0:face:8D%' + test_interface,
1234, socket.AF_INET6,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP
)
# Note missing interface name part in IPv6 address
self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, ifindex))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(
sys.platform == 'win32',
'Numeric scope id does not work or undocumented')
def test_getaddrinfo_ipv6_scopeid_numeric(self):
# Also works on Linux and Mac OS X, but is not documented (?)
# Windows, Linux and Mac OS X allow nonexistent interface numbers here.
ifindex = 42
((*_, sockaddr),) = socket.getaddrinfo(
'ff02::1de:c0:face:8D%' + str(ifindex),
1234, socket.AF_INET6,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP
)
# Note missing interface name part in IPv6 address
self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, ifindex))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipIf(sys.platform == 'win32', 'does not work on Windows')
@unittest.skipIf(AIX, 'Symbolic scope id does not work')
def test_getnameinfo_ipv6_scopeid_symbolic(self):
# Just pick up any network interface.
(ifindex, test_interface) = socket.if_nameindex()[0]
sockaddr = ('ff02::1de:c0:face:8D', 1234, 0, ifindex) # Note capital letter `D`.
nameinfo = socket.getnameinfo(sockaddr, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV)
self.assertEqual(nameinfo, ('ff02::1de:c0:face:8d%' + test_interface, '1234'))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(sys.platform == 'win32',
'Numeric scope id does not work or undocumented')
def test_getnameinfo_ipv6_scopeid_numeric(self):
# Also works on Linux (undocumented), but does not work on Mac OS X
# Windows and Linux allow nonexistent interface numbers here.
ifindex = 42
sockaddr = ('ff02::1de:c0:face:8D', 1234, 0, ifindex) # Note capital letter `D`.
nameinfo = socket.getnameinfo(sockaddr, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV)
self.assertEqual(nameinfo, ('ff02::1de:c0:face:8d%' + str(ifindex), '1234'))
def test_str_for_enums(self):
# Make sure that the AF_* and SOCK_* constants have enum-like string
# reprs.
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
self.assertEqual(str(s.family), 'AddressFamily.AF_INET')
self.assertEqual(str(s.type), 'SocketKind.SOCK_STREAM')
def test_socket_consistent_sock_type(self):
SOCK_NONBLOCK = getattr(socket, 'SOCK_NONBLOCK', 0)
SOCK_CLOEXEC = getattr(socket, 'SOCK_CLOEXEC', 0)
sock_type = socket.SOCK_STREAM | SOCK_NONBLOCK | SOCK_CLOEXEC
with socket.socket(socket.AF_INET, sock_type) as s:
self.assertEqual(s.type, socket.SOCK_STREAM)
s.settimeout(1)
self.assertEqual(s.type, socket.SOCK_STREAM)
s.settimeout(0)
self.assertEqual(s.type, socket.SOCK_STREAM)
s.setblocking(True)
self.assertEqual(s.type, socket.SOCK_STREAM)
s.setblocking(False)
self.assertEqual(s.type, socket.SOCK_STREAM)
def test_unknown_socket_family_repr(self):
# Test that when created with a family that's not one of the known
# AF_*/SOCK_* constants, socket.family just returns the number.
#
# To do this we fool socket.socket into believing it already has an
# open fd, because on that path it doesn't actually verify the family
# and type; it just populates the socket object from the fd.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
fd = sock.detach()
unknown_family = max(socket.AddressFamily.__members__.values()) + 1
unknown_type = max(
kind
for name, kind in socket.SocketKind.__members__.items()
if name not in {'SOCK_NONBLOCK', 'SOCK_CLOEXEC'}
) + 1
with socket.socket(
family=unknown_family, type=unknown_type, proto=23,
fileno=fd) as s:
self.assertEqual(s.family, unknown_family)
self.assertEqual(s.type, unknown_type)
# some OSes, like macOS, ignore proto
self.assertIn(s.proto, {0, 23})
@unittest.skipUnless(hasattr(os, 'sendfile'), 'test needs os.sendfile()')
def test__sendfile_use_sendfile(self):
class File:
def __init__(self, fd):
self.fd = fd
def fileno(self):
return self.fd
with socket.socket() as sock:
fd = os.open(os.curdir, os.O_RDONLY)
os.close(fd)
with self.assertRaises(socket._GiveupOnSendfile):
sock._sendfile_use_sendfile(File(fd))
with self.assertRaises(OverflowError):
sock._sendfile_use_sendfile(File(2**1000))
with self.assertRaises(TypeError):
sock._sendfile_use_sendfile(File(None))
def _test_socket_fileno(self, s, family, stype):
self.assertEqual(s.family, family)
self.assertEqual(s.type, stype)
fd = s.fileno()
s2 = socket.socket(fileno=fd)
self.addCleanup(s2.close)
# detach old fd to avoid double close
s.detach()
self.assertEqual(s2.family, family)
self.assertEqual(s2.type, stype)
self.assertEqual(s2.fileno(), fd)
def test_socket_fileno(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(s.close)
s.bind((support.HOST, 0))
self._test_socket_fileno(s, socket.AF_INET, socket.SOCK_STREAM)
if hasattr(socket, "SOCK_DGRAM"):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
s.bind((support.HOST, 0))
self._test_socket_fileno(s, socket.AF_INET, socket.SOCK_DGRAM)
if support.IPV6_ENABLED:
s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
self.addCleanup(s.close)
s.bind((support.HOSTv6, 0, 0, 0))
self._test_socket_fileno(s, socket.AF_INET6, socket.SOCK_STREAM)
if hasattr(socket, "AF_UNIX"):
tmpdir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, tmpdir)
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.addCleanup(s.close)
try:
s.bind(os.path.join(tmpdir, 'socket'))
except PermissionError:
pass
else:
self._test_socket_fileno(s, socket.AF_UNIX,
socket.SOCK_STREAM)
def test_socket_fileno_rejects_float(self):
with self.assertRaisesRegex(TypeError, "integer argument expected"):
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=42.5)
def test_socket_fileno_rejects_other_types(self):
with self.assertRaisesRegex(TypeError, "integer is required"):
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno="foo")
def test_socket_fileno_rejects_invalid_socket(self):
with self.assertRaisesRegex(ValueError, "negative file descriptor"):
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=-1)
@unittest.skipIf(os.name == "nt", "Windows disallows -1 only")
def test_socket_fileno_rejects_negative(self):
with self.assertRaisesRegex(ValueError, "negative file descriptor"):
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=-42)
def test_socket_fileno_requires_valid_fd(self):
WSAENOTSOCK = 10038
with self.assertRaises(OSError) as cm:
socket.socket(fileno=support.make_bad_fd())
self.assertIn(cm.exception.errno, (errno.EBADF, WSAENOTSOCK))
with self.assertRaises(OSError) as cm:
socket.socket(
socket.AF_INET,
socket.SOCK_STREAM,
fileno=support.make_bad_fd())
self.assertIn(cm.exception.errno, (errno.EBADF, WSAENOTSOCK))
def test_socket_fileno_requires_socket_fd(self):
with tempfile.NamedTemporaryFile() as afile:
with self.assertRaises(OSError):
socket.socket(fileno=afile.fileno())
with self.assertRaises(OSError) as cm:
socket.socket(
socket.AF_INET,
socket.SOCK_STREAM,
fileno=afile.fileno())
self.assertEqual(cm.exception.errno, errno.ENOTSOCK)
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
class BasicCANTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_CAN
socket.PF_CAN
socket.CAN_RAW
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testBCMConstants(self):
socket.CAN_BCM
# opcodes
socket.CAN_BCM_TX_SETUP # create (cyclic) transmission task
socket.CAN_BCM_TX_DELETE # remove (cyclic) transmission task
socket.CAN_BCM_TX_READ # read properties of (cyclic) transmission task
socket.CAN_BCM_TX_SEND # send one CAN frame
socket.CAN_BCM_RX_SETUP # create RX content filter subscription
socket.CAN_BCM_RX_DELETE # remove RX content filter subscription
socket.CAN_BCM_RX_READ # read properties of RX content filter subscription
socket.CAN_BCM_TX_STATUS # reply to TX_READ request
socket.CAN_BCM_TX_EXPIRED # notification on performed transmissions (count=0)
socket.CAN_BCM_RX_STATUS # reply to RX_READ request
socket.CAN_BCM_RX_TIMEOUT # cyclic message is absent
socket.CAN_BCM_RX_CHANGED # updated CAN frame (detected content change)
# flags
socket.CAN_BCM_SETTIMER
socket.CAN_BCM_STARTTIMER
socket.CAN_BCM_TX_COUNTEVT
socket.CAN_BCM_TX_ANNOUNCE
socket.CAN_BCM_TX_CP_CAN_ID
socket.CAN_BCM_RX_FILTER_ID
socket.CAN_BCM_RX_CHECK_DLC
socket.CAN_BCM_RX_NO_AUTOTIMER
socket.CAN_BCM_RX_ANNOUNCE_RESUME
socket.CAN_BCM_TX_RESET_MULTI_IDX
socket.CAN_BCM_RX_RTR_FRAME
def testCreateSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
pass
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testCreateBCMSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM) as s:
pass
def testBindAny(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
address = ('', )
s.bind(address)
self.assertEqual(s.getsockname(), address)
def testTooLongInterfaceName(self):
# most systems limit IFNAMSIZ to 16, take 1024 to be sure
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
self.assertRaisesRegex(OSError, 'interface name too long',
s.bind, ('x' * 1024,))
@unittest.skipUnless(hasattr(socket, "CAN_RAW_LOOPBACK"),
'socket.CAN_RAW_LOOPBACK required for this test.')
def testLoopback(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
for loopback in (0, 1):
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK,
loopback)
self.assertEqual(loopback,
s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK))
@unittest.skipUnless(hasattr(socket, "CAN_RAW_FILTER"),
'socket.CAN_RAW_FILTER required for this test.')
def testFilter(self):
can_id, can_mask = 0x200, 0x700
can_filter = struct.pack("=II", can_id, can_mask)
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, can_filter)
self.assertEqual(can_filter,
s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, 8))
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, bytearray(can_filter))
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
class CANTest(ThreadedCANSocketTest):
def __init__(self, methodName='runTest'):
ThreadedCANSocketTest.__init__(self, methodName=methodName)
@classmethod
def build_can_frame(cls, can_id, data):
"""Build a CAN frame."""
can_dlc = len(data)
data = data.ljust(8, b'\x00')
return struct.pack(cls.can_frame_fmt, can_id, can_dlc, data)
@classmethod
def dissect_can_frame(cls, frame):
"""Dissect a CAN frame."""
can_id, can_dlc, data = struct.unpack(cls.can_frame_fmt, frame)
return (can_id, can_dlc, data[:can_dlc])
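# Note: cls.can_frame_fmt is supplied by the CAN test base class (defined
# elsewhere in this module, not shown here); it packs three fields -- the
# CAN id, the DLC and an 8-byte data field -- which is why
# build_can_frame() pads the payload to 8 bytes and dissect_can_frame()
# trims it back down to can_dlc bytes.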
def testSendFrame(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
self.assertEqual(addr[0], self.interface)
def _testSendFrame(self):
self.cf = self.build_can_frame(0x00, b'\x01\x02\x03\x04\x05')
self.cli.send(self.cf)
def testSendMaxFrame(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
def _testSendMaxFrame(self):
self.cf = self.build_can_frame(0x00, b'\x07' * 8)
self.cli.send(self.cf)
def testSendMultiFrames(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf1, cf)
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf2, cf)
def _testSendMultiFrames(self):
self.cf1 = self.build_can_frame(0x07, b'\x44\x33\x22\x11')
self.cli.send(self.cf1)
self.cf2 = self.build_can_frame(0x12, b'\x99\x22\x33')
self.cli.send(self.cf2)
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def _testBCM(self):
cf, addr = self.cli.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
can_id, can_dlc, data = self.dissect_can_frame(cf)
self.assertEqual(self.can_id, can_id)
self.assertEqual(self.data, data)
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testBCM(self):
bcm = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM)
self.addCleanup(bcm.close)
bcm.connect((self.interface,))
self.can_id = 0x123
self.data = bytes([0xc0, 0xff, 0xee])
self.cf = self.build_can_frame(self.can_id, self.data)
opcode = socket.CAN_BCM_TX_SEND
flags = 0
count = 0
ival1_seconds = ival1_usec = ival2_seconds = ival2_usec = 0
bcm_can_id = 0x0222
nframes = 1
assert len(self.cf) == 16
header = struct.pack(self.bcm_cmd_msg_fmt,
opcode,
flags,
count,
ival1_seconds,
ival1_usec,
ival2_seconds,
ival2_usec,
bcm_can_id,
nframes,
)
header_plus_frame = header + self.cf
bytes_sent = bcm.send(header_plus_frame)
self.assertEqual(bytes_sent, len(header_plus_frame))
@unittest.skipUnless(HAVE_SOCKET_CAN_ISOTP, 'CAN ISOTP required for this test.')
class ISOTPTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.interface = "vcan0"
def testCrucialConstants(self):
socket.AF_CAN
socket.PF_CAN
socket.CAN_ISOTP
socket.SOCK_DGRAM
def testCreateSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
pass
@unittest.skipUnless(hasattr(socket, "CAN_ISOTP"),
'socket.CAN_ISOTP required for this test.')
def testCreateISOTPSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP) as s:
pass
def testTooLongInterfaceName(self):
# most systems limit IFNAMSIZ to 16, take 1024 to be sure
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP) as s:
with self.assertRaisesRegex(OSError, 'interface name too long'):
s.bind(('x' * 1024, 1, 2))
def testBind(self):
try:
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP) as s:
addr = self.interface, 0x123, 0x456
s.bind(addr)
self.assertEqual(s.getsockname(), addr)
except OSError as e:
if e.errno == errno.ENODEV:
self.skipTest('network interface `%s` does not exist' %
self.interface)
else:
raise
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
class BasicRDSTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_RDS
socket.PF_RDS
def testCreateSocket(self):
with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
pass
def testSocketBufferSize(self):
bufsize = 16384
with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
s.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, bufsize)
s.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, bufsize)
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
class RDSTest(ThreadedRDSSocketTest):
def __init__(self, methodName='runTest'):
ThreadedRDSSocketTest.__init__(self, methodName=methodName)
def setUp(self):
super().setUp()
self.evt = threading.Event()
def testSendAndRecv(self):
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
self.assertEqual(self.cli_addr, addr)
def _testSendAndRecv(self):
self.data = b'spam'
self.cli.sendto(self.data, 0, (HOST, self.port))
def testPeek(self):
data, addr = self.serv.recvfrom(self.bufsize, socket.MSG_PEEK)
self.assertEqual(self.data, data)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
def _testPeek(self):
self.data = b'spam'
self.cli.sendto(self.data, 0, (HOST, self.port))
@requireAttrs(socket.socket, 'recvmsg')
def testSendAndRecvMsg(self):
data, ancdata, msg_flags, addr = self.serv.recvmsg(self.bufsize)
self.assertEqual(self.data, data)
@requireAttrs(socket.socket, 'sendmsg')
def _testSendAndRecvMsg(self):
self.data = b'hello ' * 10
self.cli.sendmsg([self.data], (), 0, (HOST, self.port))
def testSendAndRecvMulti(self):
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data1, data)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data2, data)
def _testSendAndRecvMulti(self):
self.data1 = b'bacon'
self.cli.sendto(self.data1, 0, (HOST, self.port))
self.data2 = b'egg'
self.cli.sendto(self.data2, 0, (HOST, self.port))
def testSelect(self):
r, w, x = select.select([self.serv], [], [], 3.0)
self.assertIn(self.serv, r)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
def _testSelect(self):
self.data = b'select'
self.cli.sendto(self.data, 0, (HOST, self.port))
@unittest.skipUnless(HAVE_SOCKET_QIPCRTR,
'QIPCRTR sockets required for this test.')
class BasicQIPCRTRTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_QIPCRTR
def testCreateSocket(self):
with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
pass
def testUnbound(self):
with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
self.assertEqual(s.getsockname()[1], 0)
def testBindSock(self):
with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
support.bind_port(s, host=s.getsockname()[0])
self.assertNotEqual(s.getsockname()[1], 0)
def testInvalidBindSock(self):
with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
self.assertRaises(OSError, support.bind_port, s, host=-2)
def testAutoBindSock(self):
with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
s.connect((123, 123))
self.assertNotEqual(s.getsockname()[1], 0)
@unittest.skipIf(fcntl is None, "need fcntl")
@unittest.skipUnless(HAVE_SOCKET_VSOCK,
'VSOCK sockets required for this test.')
class BasicVSOCKTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_VSOCK
def testVSOCKConstants(self):
socket.SO_VM_SOCKETS_BUFFER_SIZE
socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE
socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE
socket.VMADDR_CID_ANY
socket.VMADDR_PORT_ANY
socket.VMADDR_CID_HOST
socket.VM_SOCKETS_INVALID_VERSION
socket.IOCTL_VM_SOCKETS_GET_LOCAL_CID
def testCreateSocket(self):
with socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM) as s:
pass
def testSocketBufferSize(self):
with socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM) as s:
orig_max = s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE)
orig = s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_SIZE)
orig_min = s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE)
s.setsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE, orig_max * 2)
s.setsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_SIZE, orig * 2)
s.setsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE, orig_min * 2)
self.assertEqual(orig_max * 2,
s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE))
self.assertEqual(orig * 2,
s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_SIZE))
self.assertEqual(orig_min * 2,
s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE))
class BasicTCPTest(SocketConnectedTest):
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def testRecv(self):
# Testing large receive over TCP
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, MSG)
def _testRecv(self):
self.serv_conn.send(MSG)
def testOverFlowRecv(self):
# Testing receive in chunks over TCP
seg1 = self.cli_conn.recv(len(MSG) - 3)
seg2 = self.cli_conn.recv(1024)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testOverFlowRecv(self):
self.serv_conn.send(MSG)
def testRecvFrom(self):
# Testing large recvfrom() over TCP
msg, addr = self.cli_conn.recvfrom(1024)
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.serv_conn.send(MSG)
def testOverFlowRecvFrom(self):
# Testing recvfrom() in chunks over TCP
seg1, addr = self.cli_conn.recvfrom(len(MSG)-3)
seg2, addr = self.cli_conn.recvfrom(1024)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testOverFlowRecvFrom(self):
self.serv_conn.send(MSG)
def testSendAll(self):
# Testing sendall() with a 2048 byte string over TCP
msg = b''
while 1:
read = self.cli_conn.recv(1024)
if not read:
break
msg += read
self.assertEqual(msg, b'f' * 2048)
def _testSendAll(self):
big_chunk = b'f' * 2048
self.serv_conn.sendall(big_chunk)
def testFromFd(self):
# Testing fromfd()
fd = self.cli_conn.fileno()
sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
self.assertIsInstance(sock, socket.socket)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testFromFd(self):
self.serv_conn.send(MSG)
def testDup(self):
# Testing dup()
sock = self.cli_conn.dup()
self.addCleanup(sock.close)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testDup(self):
self.serv_conn.send(MSG)
def testShutdown(self):
# Testing shutdown()
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, MSG)
# wait for _testShutdown to finish: on OS X, when the server
# closes the connection the client also becomes disconnected,
# and the client's shutdown call will fail. (Issue #4397.)
self.done.wait()
def _testShutdown(self):
self.serv_conn.send(MSG)
self.serv_conn.shutdown(2)
testShutdown_overflow = support.cpython_only(testShutdown)
@support.cpython_only
def _testShutdown_overflow(self):
import _testcapi
self.serv_conn.send(MSG)
# Issue 15989
self.assertRaises(OverflowError, self.serv_conn.shutdown,
_testcapi.INT_MAX + 1)
self.assertRaises(OverflowError, self.serv_conn.shutdown,
2 + (_testcapi.UINT_MAX + 1))
self.serv_conn.shutdown(2)
def testDetach(self):
# Testing detach()
fileno = self.cli_conn.fileno()
f = self.cli_conn.detach()
self.assertEqual(f, fileno)
# cli_conn cannot be used anymore...
self.assertTrue(self.cli_conn._closed)
self.assertRaises(OSError, self.cli_conn.recv, 1024)
self.cli_conn.close()
# ...but we can create another socket using the (still open)
# file descriptor
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=f)
self.addCleanup(sock.close)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testDetach(self):
self.serv_conn.send(MSG)
class BasicUDPTest(ThreadedUDPSocketTest):
def __init__(self, methodName='runTest'):
ThreadedUDPSocketTest.__init__(self, methodName=methodName)
def testSendtoAndRecv(self):
# Testing sendto() and recv() over UDP
msg = self.serv.recv(len(MSG))
self.assertEqual(msg, MSG)
def _testSendtoAndRecv(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFrom(self):
# Testing recvfrom() over UDP
msg, addr = self.serv.recvfrom(len(MSG))
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFromNegative(self):
# Negative lengths passed to recvfrom should give ValueError.
self.assertRaises(ValueError, self.serv.recvfrom, -1)
def _testRecvFromNegative(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
# Tests for the sendmsg()/recvmsg() interface. Where possible, the
# same test code is used with different families and types of socket
# (e.g. stream, datagram), and tests using recvmsg() are repeated
# using recvmsg_into().
#
# The generic test classes such as SendmsgTests and
# RecvmsgGenericTests inherit from SendrecvmsgBase and expect to be
# supplied with sockets cli_sock and serv_sock representing the
# client's and the server's end of the connection respectively, and
# attributes cli_addr and serv_addr holding their (numeric where
# appropriate) addresses.
#
# The final concrete test classes combine these with subclasses of
# SocketTestBase which set up client and server sockets of a specific
# type, and with subclasses of SendrecvmsgBase such as
# SendrecvmsgDgramBase and SendrecvmsgConnectedBase which map these
# sockets to cli_sock and serv_sock and override the methods and
# attributes of SendrecvmsgBase to fill in destination addresses if
# needed when sending, check for specific flags in msg_flags, etc.
#
# RecvmsgIntoMixin provides a version of doRecvmsg() implemented using
# recvmsg_into().
# XXX: like the other datagram (UDP) tests in this module, the code
# here assumes that datagram delivery on the local machine will be
# reliable.
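#
# A hedged sketch of that composition (hypothetical class name; the real
# concrete classes are defined elsewhere in this module):
#
#     class ExampleSendmsgTCPTest(SendmsgTests,             # generic tests
#                                 SendrecvmsgConnectedBase, # socket mapping
#                                 ConnectedStreamTestMixin, TCPTestBase):
#         pass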
class SendrecvmsgBase(ThreadSafeCleanupTestCase):
# Base class for sendmsg()/recvmsg() tests.
# Time in seconds to wait before considering a test failed, or
# None for no timeout. Not all tests actually set a timeout.
fail_timeout = 3.0
def setUp(self):
self.misc_event = threading.Event()
super().setUp()
def sendToServer(self, msg):
# Send msg to the server.
return self.cli_sock.send(msg)
# Tuple of alternative default arguments for sendmsg() when called
# via sendmsgToServer() (e.g. to include a destination address).
sendmsg_to_server_defaults = ()
def sendmsgToServer(self, *args):
# Call sendmsg() on self.cli_sock with the given arguments,
# filling in any arguments which are not supplied with the
# corresponding items of self.sendmsg_to_server_defaults, if
# any.
return self.cli_sock.sendmsg(
*(args + self.sendmsg_to_server_defaults[len(args):]))
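# For example, if a subclass (hypothetically) set
# sendmsg_to_server_defaults = ([], [], 0, serv_addr), then
# sendmsgToServer([b'data']) would expand to
# cli_sock.sendmsg([b'data'], [], 0, serv_addr): only the positions not
# supplied by the caller are filled in from the defaults.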
def doRecvmsg(self, sock, bufsize, *args):
# Call recvmsg() on sock with given arguments and return its
# result. Should be used for tests which can use either
# recvmsg() or recvmsg_into() - RecvmsgIntoMixin overrides
# this method with one which emulates it using recvmsg_into(),
# thus allowing the same test to be used for both methods.
result = sock.recvmsg(bufsize, *args)
self.registerRecvmsgResult(result)
return result
def registerRecvmsgResult(self, result):
# Called by doRecvmsg() with the return value of recvmsg() or
# recvmsg_into(). Can be overridden to arrange cleanup based
# on the returned ancillary data, for instance.
pass
def checkRecvmsgAddress(self, addr1, addr2):
# Called to compare the received address with the address of
# the peer.
self.assertEqual(addr1, addr2)
# Flags that are normally unset in msg_flags
msg_flags_common_unset = 0
for name in ("MSG_CTRUNC", "MSG_OOB"):
msg_flags_common_unset |= getattr(socket, name, 0)
# Flags that are normally set
msg_flags_common_set = 0
# Flags set when a complete record has been received (e.g. MSG_EOR
# for SCTP)
msg_flags_eor_indicator = 0
# Flags set when a complete record has not been received
# (e.g. MSG_TRUNC for datagram sockets)
msg_flags_non_eor_indicator = 0
def checkFlags(self, flags, eor=None, checkset=0, checkunset=0, ignore=0):
# Method to check the value of msg_flags returned by recvmsg[_into]().
#
# Checks that all bits in msg_flags_common_set attribute are
# set in "flags" and all bits in msg_flags_common_unset are
# unset.
#
# The "eor" argument specifies whether the flags should
# indicate that a full record (or datagram) has been received.
# If "eor" is None, no checks are done; otherwise, checks
# that:
#
# * if "eor" is true, all bits in msg_flags_eor_indicator are
# set and all bits in msg_flags_non_eor_indicator are unset
#
# * if "eor" is false, all bits in msg_flags_non_eor_indicator
# are set and all bits in msg_flags_eor_indicator are unset
#
# If "checkset" and/or "checkunset" are supplied, they require
# the given bits to be set or unset respectively, overriding
# what the attributes require for those bits.
#
# If any bits are set in "ignore", they will not be checked,
# regardless of the other inputs.
#
# Will raise Exception if the inputs require a bit to be both
# set and unset, and it is not ignored.
defaultset = self.msg_flags_common_set
defaultunset = self.msg_flags_common_unset
if eor:
defaultset |= self.msg_flags_eor_indicator
defaultunset |= self.msg_flags_non_eor_indicator
elif eor is not None:
defaultset |= self.msg_flags_non_eor_indicator
defaultunset |= self.msg_flags_eor_indicator
# Function arguments override defaults
defaultset &= ~checkunset
defaultunset &= ~checkset
# Merge arguments with remaining defaults, and check for conflicts
checkset |= defaultset
checkunset |= defaultunset
inboth = checkset & checkunset & ~ignore
if inboth:
raise Exception("contradictory set, unset requirements for flags "
"{0:#x}".format(inboth))
# Compare with given msg_flags value
mask = (checkset | checkunset) & ~ignore
self.assertEqual(flags & mask, checkset & mask)
class RecvmsgIntoMixin(SendrecvmsgBase):
# Mixin to implement doRecvmsg() using recvmsg_into().
def doRecvmsg(self, sock, bufsize, *args):
buf = bytearray(bufsize)
result = sock.recvmsg_into([buf], *args)
self.registerRecvmsgResult(result)
self.assertGreaterEqual(result[0], 0)
self.assertLessEqual(result[0], bufsize)
return (bytes(buf[:result[0]]),) + result[1:]
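# Illustrative sketch only (a hypothetical helper, never called by the
# tests): recvmsg_into() scatters the received bytes into caller-supplied
# writable buffers and returns (nbytes, ancdata, msg_flags, address)
# instead of the data itself.  Assumes a platform providing socketpair()
# with SOCK_DGRAM and recvmsg_into().
def _recvmsg_into_sketch():
    import socket as _socket
    s1, s2 = _socket.socketpair(_socket.AF_UNIX, _socket.SOCK_DGRAM)
    try:
        s1.send(b"0123456789")
        head, tail = bytearray(4), bytearray(16)
        # The datagram is split across the supplied buffers in order.
        nbytes, ancdata, msg_flags, addr = s2.recvmsg_into([head, tail])
        assert nbytes == 10
        assert bytes(head) == b"0123" and bytes(tail[:6]) == b"456789"
    finally:
        s1.close()
        s2.close()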
class SendrecvmsgDgramFlagsBase(SendrecvmsgBase):
# Defines flags to be checked in msg_flags for datagram sockets.
@property
def msg_flags_non_eor_indicator(self):
return super().msg_flags_non_eor_indicator | socket.MSG_TRUNC
class SendrecvmsgSCTPFlagsBase(SendrecvmsgBase):
# Defines flags to be checked in msg_flags for SCTP sockets.
@property
def msg_flags_eor_indicator(self):
return super().msg_flags_eor_indicator | socket.MSG_EOR
class SendrecvmsgConnectionlessBase(SendrecvmsgBase):
# Base class for tests on connectionless-mode sockets. Users must
# supply sockets on attributes cli and serv to be mapped to
# cli_sock and serv_sock respectively.
@property
def serv_sock(self):
return self.serv
@property
def cli_sock(self):
return self.cli
@property
def sendmsg_to_server_defaults(self):
return ([], [], 0, self.serv_addr)
def sendToServer(self, msg):
return self.cli_sock.sendto(msg, self.serv_addr)
class SendrecvmsgConnectedBase(SendrecvmsgBase):
# Base class for tests on connected sockets. Users must supply
# sockets on attributes serv_conn and cli_conn (representing the
# connections *to* the server and the client), to be mapped to
# cli_sock and serv_sock respectively.
@property
def serv_sock(self):
return self.cli_conn
@property
def cli_sock(self):
return self.serv_conn
def checkRecvmsgAddress(self, addr1, addr2):
# Address is currently "unspecified" for a connected socket,
# so we don't examine it
pass
class SendrecvmsgServerTimeoutBase(SendrecvmsgBase):
# Base class to set a timeout on server's socket.
def setUp(self):
super().setUp()
self.serv_sock.settimeout(self.fail_timeout)
class SendmsgTests(SendrecvmsgServerTimeoutBase):
# Tests for sendmsg() which can use any socket type and do not
# involve recvmsg() or recvmsg_into().
def testSendmsg(self):
# Send a simple message with sendmsg().
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsg(self):
self.assertEqual(self.sendmsgToServer([MSG]), len(MSG))
def testSendmsgDataGenerator(self):
# Send from buffer obtained from a generator (not a sequence).
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgDataGenerator(self):
self.assertEqual(self.sendmsgToServer((o for o in [MSG])),
len(MSG))
def testSendmsgAncillaryGenerator(self):
# Gather (empty) ancillary data from a generator.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgAncillaryGenerator(self):
self.assertEqual(self.sendmsgToServer([MSG], (o for o in [])),
len(MSG))
def testSendmsgArray(self):
# Send data from an array instead of the usual bytes object.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgArray(self):
self.assertEqual(self.sendmsgToServer([array.array("B", MSG)]),
len(MSG))
def testSendmsgGather(self):
# Send message data from more than one buffer (gather write).
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgGather(self):
self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))
def testSendmsgBadArgs(self):
# Check that sendmsg() rejects invalid arguments.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgBadArgs(self):
self.assertRaises(TypeError, self.cli_sock.sendmsg)
self.assertRaises(TypeError, self.sendmsgToServer,
b"not in an iterable")
self.assertRaises(TypeError, self.sendmsgToServer,
object())
self.assertRaises(TypeError, self.sendmsgToServer,
[object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG, object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], object())
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [], object())
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [], 0, object())
self.sendToServer(b"done")
def testSendmsgBadCmsg(self):
# Check that invalid ancillary data items are rejected.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgBadCmsg(self):
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(object(), 0, b"data")])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, object(), b"data")])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, object())])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0)])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, b"data", 42)])
self.sendToServer(b"done")
@requireAttrs(socket, "CMSG_SPACE")
def testSendmsgBadMultiCmsg(self):
# Check that invalid ancillary data items are rejected when
# more than one item is present.
self.assertEqual(self.serv_sock.recv(1000), b"done")
@testSendmsgBadMultiCmsg.client_skip
def _testSendmsgBadMultiCmsg(self):
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [0, 0, b""])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, b""), object()])
self.sendToServer(b"done")
def testSendmsgExcessCmsgReject(self):
# Check that sendmsg() rejects excess ancillary data items
# when the number that can be sent is limited.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgExcessCmsgReject(self):
if not hasattr(socket, "CMSG_SPACE"):
# Can only send one item
with self.assertRaises(OSError) as cm:
self.sendmsgToServer([MSG], [(0, 0, b""), (0, 0, b"")])
self.assertIsNone(cm.exception.errno)
self.sendToServer(b"done")
def testSendmsgAfterClose(self):
# Check that sendmsg() fails on a closed socket.
pass
def _testSendmsgAfterClose(self):
self.cli_sock.close()
self.assertRaises(OSError, self.sendmsgToServer, [MSG])
class SendmsgStreamTests(SendmsgTests):
# Tests for sendmsg() which require a stream socket and do not
# involve recvmsg() or recvmsg_into().
def testSendmsgExplicitNoneAddr(self):
# Check that peer address can be specified as None.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgExplicitNoneAddr(self):
self.assertEqual(self.sendmsgToServer([MSG], [], 0, None), len(MSG))
def testSendmsgTimeout(self):
# Check that timeout works with sendmsg().
self.assertEqual(self.serv_sock.recv(512), b"a"*512)
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
def _testSendmsgTimeout(self):
try:
self.cli_sock.settimeout(0.03)
try:
while True:
self.sendmsgToServer([b"a"*512])
except socket.timeout:
pass
except OSError as exc:
if exc.errno != errno.ENOMEM:
raise
# bpo-33937 the test randomly fails on Travis CI with
# "OSError: [Errno 12] Cannot allocate memory"
else:
self.fail("socket.timeout not raised")
finally:
self.misc_event.set()
# XXX: would be nice to have more tests for sendmsg flags argument.
# Linux supports MSG_DONTWAIT when sending, but in general, it
# only works when receiving. Could add other platforms if they
# support it too.
@skipWithClientIf(sys.platform not in {"linux"},
"MSG_DONTWAIT not known to work on this platform when "
"sending")
def testSendmsgDontWait(self):
# Check that MSG_DONTWAIT in flags causes non-blocking behaviour.
self.assertEqual(self.serv_sock.recv(512), b"a"*512)
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
@testSendmsgDontWait.client_skip
def _testSendmsgDontWait(self):
try:
with self.assertRaises(OSError) as cm:
while True:
self.sendmsgToServer([b"a"*512], [], socket.MSG_DONTWAIT)
# bpo-33937: catch also ENOMEM, the test randomly fails on Travis CI
# with "OSError: [Errno 12] Cannot allocate memory"
self.assertIn(cm.exception.errno,
(errno.EAGAIN, errno.EWOULDBLOCK, errno.ENOMEM))
finally:
self.misc_event.set()
class SendmsgConnectionlessTests(SendmsgTests):
# Tests for sendmsg() which require a connectionless-mode
# (e.g. datagram) socket, and do not involve recvmsg() or
# recvmsg_into().
def testSendmsgNoDestAddr(self):
# Check that sendmsg() fails when no destination address is
# given for unconnected socket.
pass
def _testSendmsgNoDestAddr(self):
self.assertRaises(OSError, self.cli_sock.sendmsg,
[MSG])
self.assertRaises(OSError, self.cli_sock.sendmsg,
[MSG], [], 0, None)
class RecvmsgGenericTests(SendrecvmsgBase):
# Tests for recvmsg() which can also be emulated using
# recvmsg_into(), and can use any socket type.
def testRecvmsg(self):
# Receive a simple message with recvmsg[_into]().
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsg(self):
self.sendToServer(MSG)
def testRecvmsgExplicitDefaults(self):
# Test recvmsg[_into]() with default arguments provided explicitly.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 0, 0)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgExplicitDefaults(self):
self.sendToServer(MSG)
def testRecvmsgShorter(self):
# Receive a message smaller than buffer.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) + 42)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgShorter(self):
self.sendToServer(MSG)
def testRecvmsgTrunc(self):
# Receive part of message, check for truncation indicators.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3)
self.assertEqual(msg, MSG[:-3])
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=False)
def _testRecvmsgTrunc(self):
self.sendToServer(MSG)
def testRecvmsgShortAncillaryBuf(self):
# Test ancillary data buffer too small to hold any ancillary data.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgShortAncillaryBuf(self):
self.sendToServer(MSG)
def testRecvmsgLongAncillaryBuf(self):
# Test large ancillary data buffer.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgLongAncillaryBuf(self):
self.sendToServer(MSG)
def testRecvmsgAfterClose(self):
# Check that recvmsg[_into]() fails on a closed socket.
self.serv_sock.close()
self.assertRaises(OSError, self.doRecvmsg, self.serv_sock, 1024)
def _testRecvmsgAfterClose(self):
pass
def testRecvmsgTimeout(self):
# Check that timeout works.
try:
self.serv_sock.settimeout(0.03)
self.assertRaises(socket.timeout,
self.doRecvmsg, self.serv_sock, len(MSG))
finally:
self.misc_event.set()
def _testRecvmsgTimeout(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
@requireAttrs(socket, "MSG_PEEK")
def testRecvmsgPeek(self):
# Check that MSG_PEEK in flags enables examination of pending
# data without consuming it.
# Receive part of data with MSG_PEEK.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3, 0,
socket.MSG_PEEK)
self.assertEqual(msg, MSG[:-3])
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
# Ignoring MSG_TRUNC here (so this test is the same for stream
# and datagram sockets). Some wording in POSIX seems to
# suggest that it needn't be set when peeking, but that may
# just be a slip.
self.checkFlags(flags, eor=False,
ignore=getattr(socket, "MSG_TRUNC", 0))
# Receive all data with MSG_PEEK.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 0,
socket.MSG_PEEK)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
# Check that the same data can still be received normally.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
@testRecvmsgPeek.client_skip
def _testRecvmsgPeek(self):
self.sendToServer(MSG)
@requireAttrs(socket.socket, "sendmsg")
def testRecvmsgFromSendmsg(self):
# Test receiving with recvmsg[_into]() when message is sent
# using sendmsg().
self.serv_sock.settimeout(self.fail_timeout)
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
@testRecvmsgFromSendmsg.client_skip
def _testRecvmsgFromSendmsg(self):
self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))
class RecvmsgGenericStreamTests(RecvmsgGenericTests):
# Tests which require a stream socket and can use either recvmsg()
# or recvmsg_into().
def testRecvmsgEOF(self):
# Receive end-of-stream indicator (b"", peer socket closed).
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, 1024)
self.assertEqual(msg, b"")
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=None) # Might not have end-of-record marker
def _testRecvmsgEOF(self):
self.cli_sock.close()
def testRecvmsgOverflow(self):
# Receive a message in more than one chunk.
seg1, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=False)
seg2, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, 1024)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testRecvmsgOverflow(self):
self.sendToServer(MSG)
class RecvmsgTests(RecvmsgGenericTests):
# Tests for recvmsg() which can use any socket type.
def testRecvmsgBadArgs(self):
# Check that recvmsg() rejects invalid arguments.
self.assertRaises(TypeError, self.serv_sock.recvmsg)
self.assertRaises(ValueError, self.serv_sock.recvmsg,
-1, 0, 0)
self.assertRaises(ValueError, self.serv_sock.recvmsg,
len(MSG), -1, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
[bytearray(10)], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
object(), 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
len(MSG), object(), 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
len(MSG), 0, object())
msg, ancdata, flags, addr = self.serv_sock.recvmsg(len(MSG), 0, 0)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgBadArgs(self):
self.sendToServer(MSG)
class RecvmsgIntoTests(RecvmsgIntoMixin, RecvmsgGenericTests):
# Tests for recvmsg_into() which can use any socket type.
def testRecvmsgIntoBadArgs(self):
# Check that recvmsg_into() rejects invalid arguments.
buf = bytearray(len(MSG))
self.assertRaises(TypeError, self.serv_sock.recvmsg_into)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
len(MSG), 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
buf, 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[object()], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[b"I'm not writable"], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf, object()], 0, 0)
self.assertRaises(ValueError, self.serv_sock.recvmsg_into,
[buf], -1, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf], object(), 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf], 0, object())
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf], 0, 0)
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf, bytearray(MSG))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoBadArgs(self):
self.sendToServer(MSG)
def testRecvmsgIntoGenerator(self):
# Receive into buffer obtained from a generator (not a sequence).
buf = bytearray(len(MSG))
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
(o for o in [buf]))
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf, bytearray(MSG))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoGenerator(self):
self.sendToServer(MSG)
def testRecvmsgIntoArray(self):
# Receive into an array rather than the usual bytearray.
buf = array.array("B", [0] * len(MSG))
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf])
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf.tobytes(), MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoArray(self):
self.sendToServer(MSG)
def testRecvmsgIntoScatter(self):
# Receive into multiple buffers (scatter write).
b1 = bytearray(b"----")
b2 = bytearray(b"0123456789")
b3 = bytearray(b"--------------")
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
[b1, memoryview(b2)[2:9], b3])
self.assertEqual(nbytes, len(b"Mary had a little lamb"))
self.assertEqual(b1, bytearray(b"Mary"))
self.assertEqual(b2, bytearray(b"01 had a 9"))
self.assertEqual(b3, bytearray(b"little lamb---"))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoScatter(self):
self.sendToServer(b"Mary had a little lamb")
class CmsgMacroTests(unittest.TestCase):
# Test the functions CMSG_LEN() and CMSG_SPACE(). Tests
# assumptions used by sendmsg() and recvmsg[_into](), which share
# code with these functions.
# Match the definition in socketmodule.c
try:
import _testcapi
except ImportError:
socklen_t_limit = 0x7fffffff
else:
socklen_t_limit = min(0x7fffffff, _testcapi.INT_MAX)
@requireAttrs(socket, "CMSG_LEN")
def testCMSG_LEN(self):
# Test CMSG_LEN() with various valid and invalid values,
# checking the assumptions used by recvmsg() and sendmsg().
toobig = self.socklen_t_limit - socket.CMSG_LEN(0) + 1
values = list(range(257)) + list(range(toobig - 257, toobig))
# struct cmsghdr has at least three members, two of which are ints
self.assertGreater(socket.CMSG_LEN(0), array.array("i").itemsize * 2)
for n in values:
ret = socket.CMSG_LEN(n)
# This is how recvmsg() calculates the data size
self.assertEqual(ret - socket.CMSG_LEN(0), n)
self.assertLessEqual(ret, self.socklen_t_limit)
self.assertRaises(OverflowError, socket.CMSG_LEN, -1)
# sendmsg() shares code with these functions, and requires
# that it reject values over the limit.
self.assertRaises(OverflowError, socket.CMSG_LEN, toobig)
self.assertRaises(OverflowError, socket.CMSG_LEN, sys.maxsize)
@requireAttrs(socket, "CMSG_SPACE")
def testCMSG_SPACE(self):
# Test CMSG_SPACE() with various valid and invalid values,
# checking the assumptions used by sendmsg().
toobig = self.socklen_t_limit - socket.CMSG_SPACE(1) + 1
values = list(range(257)) + list(range(toobig - 257, toobig))
last = socket.CMSG_SPACE(0)
# struct cmsghdr has at least three members, two of which are ints
self.assertGreater(last, array.array("i").itemsize * 2)
for n in values:
ret = socket.CMSG_SPACE(n)
self.assertGreaterEqual(ret, last)
self.assertGreaterEqual(ret, socket.CMSG_LEN(n))
self.assertGreaterEqual(ret, n + socket.CMSG_LEN(0))
self.assertLessEqual(ret, self.socklen_t_limit)
last = ret
self.assertRaises(OverflowError, socket.CMSG_SPACE, -1)
# sendmsg() shares code with these functions, and requires
# that it reject values over the limit.
self.assertRaises(OverflowError, socket.CMSG_SPACE, toobig)
self.assertRaises(OverflowError, socket.CMSG_SPACE, sys.maxsize)
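# Illustrative sketch (a hypothetical helper, not executed by the tests):
# sizing an ancillary data buffer for one control message carrying a
# payload of file descriptors.  CMSG_LEN(n) is the exact header-plus-data
# length, while CMSG_SPACE(n) also accounts for trailing padding and is
# what portable code should pass as recvmsg()'s ancillary buffer size.
# Assumes a platform exposing both macros (they are Unix-only).
def _cmsg_sizing_sketch(num_fds=2):
    import array as _array
    import socket as _socket
    payload = _array.array("i").itemsize * num_fds
    exact = _socket.CMSG_LEN(payload)     # header + data, no padding
    padded = _socket.CMSG_SPACE(payload)  # header + data + padding
    assert padded >= exact
    return padded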
class SCMRightsTest(SendrecvmsgServerTimeoutBase):
# Tests for file descriptor passing on Unix-domain sockets.
# Invalid file descriptor value that's unlikely to evaluate to a
# real FD even if one of its bytes is replaced with a different
# value (which shouldn't actually happen).
badfd = -0x5555
def newFDs(self, n):
# Return a list of n file descriptors for newly-created files
# containing their list indices as ASCII numbers.
fds = []
for i in range(n):
fd, path = tempfile.mkstemp()
self.addCleanup(os.unlink, path)
self.addCleanup(os.close, fd)
os.write(fd, str(i).encode())
fds.append(fd)
return fds
def checkFDs(self, fds):
# Check that the file descriptors in the given list contain
# their correct list indices as ASCII numbers.
for n, fd in enumerate(fds):
os.lseek(fd, 0, os.SEEK_SET)
self.assertEqual(os.read(fd, 1024), str(n).encode())
def registerRecvmsgResult(self, result):
self.addCleanup(self.closeRecvmsgFDs, result)
def closeRecvmsgFDs(self, recvmsg_result):
# Close all file descriptors specified in the ancillary data
# of the given return value from recvmsg() or recvmsg_into().
for cmsg_level, cmsg_type, cmsg_data in recvmsg_result[1]:
if (cmsg_level == socket.SOL_SOCKET and
cmsg_type == socket.SCM_RIGHTS):
fds = array.array("i")
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
for fd in fds:
os.close(fd)
def createAndSendFDs(self, n):
# Send n new file descriptors created by newFDs() to the
# server, with the constant MSG as the non-ancillary data.
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", self.newFDs(n)))]),
len(MSG))
def checkRecvmsgFDs(self, numfds, result, maxcmsgs=1, ignoreflags=0):
# Check that constant MSG was received with numfds file
# descriptors in a maximum of maxcmsgs control messages (which
# must contain only complete integers). By default, check
# that MSG_CTRUNC is unset, but ignore any flags in
# ignoreflags.
msg, ancdata, flags, addr = result
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertIsInstance(ancdata, list)
self.assertLessEqual(len(ancdata), maxcmsgs)
fds = array.array("i")
for item in ancdata:
self.assertIsInstance(item, tuple)
cmsg_level, cmsg_type, cmsg_data = item
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertIsInstance(cmsg_data, bytes)
self.assertEqual(len(cmsg_data) % SIZEOF_INT, 0)
fds.frombytes(cmsg_data)
self.assertEqual(len(fds), numfds)
self.checkFDs(fds)
def testFDPassSimple(self):
# Pass a single FD (array read from bytes object).
self.checkRecvmsgFDs(1, self.doRecvmsg(self.serv_sock,
len(MSG), 10240))
def _testFDPassSimple(self):
self.assertEqual(
self.sendmsgToServer(
[MSG],
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", self.newFDs(1)).tobytes())]),
len(MSG))
def testMultipleFDPass(self):
# Pass multiple FDs in a single array.
self.checkRecvmsgFDs(4, self.doRecvmsg(self.serv_sock,
len(MSG), 10240))
def _testMultipleFDPass(self):
self.createAndSendFDs(4)
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassCMSG_SPACE(self):
# Test using CMSG_SPACE() to calculate ancillary buffer size.
self.checkRecvmsgFDs(
4, self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_SPACE(4 * SIZEOF_INT)))
@testFDPassCMSG_SPACE.client_skip
def _testFDPassCMSG_SPACE(self):
self.createAndSendFDs(4)
def testFDPassCMSG_LEN(self):
# Test using CMSG_LEN() to calculate ancillary buffer size.
self.checkRecvmsgFDs(1,
self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_LEN(4 * SIZEOF_INT)),
# RFC 3542 says implementations may set
# MSG_CTRUNC if there isn't enough space
# for trailing padding.
ignoreflags=socket.MSG_CTRUNC)
def _testFDPassCMSG_LEN(self):
self.createAndSendFDs(1)
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(AIX, "skipping, see issue #22397")
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassSeparate(self):
# Pass two FDs in two separate arrays. Arrays may be combined
# into a single control message by the OS.
self.checkRecvmsgFDs(2,
self.doRecvmsg(self.serv_sock, len(MSG), 10240),
maxcmsgs=2)
@testFDPassSeparate.client_skip
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(AIX, "skipping, see issue #22397")
def _testFDPassSeparate(self):
fd0, fd1 = self.newFDs(2)
self.assertEqual(
self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0])),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))]),
len(MSG))
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(AIX, "skipping, see issue #22397")
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassSeparateMinSpace(self):
# Pass two FDs in two separate arrays, receiving them into the
# minimum space for two arrays.
num_fds = 2
self.checkRecvmsgFDs(num_fds,
self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_SPACE(SIZEOF_INT) +
socket.CMSG_LEN(SIZEOF_INT * num_fds)),
maxcmsgs=2, ignoreflags=socket.MSG_CTRUNC)
@testFDPassSeparateMinSpace.client_skip
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(AIX, "skipping, see issue #22397")
def _testFDPassSeparateMinSpace(self):
fd0, fd1 = self.newFDs(2)
self.assertEqual(
self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0])),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))]),
len(MSG))
def sendAncillaryIfPossible(self, msg, ancdata):
# Try to send msg and ancdata to server, but if the system
# call fails, just send msg with no ancillary data.
try:
nbytes = self.sendmsgToServer([msg], ancdata)
except OSError as e:
# Check that it was the system call that failed
self.assertIsInstance(e.errno, int)
nbytes = self.sendmsgToServer([msg])
self.assertEqual(nbytes, len(msg))
@unittest.skipIf(sys.platform == "darwin", "see issue #24725")
def testFDPassEmpty(self):
# Try to pass an empty FD array. Can receive either no array
# or an empty array.
self.checkRecvmsgFDs(0, self.doRecvmsg(self.serv_sock,
len(MSG), 10240),
ignoreflags=socket.MSG_CTRUNC)
def _testFDPassEmpty(self):
self.sendAncillaryIfPossible(MSG, [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
b"")])
def testFDPassPartialInt(self):
# Try to pass a truncated FD array.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 1)
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertLess(len(cmsg_data), SIZEOF_INT)
def _testFDPassPartialInt(self):
self.sendAncillaryIfPossible(
MSG,
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [self.badfd]).tobytes()[:-1])])
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassPartialIntInMiddle(self):
# Try to pass two FD arrays, the first of which is truncated.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 2)
fds = array.array("i")
# Arrays may have been combined in a single control message
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
self.assertLessEqual(len(fds), 2)
self.checkFDs(fds)
@testFDPassPartialIntInMiddle.client_skip
def _testFDPassPartialIntInMiddle(self):
fd0, fd1 = self.newFDs(2)
self.sendAncillaryIfPossible(
MSG,
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0, self.badfd]).tobytes()[:-1]),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))])
def checkTruncatedHeader(self, result, ignoreflags=0):
# Check that no ancillary data items are returned when data is
# truncated inside the cmsghdr structure.
msg, ancdata, flags, addr = result
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
def testCmsgTruncNoBufSize(self):
# Check that no ancillary data is received when no buffer size
# is specified.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG)),
# BSD seems to set MSG_CTRUNC only
# if an item has been partially
# received.
ignoreflags=socket.MSG_CTRUNC)
def _testCmsgTruncNoBufSize(self):
self.createAndSendFDs(1)
def testCmsgTrunc0(self):
# Check that no ancillary data is received when buffer size is 0.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 0),
ignoreflags=socket.MSG_CTRUNC)
def _testCmsgTrunc0(self):
self.createAndSendFDs(1)
# Check that no ancillary data is returned for various non-zero
# (but still too small) buffer sizes.
def testCmsgTrunc1(self):
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 1))
def _testCmsgTrunc1(self):
self.createAndSendFDs(1)
def testCmsgTrunc2Int(self):
# The cmsghdr structure has at least three members, two of
# which are ints, so we still shouldn't see any ancillary
# data.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
SIZEOF_INT * 2))
def _testCmsgTrunc2Int(self):
self.createAndSendFDs(1)
def testCmsgTruncLen0Minus1(self):
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_LEN(0) - 1))
def _testCmsgTruncLen0Minus1(self):
self.createAndSendFDs(1)
# The following tests try to truncate the control message in the
# middle of the FD array.
def checkTruncatedArray(self, ancbuf, maxdata, mindata=0):
# Check that file descriptor data is truncated to between
# mindata and maxdata bytes when received with buffer size
# ancbuf, and that any complete file descriptor numbers are
# valid.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbuf)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
if mindata == 0 and ancdata == []:
return
self.assertEqual(len(ancdata), 1)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertGreaterEqual(len(cmsg_data), mindata)
self.assertLessEqual(len(cmsg_data), maxdata)
fds = array.array("i")
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
self.checkFDs(fds)
def testCmsgTruncLen0(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0), maxdata=0)
def _testCmsgTruncLen0(self):
self.createAndSendFDs(1)
def testCmsgTruncLen0Plus1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0) + 1, maxdata=1)
def _testCmsgTruncLen0Plus1(self):
self.createAndSendFDs(2)
def testCmsgTruncLen1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(SIZEOF_INT),
maxdata=SIZEOF_INT)
def _testCmsgTruncLen1(self):
self.createAndSendFDs(2)
def testCmsgTruncLen2Minus1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(2 * SIZEOF_INT) - 1,
maxdata=(2 * SIZEOF_INT) - 1)
def _testCmsgTruncLen2Minus1(self):
self.createAndSendFDs(2)
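# Illustrative sketch (a hypothetical helper, never called by the tests)
# of the SCM_RIGHTS file-descriptor passing pattern exercised by
# SCMRightsTest above.  Assumes a Unix-like platform with AF_UNIX,
# SOL_SOCKET and SCM_RIGHTS.
def _scm_rights_sketch():
    import array as _array
    import os as _os
    import socket as _socket
    s1, s2 = _socket.socketpair(_socket.AF_UNIX, _socket.SOCK_DGRAM)
    r, w = _os.pipe()
    try:
        fds = _array.array("i", [r])
        # The descriptor array travels as one SOL_SOCKET/SCM_RIGHTS item.
        s1.sendmsg([b"x"], [(_socket.SOL_SOCKET, _socket.SCM_RIGHTS, fds)])
        msg, ancdata, flags, addr = s2.recvmsg(
            1, _socket.CMSG_SPACE(fds.itemsize))
        received = _array.array("i")
        for level, type_, data in ancdata:
            if level == _socket.SOL_SOCKET and type_ == _socket.SCM_RIGHTS:
                # Keep only complete integers, as the tests above do.
                received.frombytes(
                    data[:len(data) - len(data) % received.itemsize])
        for fd in received:
            _os.close(fd)  # the receiver owns the duplicated descriptors
    finally:
        _os.close(r)
        _os.close(w)
        s1.close()
        s2.close()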
class RFC3542AncillaryTest(SendrecvmsgServerTimeoutBase):
# Test sendmsg() and recvmsg[_into]() using the ancillary data
# features of the RFC 3542 Advanced Sockets API for IPv6.
# Currently we can only handle certain data items (e.g. traffic
# class, hop limit, MTU discovery and fragmentation settings)
# without resorting to unportable means such as the struct module,
# but the tests here are aimed at testing the ancillary data
# handling in sendmsg() and recvmsg() rather than the IPv6 API
# itself.
# Test value to use when setting hop limit of packet
hop_limit = 2
# Test value to use when setting traffic class of packet.
# -1 means "use kernel default".
traffic_class = -1
def ancillaryMapping(self, ancdata):
# Given ancillary data list ancdata, return a mapping from
# pairs (cmsg_level, cmsg_type) to corresponding cmsg_data.
# Check that no (level, type) pair appears more than once.
d = {}
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertNotIn((cmsg_level, cmsg_type), d)
d[(cmsg_level, cmsg_type)] = cmsg_data
return d
def checkHopLimit(self, ancbufsize, maxhop=255, ignoreflags=0):
# Receive hop limit into ancbufsize bytes of ancillary data
# space. Check that data is MSG, ancillary data is not
# truncated (but ignore any flags in ignoreflags), and hop
# limit is between 0 and maxhop inclusive.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 1)
self.assertIsInstance(ancdata[0], tuple)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
self.assertIsInstance(cmsg_data, bytes)
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], maxhop)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testRecvHopLimit(self):
# Test receiving the packet hop limit as ancillary data.
self.checkHopLimit(ancbufsize=10240)
@testRecvHopLimit.client_skip
def _testRecvHopLimit(self):
# Need to wait until server has asked to receive ancillary
# data, as implementations are not required to buffer it
# otherwise.
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testRecvHopLimitCMSG_SPACE(self):
# Test receiving hop limit, using CMSG_SPACE to calculate buffer size.
self.checkHopLimit(ancbufsize=socket.CMSG_SPACE(SIZEOF_INT))
@testRecvHopLimitCMSG_SPACE.client_skip
def _testRecvHopLimitCMSG_SPACE(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Could test receiving into buffer sized using CMSG_LEN, but RFC
# 3542 says portable applications must provide space for trailing
# padding. Implementations may set MSG_CTRUNC if there isn't
# enough space for the padding.
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSetHopLimit(self):
# Test setting hop limit on outgoing packet and receiving it
# at the other end.
self.checkHopLimit(ancbufsize=10240, maxhop=self.hop_limit)
@testSetHopLimit.client_skip
def _testSetHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))]),
len(MSG))
def checkTrafficClassAndHopLimit(self, ancbufsize, maxhop=255,
ignoreflags=0):
# Receive traffic class and hop limit into ancbufsize bytes of
# ancillary data space. Check that data is MSG, ancillary
# data is not truncated (but ignore any flags in ignoreflags),
# and traffic class and hop limit are in range (hop limit no
# more than maxhop).
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 2)
ancmap = self.ancillaryMapping(ancdata)
tcdata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS)]
self.assertEqual(len(tcdata), SIZEOF_INT)
a = array.array("i")
a.frombytes(tcdata)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
hldata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT)]
self.assertEqual(len(hldata), SIZEOF_INT)
a = array.array("i")
a.frombytes(hldata)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], maxhop)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testRecvTrafficClassAndHopLimit(self):
# Test receiving traffic class and hop limit as ancillary data.
self.checkTrafficClassAndHopLimit(ancbufsize=10240)
@testRecvTrafficClassAndHopLimit.client_skip
def _testRecvTrafficClassAndHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
# Test receiving traffic class and hop limit, using
# CMSG_SPACE() to calculate buffer size.
self.checkTrafficClassAndHopLimit(
ancbufsize=socket.CMSG_SPACE(SIZEOF_INT) * 2)
@testRecvTrafficClassAndHopLimitCMSG_SPACE.client_skip
def _testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSetTrafficClassAndHopLimit(self):
# Test setting traffic class and hop limit on outgoing packet,
# and receiving them at the other end.
self.checkTrafficClassAndHopLimit(ancbufsize=10240,
maxhop=self.hop_limit)
@testSetTrafficClassAndHopLimit.client_skip
def _testSetTrafficClassAndHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class])),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))]),
len(MSG))
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testOddCmsgSize(self):
# Try to send ancillary data with first item one byte too
# long. Fall back to sending with correct size if this fails,
# and check that second item was handled correctly.
self.checkTrafficClassAndHopLimit(ancbufsize=10240,
maxhop=self.hop_limit)
@testOddCmsgSize.client_skip
def _testOddCmsgSize(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
try:
nbytes = self.sendmsgToServer(
[MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class]).tobytes() + b"\x00"),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))])
except OSError as e:
self.assertIsInstance(e.errno, int)
nbytes = self.sendmsgToServer(
[MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class])),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))])
self.assertEqual(nbytes, len(MSG))
# Tests for proper handling of truncated ancillary data
def checkHopLimitTruncatedHeader(self, ancbufsize, ignoreflags=0):
# Receive hop limit into ancbufsize bytes of ancillary data
# space, which should be too small to contain the ancillary
# data header (if ancbufsize is None, pass no second argument
# to recvmsg()). Check that data is MSG, MSG_CTRUNC is set
# (unless included in ignoreflags), and no ancillary data is
# returned.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
args = () if ancbufsize is None else (ancbufsize,)
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), *args)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testCmsgTruncNoBufSize(self):
# Check that no ancillary data is received when no ancillary
# buffer size is provided.
self.checkHopLimitTruncatedHeader(ancbufsize=None,
# BSD seems to set
# MSG_CTRUNC only if an item
# has been partially
# received.
ignoreflags=socket.MSG_CTRUNC)
@testCmsgTruncNoBufSize.client_skip
def _testCmsgTruncNoBufSize(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc0(self):
# Check that no ancillary data is received when ancillary
# buffer size is zero.
self.checkHopLimitTruncatedHeader(ancbufsize=0,
ignoreflags=socket.MSG_CTRUNC)
@testSingleCmsgTrunc0.client_skip
def _testSingleCmsgTrunc0(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Check that no ancillary data is returned for various non-zero
# (but still too small) buffer sizes.
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc1(self):
self.checkHopLimitTruncatedHeader(ancbufsize=1)
@testSingleCmsgTrunc1.client_skip
def _testSingleCmsgTrunc1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc2Int(self):
self.checkHopLimitTruncatedHeader(ancbufsize=2 * SIZEOF_INT)
@testSingleCmsgTrunc2Int.client_skip
def _testSingleCmsgTrunc2Int(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTruncLen0Minus1(self):
self.checkHopLimitTruncatedHeader(ancbufsize=socket.CMSG_LEN(0) - 1)
@testSingleCmsgTruncLen0Minus1.client_skip
def _testSingleCmsgTruncLen0Minus1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTruncInData(self):
# Test truncation of a control message inside its associated
# data. The message may be returned with its data truncated,
# or not returned at all.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(
self.serv_sock, len(MSG), socket.CMSG_LEN(SIZEOF_INT) - 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 1)
if ancdata:
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
self.assertLess(len(cmsg_data), SIZEOF_INT)
@testSingleCmsgTruncInData.client_skip
def _testSingleCmsgTruncInData(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
def checkTruncatedSecondHeader(self, ancbufsize, ignoreflags=0):
# Receive traffic class and hop limit into ancbufsize bytes of
# ancillary data space, which should be large enough to
# contain the first item, but too small to contain the header
# of the second. Check that data is MSG, MSG_CTRUNC is set
# (unless included in ignoreflags), and only one ancillary
# data item is returned.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 1)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertIn(cmsg_type, {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT})
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
# Try the above test with various buffer sizes.
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc0(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT),
ignoreflags=socket.MSG_CTRUNC)
@testSecondCmsgTrunc0.client_skip
def _testSecondCmsgTrunc0(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc1(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) + 1)
@testSecondCmsgTrunc1.client_skip
def _testSecondCmsgTrunc1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc2Int(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
2 * SIZEOF_INT)
@testSecondCmsgTrunc2Int.client_skip
def _testSecondCmsgTrunc2Int(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTruncLen0Minus1(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
socket.CMSG_LEN(0) - 1)
@testSecondCmsgTruncLen0Minus1.client_skip
def _testSecondCmsgTruncLen0Minus1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testSecondCmsgTruncInData(self):
# Test truncation of the second of two control messages inside
# its associated data.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(
self.serv_sock, len(MSG),
socket.CMSG_SPACE(SIZEOF_INT) + socket.CMSG_LEN(SIZEOF_INT) - 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
cmsg_types = {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT}
cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
cmsg_types.remove(cmsg_type)
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
if ancdata:
cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
cmsg_types.remove(cmsg_type)
self.assertLess(len(cmsg_data), SIZEOF_INT)
self.assertEqual(ancdata, [])
    @testSecondCmsgTruncInData.client_skip
    def _testSecondCmsgTruncInData(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
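# Illustrative sketch (a hypothetical helper, not run by the tests) of the
# RFC 3542 ancillary data flow checked above: ask the kernel to report the
# hop limit of incoming packets via IPV6_RECVHOPLIMIT, then pull it out of
# the IPV6_HOPLIMIT control message returned by recvmsg().  Assumes an
# IPv6-capable Unix platform exposing these options; "sock" is assumed to
# be an already-bound AF_INET6 datagram socket supplied by the caller.
def _hop_limit_sketch(sock):
    import array as _array
    import socket as _socket
    int_size = _array.array("i").itemsize
    sock.setsockopt(_socket.IPPROTO_IPV6, _socket.IPV6_RECVHOPLIMIT, 1)
    msg, ancdata, flags, addr = sock.recvmsg(
        1024, _socket.CMSG_SPACE(int_size))
    for level, type_, data in ancdata:
        if level == _socket.IPPROTO_IPV6 and type_ == _socket.IPV6_HOPLIMIT:
            return _array.array("i", data)[0]
    return None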
# Derive concrete test classes for different socket types.
class SendrecvmsgUDPTestBase(SendrecvmsgDgramFlagsBase,
SendrecvmsgConnectionlessBase,
ThreadedSocketTestMixin, UDPTestBase):
pass
@requireAttrs(socket.socket, "sendmsg")
class SendmsgUDPTest(SendmsgConnectionlessTests, SendrecvmsgUDPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
class RecvmsgUDPTest(RecvmsgTests, SendrecvmsgUDPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
class RecvmsgIntoUDPTest(RecvmsgIntoTests, SendrecvmsgUDPTestBase):
pass
class SendrecvmsgUDP6TestBase(SendrecvmsgDgramFlagsBase,
SendrecvmsgConnectionlessBase,
ThreadedSocketTestMixin, UDP6TestBase):
def checkRecvmsgAddress(self, addr1, addr2):
# Called to compare the received address with the address of
# the peer, ignoring scope ID
self.assertEqual(addr1[:-1], addr2[:-1])
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class SendmsgUDP6Test(SendmsgConnectionlessTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgUDP6Test(RecvmsgTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgIntoUDP6Test(RecvmsgIntoTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgRFC3542AncillaryUDP6Test(RFC3542AncillaryTest,
SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgIntoRFC3542AncillaryUDP6Test(RecvmsgIntoMixin,
RFC3542AncillaryTest,
SendrecvmsgUDP6TestBase):
pass
class SendrecvmsgTCPTestBase(SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, TCPTestBase):
pass
@requireAttrs(socket.socket, "sendmsg")
class SendmsgTCPTest(SendmsgStreamTests, SendrecvmsgTCPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
class RecvmsgTCPTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgTCPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
class RecvmsgIntoTCPTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgTCPTestBase):
pass
class SendrecvmsgSCTPStreamTestBase(SendrecvmsgSCTPFlagsBase,
SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, SCTPStreamBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipIf(AIX, "IPPROTO_SCTP: [Errno 62] Protocol not supported on AIX")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
class SendmsgSCTPStreamTest(SendmsgStreamTests, SendrecvmsgSCTPStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipIf(AIX, "IPPROTO_SCTP: [Errno 62] Protocol not supported on AIX")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
class RecvmsgSCTPStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgSCTPStreamTestBase):
def testRecvmsgEOF(self):
try:
            super().testRecvmsgEOF()
except OSError as e:
if e.errno != errno.ENOTCONN:
raise
self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipIf(AIX, "IPPROTO_SCTP: [Errno 62] Protocol not supported on AIX")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
class RecvmsgIntoSCTPStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgSCTPStreamTestBase):
def testRecvmsgEOF(self):
try:
            super().testRecvmsgEOF()
except OSError as e:
if e.errno != errno.ENOTCONN:
raise
self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
class SendrecvmsgUnixStreamTestBase(SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, UnixStreamBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "AF_UNIX")
class SendmsgUnixStreamTest(SendmsgStreamTests, SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@requireAttrs(socket, "AF_UNIX")
class RecvmsgUnixStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@requireAttrs(socket, "AF_UNIX")
class RecvmsgIntoUnixStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "sendmsg", "recvmsg")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
class RecvmsgSCMRightsStreamTest(SCMRightsTest, SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "sendmsg", "recvmsg_into")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
class RecvmsgIntoSCMRightsStreamTest(RecvmsgIntoMixin, SCMRightsTest,
SendrecvmsgUnixStreamTestBase):
pass
# Test interrupting the interruptible send/receive methods with a
# signal when a timeout is set. These tests avoid having multiple
# threads alive during the test so that the OS cannot deliver the
# signal to the wrong one.
class InterruptedTimeoutBase(unittest.TestCase):
    # Base class for interrupted send/receive tests.  Installs a SIGALRM
    # handler that raises ZeroDivisionError and removes it on teardown,
    # along with any scheduled alarms.
def setUp(self):
super().setUp()
orig_alrm_handler = signal.signal(signal.SIGALRM,
lambda signum, frame: 1 / 0)
self.addCleanup(signal.signal, signal.SIGALRM, orig_alrm_handler)
# Timeout for socket operations
timeout = 4.0
# Provide setAlarm() method to schedule delivery of SIGALRM after
# given number of seconds, or cancel it if zero, and an
# appropriate time value to use. Use setitimer() if available.
if hasattr(signal, "setitimer"):
alarm_time = 0.05
def setAlarm(self, seconds):
signal.setitimer(signal.ITIMER_REAL, seconds)
else:
# Old systems may deliver the alarm up to one second early
alarm_time = 2
def setAlarm(self, seconds):
signal.alarm(seconds)
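# Illustrative sketch (a hypothetical helper, not used by the tests) of the
# interruption pattern above: install a SIGALRM handler that raises,
# schedule the alarm, and observe the blocking call being aborted by the
# handler's exception rather than transparently retried (PEP 475 only
# retries when the handler returns normally).  Unix-only; "sock" is
# assumed to be a blocking socket supplied by the caller.
def _interrupted_recv_sketch(sock, delay=0.05):
    import signal as _signal
    class _Alarm(Exception):
        pass
    def _handler(signum, frame):
        raise _Alarm
    old = _signal.signal(_signal.SIGALRM, _handler)
    try:
        _signal.setitimer(_signal.ITIMER_REAL, delay)
        try:
            sock.recv(1024)      # expected to be interrupted
        except _Alarm:
            return True
        return False
    finally:
        _signal.setitimer(_signal.ITIMER_REAL, 0)
        _signal.signal(_signal.SIGALRM, old)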
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
"Don't have signal.alarm or signal.setitimer")
class InterruptedRecvTimeoutTest(InterruptedTimeoutBase, UDPTestBase):
# Test interrupting the recv*() methods with signals when a
# timeout is set.
def setUp(self):
super().setUp()
self.serv.settimeout(self.timeout)
def checkInterruptedRecv(self, func, *args, **kwargs):
        # Check that func(*args, **kwargs) is interrupted by the SIGALRM
        # handler, which raises ZeroDivisionError, rather than being
        # restarted after the signal.
try:
self.setAlarm(self.alarm_time)
with self.assertRaises(ZeroDivisionError) as cm:
func(*args, **kwargs)
finally:
self.setAlarm(0)
def testInterruptedRecvTimeout(self):
self.checkInterruptedRecv(self.serv.recv, 1024)
def testInterruptedRecvIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recv_into, bytearray(1024))
def testInterruptedRecvfromTimeout(self):
self.checkInterruptedRecv(self.serv.recvfrom, 1024)
def testInterruptedRecvfromIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recvfrom_into, bytearray(1024))
@requireAttrs(socket.socket, "recvmsg")
def testInterruptedRecvmsgTimeout(self):
self.checkInterruptedRecv(self.serv.recvmsg, 1024)
@requireAttrs(socket.socket, "recvmsg_into")
def testInterruptedRecvmsgIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recvmsg_into, [bytearray(1024)])
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
"Don't have signal.alarm or signal.setitimer")
class InterruptedSendTimeoutTest(InterruptedTimeoutBase,
ThreadSafeCleanupTestCase,
SocketListeningTestMixin, TCPTestBase):
# Test interrupting the interruptible send*() methods with signals
# when a timeout is set.
def setUp(self):
super().setUp()
self.serv_conn = self.newSocket()
self.addCleanup(self.serv_conn.close)
# Use a thread to complete the connection, but wait for it to
# terminate before running the test, so that there is only one
# thread to accept the signal.
cli_thread = threading.Thread(target=self.doConnect)
cli_thread.start()
self.cli_conn, addr = self.serv.accept()
self.addCleanup(self.cli_conn.close)
cli_thread.join()
self.serv_conn.settimeout(self.timeout)
def doConnect(self):
self.serv_conn.connect(self.serv_addr)
def checkInterruptedSend(self, func, *args, **kwargs):
        # Check that func(*args, **kwargs), run in a loop, is interrupted by
        # SIGALRM, whose handler (installed in InterruptedTimeoutBase) raises
        # ZeroDivisionError.
try:
with self.assertRaises(ZeroDivisionError) as cm:
while True:
self.setAlarm(self.alarm_time)
func(*args, **kwargs)
finally:
self.setAlarm(0)
# Issue #12958: The following tests have problems on OS X prior to 10.7
@support.requires_mac_ver(10, 7)
def testInterruptedSendTimeout(self):
self.checkInterruptedSend(self.serv_conn.send, b"a"*512)
@support.requires_mac_ver(10, 7)
def testInterruptedSendtoTimeout(self):
# Passing an actual address here as Python's wrapper for
# sendto() doesn't allow passing a zero-length one; POSIX
# requires that the address is ignored since the socket is
# connection-mode, however.
self.checkInterruptedSend(self.serv_conn.sendto, b"a"*512,
self.serv_addr)
@support.requires_mac_ver(10, 7)
@requireAttrs(socket.socket, "sendmsg")
def testInterruptedSendmsgTimeout(self):
self.checkInterruptedSend(self.serv_conn.sendmsg, [b"a"*512])
class TCPCloserTest(ThreadedTCPSocketTest):
def testClose(self):
conn, addr = self.serv.accept()
conn.close()
sd = self.cli
read, write, err = select.select([sd], [], [], 1.0)
self.assertEqual(read, [sd])
self.assertEqual(sd.recv(1), b'')
# Calling close() many times should be safe.
conn.close()
conn.close()
def _testClose(self):
self.cli.connect((HOST, self.port))
time.sleep(1.0)
class BasicSocketPairTest(SocketPairTest):
def __init__(self, methodName='runTest'):
SocketPairTest.__init__(self, methodName=methodName)
def _check_defaults(self, sock):
self.assertIsInstance(sock, socket.socket)
if hasattr(socket, 'AF_UNIX'):
self.assertEqual(sock.family, socket.AF_UNIX)
else:
self.assertEqual(sock.family, socket.AF_INET)
self.assertEqual(sock.type, socket.SOCK_STREAM)
self.assertEqual(sock.proto, 0)
def _testDefaults(self):
self._check_defaults(self.cli)
def testDefaults(self):
self._check_defaults(self.serv)
def testRecv(self):
msg = self.serv.recv(1024)
self.assertEqual(msg, MSG)
def _testRecv(self):
self.cli.send(MSG)
def testSend(self):
self.serv.send(MSG)
def _testSend(self):
msg = self.cli.recv(1024)
self.assertEqual(msg, MSG)
class NonBlockingTCPTests(ThreadedTCPSocketTest):
def __init__(self, methodName='runTest'):
self.event = threading.Event()
ThreadedTCPSocketTest.__init__(self, methodName=methodName)
def assert_sock_timeout(self, sock, timeout):
self.assertEqual(self.serv.gettimeout(), timeout)
blocking = (timeout != 0.0)
self.assertEqual(sock.getblocking(), blocking)
if fcntl is not None:
# When a Python socket has a non-zero timeout, it's switched
# internally to a non-blocking mode. Later, sock.sendall(),
# sock.recv(), and other socket operations use a select() call and
            # handle EWOULDBLOCK/EAGAIN on all socket operations. That's how
# timeouts are enforced.
fd_blocking = (timeout is None)
flag = fcntl.fcntl(sock, fcntl.F_GETFL, os.O_NONBLOCK)
self.assertEqual(not bool(flag & os.O_NONBLOCK), fd_blocking)
def testSetBlocking(self):
# Test setblocking() and settimeout() methods
self.serv.setblocking(True)
self.assert_sock_timeout(self.serv, None)
self.serv.setblocking(False)
self.assert_sock_timeout(self.serv, 0.0)
self.serv.settimeout(None)
self.assert_sock_timeout(self.serv, None)
self.serv.settimeout(0)
self.assert_sock_timeout(self.serv, 0)
self.serv.settimeout(10)
self.assert_sock_timeout(self.serv, 10)
self.serv.settimeout(0)
self.assert_sock_timeout(self.serv, 0)
def _testSetBlocking(self):
pass
@support.cpython_only
def testSetBlocking_overflow(self):
# Issue 15989
import _testcapi
if _testcapi.UINT_MAX >= _testcapi.ULONG_MAX:
self.skipTest('needs UINT_MAX < ULONG_MAX')
self.serv.setblocking(False)
self.assertEqual(self.serv.gettimeout(), 0.0)
self.serv.setblocking(_testcapi.UINT_MAX + 1)
self.assertIsNone(self.serv.gettimeout())
_testSetBlocking_overflow = support.cpython_only(_testSetBlocking)
@unittest.skipUnless(hasattr(socket, 'SOCK_NONBLOCK'),
'test needs socket.SOCK_NONBLOCK')
@support.requires_linux_version(2, 6, 28)
def testInitNonBlocking(self):
# create a socket with SOCK_NONBLOCK
self.serv.close()
self.serv = socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_NONBLOCK)
self.assert_sock_timeout(self.serv, 0)
def _testInitNonBlocking(self):
pass
def testInheritFlagsBlocking(self):
        # bpo-7995: when accept() is called on a listening socket that has a
        # timeout while the default timeout is None, the resulting socket
        # must be blocking.
with socket_setdefaulttimeout(None):
self.serv.settimeout(10)
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
self.assertIsNone(conn.gettimeout())
def _testInheritFlagsBlocking(self):
self.cli.connect((HOST, self.port))
def testInheritFlagsTimeout(self):
        # bpo-7995: when accept() is called on a listening socket that has a
        # timeout while the default timeout is not None, the resulting socket
        # must inherit the default timeout.
default_timeout = 20.0
with socket_setdefaulttimeout(default_timeout):
self.serv.settimeout(10)
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
self.assertEqual(conn.gettimeout(), default_timeout)
def _testInheritFlagsTimeout(self):
self.cli.connect((HOST, self.port))
def testAccept(self):
# Testing non-blocking accept
self.serv.setblocking(0)
# connect() didn't start: non-blocking accept() fails
start_time = time.monotonic()
with self.assertRaises(BlockingIOError):
conn, addr = self.serv.accept()
dt = time.monotonic() - start_time
self.assertLess(dt, 1.0)
self.event.set()
read, write, err = select.select([self.serv], [], [], MAIN_TIMEOUT)
if self.serv not in read:
self.fail("Error trying to do accept after select.")
# connect() completed: non-blocking accept() doesn't block
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
self.assertIsNone(conn.gettimeout())
def _testAccept(self):
# don't connect before event is set to check
# that non-blocking accept() raises BlockingIOError
self.event.wait()
self.cli.connect((HOST, self.port))
def testRecv(self):
# Testing non-blocking recv
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
conn.setblocking(0)
        # the client hasn't sent data yet: non-blocking recv() fails
with self.assertRaises(BlockingIOError):
msg = conn.recv(len(MSG))
self.event.set()
read, write, err = select.select([conn], [], [], MAIN_TIMEOUT)
if conn not in read:
self.fail("Error during select call to non-blocking socket.")
        # the client has sent data: non-blocking recv() doesn't block
msg = conn.recv(len(MSG))
self.assertEqual(msg, MSG)
def _testRecv(self):
self.cli.connect((HOST, self.port))
# don't send anything before event is set to check
# that non-blocking recv() raises BlockingIOError
self.event.wait()
# send data: recv() will no longer block
self.cli.sendall(MSG)
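def _sketch_timeout_vs_blocking():
    # A minimal, hypothetical sketch (illustrative helper only, assuming a
    # platform with fcntl) of the mapping NonBlockingTCPTests checks above:
    # a finite timeout keeps the socket "blocking" at the Python level while
    # the underlying fd is put in O_NONBLOCK mode; settimeout(None) makes
    # the fd blocking again.
    import os
    import socket
    try:
        import fcntl
    except ImportError:
        return
    with socket.socket() as s:
        s.settimeout(5.0)
        assert s.getblocking()                                   # Python level
        assert fcntl.fcntl(s, fcntl.F_GETFL) & os.O_NONBLOCK     # fd level
        s.settimeout(None)
        assert not (fcntl.fcntl(s, fcntl.F_GETFL) & os.O_NONBLOCK)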
class FileObjectClassTestCase(SocketConnectedTest):
"""Unit tests for the object returned by socket.makefile()
self.read_file is the io object returned by makefile() on
the client connection. You can read from this file to
get output from the server.
self.write_file is the io object returned by makefile() on the
server connection. You can write to this file to send output
to the client.
"""
bufsize = -1 # Use default buffer size
encoding = 'utf-8'
errors = 'strict'
newline = None
read_mode = 'rb'
read_msg = MSG
write_mode = 'wb'
write_msg = MSG
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def setUp(self):
self.evt1, self.evt2, self.serv_finished, self.cli_finished = [
threading.Event() for i in range(4)]
SocketConnectedTest.setUp(self)
self.read_file = self.cli_conn.makefile(
self.read_mode, self.bufsize,
encoding = self.encoding,
errors = self.errors,
newline = self.newline)
def tearDown(self):
self.serv_finished.set()
self.read_file.close()
self.assertTrue(self.read_file.closed)
self.read_file = None
SocketConnectedTest.tearDown(self)
def clientSetUp(self):
SocketConnectedTest.clientSetUp(self)
self.write_file = self.serv_conn.makefile(
self.write_mode, self.bufsize,
encoding = self.encoding,
errors = self.errors,
newline = self.newline)
def clientTearDown(self):
self.cli_finished.set()
self.write_file.close()
self.assertTrue(self.write_file.closed)
self.write_file = None
SocketConnectedTest.clientTearDown(self)
def testReadAfterTimeout(self):
# Issue #7322: A file object must disallow further reads
# after a timeout has occurred.
self.cli_conn.settimeout(1)
self.read_file.read(3)
# First read raises a timeout
self.assertRaises(socket.timeout, self.read_file.read, 1)
# Second read is disallowed
with self.assertRaises(OSError) as ctx:
self.read_file.read(1)
self.assertIn("cannot read from timed out object", str(ctx.exception))
def _testReadAfterTimeout(self):
self.write_file.write(self.write_msg[0:3])
self.write_file.flush()
self.serv_finished.wait()
def testSmallRead(self):
# Performing small file read test
first_seg = self.read_file.read(len(self.read_msg)-3)
second_seg = self.read_file.read(3)
msg = first_seg + second_seg
self.assertEqual(msg, self.read_msg)
def _testSmallRead(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testFullRead(self):
# read until EOF
msg = self.read_file.read()
self.assertEqual(msg, self.read_msg)
def _testFullRead(self):
self.write_file.write(self.write_msg)
self.write_file.close()
def testUnbufferedRead(self):
# Performing unbuffered file read test
buf = type(self.read_msg)()
while 1:
char = self.read_file.read(1)
if not char:
break
buf += char
self.assertEqual(buf, self.read_msg)
def _testUnbufferedRead(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testReadline(self):
# Performing file readline test
line = self.read_file.readline()
self.assertEqual(line, self.read_msg)
def _testReadline(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testCloseAfterMakefile(self):
# The file returned by makefile should keep the socket open.
self.cli_conn.close()
# read until EOF
msg = self.read_file.read()
self.assertEqual(msg, self.read_msg)
def _testCloseAfterMakefile(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testMakefileAfterMakefileClose(self):
self.read_file.close()
msg = self.cli_conn.recv(len(MSG))
if isinstance(self.read_msg, str):
msg = msg.decode()
self.assertEqual(msg, self.read_msg)
def _testMakefileAfterMakefileClose(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testClosedAttr(self):
self.assertTrue(not self.read_file.closed)
def _testClosedAttr(self):
self.assertTrue(not self.write_file.closed)
def testAttributes(self):
self.assertEqual(self.read_file.mode, self.read_mode)
self.assertEqual(self.read_file.name, self.cli_conn.fileno())
def _testAttributes(self):
self.assertEqual(self.write_file.mode, self.write_mode)
self.assertEqual(self.write_file.name, self.serv_conn.fileno())
def testRealClose(self):
self.read_file.close()
self.assertRaises(ValueError, self.read_file.fileno)
self.cli_conn.close()
self.assertRaises(OSError, self.cli_conn.getsockname)
def _testRealClose(self):
pass
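def _sketch_makefile_pair():
    # A minimal, hypothetical sketch of the makefile() pattern the
    # FileObjectClassTestCase family above and below exercises: wrap each end
    # of a connected pair in a file object and exchange data via buffered I/O.
    import socket
    a, b = socket.socketpair()
    with a, b, a.makefile("wb") as wf, b.makefile("rb") as rf:
        wf.write(b"hello\n")
        wf.flush()
        assert rf.readline() == b"hello\n"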
class UnbufferedFileObjectClassTestCase(FileObjectClassTestCase):
"""Repeat the tests from FileObjectClassTestCase with bufsize==0.
In this case (and in this case only), it should be possible to
create a file object, read a line from it, create another file
object, read another line from it, without loss of data in the
first file object's buffer. Note that http.client relies on this
when reading multiple requests from the same socket."""
bufsize = 0 # Use unbuffered mode
def testUnbufferedReadline(self):
# Read a line, create a new file object, read another line with it
line = self.read_file.readline() # first line
self.assertEqual(line, b"A. " + self.write_msg) # first line
self.read_file = self.cli_conn.makefile('rb', 0)
line = self.read_file.readline() # second line
self.assertEqual(line, b"B. " + self.write_msg) # second line
def _testUnbufferedReadline(self):
self.write_file.write(b"A. " + self.write_msg)
self.write_file.write(b"B. " + self.write_msg)
self.write_file.flush()
def testMakefileClose(self):
# The file returned by makefile should keep the socket open...
self.cli_conn.close()
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, self.read_msg)
# ...until the file is itself closed
self.read_file.close()
self.assertRaises(OSError, self.cli_conn.recv, 1024)
def _testMakefileClose(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testMakefileCloseSocketDestroy(self):
refcount_before = sys.getrefcount(self.cli_conn)
self.read_file.close()
refcount_after = sys.getrefcount(self.cli_conn)
self.assertEqual(refcount_before - 1, refcount_after)
def _testMakefileCloseSocketDestroy(self):
pass
# Non-blocking ops
    # NOTE: to put `read_file` into non-blocking mode we must call
    # `cli_conn.setblocking`, and likewise `serv_conn.setblocking` for
    # `write_file` (see setUp / clientSetUp).
def testSmallReadNonBlocking(self):
self.cli_conn.setblocking(False)
self.assertEqual(self.read_file.readinto(bytearray(10)), None)
self.assertEqual(self.read_file.read(len(self.read_msg) - 3), None)
self.evt1.set()
self.evt2.wait(1.0)
first_seg = self.read_file.read(len(self.read_msg) - 3)
if first_seg is None:
                # Data hasn't arrived yet (can happen under Windows); wait a bit
time.sleep(0.5)
first_seg = self.read_file.read(len(self.read_msg) - 3)
buf = bytearray(10)
n = self.read_file.readinto(buf)
self.assertEqual(n, 3)
msg = first_seg + buf[:n]
self.assertEqual(msg, self.read_msg)
self.assertEqual(self.read_file.readinto(bytearray(16)), None)
self.assertEqual(self.read_file.read(1), None)
def _testSmallReadNonBlocking(self):
self.evt1.wait(1.0)
self.write_file.write(self.write_msg)
self.write_file.flush()
self.evt2.set()
# Avoid closing the socket before the server test has finished,
# otherwise system recv() will return 0 instead of EWOULDBLOCK.
self.serv_finished.wait(5.0)
def testWriteNonBlocking(self):
self.cli_finished.wait(5.0)
# The client thread can't skip directly - the SkipTest exception
# would appear as a failure.
if self.serv_skipped:
self.skipTest(self.serv_skipped)
def _testWriteNonBlocking(self):
self.serv_skipped = None
self.serv_conn.setblocking(False)
# Try to saturate the socket buffer pipe with repeated large writes.
BIG = b"x" * support.SOCK_MAX_SIZE
LIMIT = 10
# The first write() succeeds since a chunk of data can be buffered
n = self.write_file.write(BIG)
self.assertGreater(n, 0)
for i in range(LIMIT):
n = self.write_file.write(BIG)
if n is None:
# Succeeded
break
self.assertGreater(n, 0)
else:
# Let us know that this test didn't manage to establish
# the expected conditions. This is not a failure in itself but,
# if it happens repeatedly, the test should be fixed.
self.serv_skipped = "failed to saturate the socket buffer"
class LineBufferedFileObjectClassTestCase(FileObjectClassTestCase):
bufsize = 1 # Default-buffered for reading; line-buffered for writing
class SmallBufferedFileObjectClassTestCase(FileObjectClassTestCase):
bufsize = 2 # Exercise the buffering code
class UnicodeReadFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'r'
read_msg = MSG.decode('utf-8')
write_mode = 'wb'
write_msg = MSG
newline = ''
class UnicodeWriteFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'rb'
read_msg = MSG
write_mode = 'w'
write_msg = MSG.decode('utf-8')
newline = ''
class UnicodeReadWriteFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'r'
read_msg = MSG.decode('utf-8')
write_mode = 'w'
write_msg = MSG.decode('utf-8')
newline = ''
class NetworkConnectionTest(object):
"""Prove network connection."""
def clientSetUp(self):
# We're inherited below by BasicTCPTest2, which also inherits
# BasicTCPTest, which defines self.port referenced below.
self.cli = socket.create_connection((HOST, self.port))
self.serv_conn = self.cli
class BasicTCPTest2(NetworkConnectionTest, BasicTCPTest):
"""Tests that NetworkConnection does not break existing TCP functionality.
"""
class NetworkConnectionNoServer(unittest.TestCase):
class MockSocket(socket.socket):
def connect(self, *args):
raise socket.timeout('timed out')
@contextlib.contextmanager
def mocked_socket_module(self):
"""Return a socket which times out on connect"""
old_socket = socket.socket
socket.socket = self.MockSocket
try:
yield
finally:
socket.socket = old_socket
def test_connect(self):
port = support.find_unused_port()
cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(cli.close)
with self.assertRaises(OSError) as cm:
cli.connect((HOST, port))
self.assertEqual(cm.exception.errno, errno.ECONNREFUSED)
def test_create_connection(self):
# Issue #9792: errors raised by create_connection() should have
# a proper errno attribute.
port = support.find_unused_port()
with self.assertRaises(OSError) as cm:
socket.create_connection((HOST, port))
# Issue #16257: create_connection() calls getaddrinfo() against
# 'localhost'. This may result in an IPV6 addr being returned
# as well as an IPV4 one:
# >>> socket.getaddrinfo('localhost', port, 0, SOCK_STREAM)
# >>> [(2, 2, 0, '', ('127.0.0.1', 41230)),
# (26, 2, 0, '', ('::1', 41230, 0, 0))]
#
# create_connection() enumerates through all the addresses returned
        # and if it doesn't successfully connect to any of them, it propagates
# the last exception it encountered.
#
# On Solaris, ENETUNREACH is returned in this circumstance instead
# of ECONNREFUSED. So, if that errno exists, add it to our list of
# expected errnos.
expected_errnos = support.get_socket_conn_refused_errs()
self.assertIn(cm.exception.errno, expected_errnos)
def test_create_connection_timeout(self):
# Issue #9792: create_connection() should not recast timeout errors
# as generic socket errors.
with self.mocked_socket_module():
try:
socket.create_connection((HOST, 1234))
except socket.timeout:
pass
except OSError as exc:
if support.IPV6_ENABLED or exc.errno != errno.EAFNOSUPPORT:
raise
else:
self.fail('socket.timeout not raised')
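def _sketch_address_fallback(host, port):
    # A minimal, hypothetical re-implementation sketch (not the stdlib code)
    # of the fallback behaviour described in test_create_connection above:
    # try each address returned by getaddrinfo() and re-raise the last
    # failure if no connection succeeds.
    import socket
    last_error = None
    for family, type_, proto, _, addr in socket.getaddrinfo(
            host, port, 0, socket.SOCK_STREAM):
        s = socket.socket(family, type_, proto)
        try:
            s.connect(addr)
            return s
        except OSError as exc:
            last_error = exc
            s.close()
    if last_error is not None:
        raise last_error
    raise OSError("getaddrinfo() returned an empty list")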
class NetworkConnectionAttributesTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.source_port = support.find_unused_port()
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
def _justAccept(self):
conn, addr = self.serv.accept()
conn.close()
testFamily = _justAccept
def _testFamily(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30)
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.family, 2)
testSourceAddress = _justAccept
def _testSourceAddress(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30,
source_address=('', self.source_port))
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.getsockname()[1], self.source_port)
# The port number being used is sufficient to show that the bind()
# call happened.
testTimeoutDefault = _justAccept
def _testTimeoutDefault(self):
# passing no explicit timeout uses socket's global default
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(42)
try:
self.cli = socket.create_connection((HOST, self.port))
self.addCleanup(self.cli.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(self.cli.gettimeout(), 42)
testTimeoutNone = _justAccept
def _testTimeoutNone(self):
# None timeout means the same as sock.settimeout(None)
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
self.cli = socket.create_connection((HOST, self.port), timeout=None)
self.addCleanup(self.cli.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(self.cli.gettimeout(), None)
testTimeoutValueNamed = _justAccept
def _testTimeoutValueNamed(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30)
self.assertEqual(self.cli.gettimeout(), 30)
testTimeoutValueNonamed = _justAccept
def _testTimeoutValueNonamed(self):
self.cli = socket.create_connection((HOST, self.port), 30)
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.gettimeout(), 30)
class NetworkConnectionBehaviourTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
def testInsideTimeout(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
time.sleep(3)
conn.send(b"done!")
testOutsideTimeout = testInsideTimeout
def _testInsideTimeout(self):
self.cli = sock = socket.create_connection((HOST, self.port))
data = sock.recv(5)
self.assertEqual(data, b"done!")
def _testOutsideTimeout(self):
self.cli = sock = socket.create_connection((HOST, self.port), timeout=1)
self.assertRaises(socket.timeout, lambda: sock.recv(5))
class TCPTimeoutTest(SocketTCPTest):
def testTCPTimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.accept()
self.assertRaises(socket.timeout, raise_timeout,
"Error generating a timeout exception (TCP)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.accept()
except socket.timeout:
self.fail("caught timeout instead of error (TCP)")
except OSError:
ok = True
except:
self.fail("caught unexpected exception (TCP)")
if not ok:
self.fail("accept() returned success when we did not expect it")
@unittest.skipUnless(hasattr(signal, 'alarm'),
'test needs signal.alarm()')
def testInterruptedTimeout(self):
# XXX I don't know how to do this test on MSWindows or any other
# platform that doesn't support signal.alarm() or os.kill(), though
# the bug should have existed on all platforms.
self.serv.settimeout(5.0) # must be longer than alarm
class Alarm(Exception):
pass
def alarm_handler(signal, frame):
raise Alarm
old_alarm = signal.signal(signal.SIGALRM, alarm_handler)
try:
try:
signal.alarm(2) # POSIX allows alarm to be up to 1 second early
foo = self.serv.accept()
except socket.timeout:
self.fail("caught timeout instead of Alarm")
except Alarm:
pass
except:
self.fail("caught other exception instead of Alarm:"
" %s(%s):\n%s" %
(sys.exc_info()[:2] + (traceback.format_exc(),)))
else:
self.fail("nothing caught")
finally:
signal.alarm(0) # shut off alarm
except Alarm:
self.fail("got Alarm in wrong place")
finally:
# no alarm can be pending. Safe to restore old handler.
signal.signal(signal.SIGALRM, old_alarm)
class UDPTimeoutTest(SocketUDPTest):
def testUDPTimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.recv(1024)
self.assertRaises(socket.timeout, raise_timeout,
"Error generating a timeout exception (UDP)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.recv(1024)
except socket.timeout:
self.fail("caught timeout instead of error (UDP)")
except OSError:
ok = True
except:
self.fail("caught unexpected exception (UDP)")
if not ok:
self.fail("recv() returned success when we did not expect it")
class TestExceptions(unittest.TestCase):
def testExceptionTree(self):
self.assertTrue(issubclass(OSError, Exception))
self.assertTrue(issubclass(socket.herror, OSError))
self.assertTrue(issubclass(socket.gaierror, OSError))
self.assertTrue(issubclass(socket.timeout, OSError))
def test_setblocking_invalidfd(self):
# Regression test for issue #28471
sock0 = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
sock = socket.socket(
socket.AF_INET, socket.SOCK_STREAM, 0, sock0.fileno())
sock0.close()
self.addCleanup(sock.detach)
with self.assertRaises(OSError):
sock.setblocking(False)
@unittest.skipUnless(sys.platform == 'linux', 'Linux specific test')
class TestLinuxAbstractNamespace(unittest.TestCase):
UNIX_PATH_MAX = 108
def testLinuxAbstractNamespace(self):
address = b"\x00python-test-hello\x00\xff"
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s1:
s1.bind(address)
s1.listen()
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s2:
s2.connect(s1.getsockname())
with s1.accept()[0] as s3:
self.assertEqual(s1.getsockname(), address)
self.assertEqual(s2.getpeername(), address)
def testMaxName(self):
address = b"\x00" + b"h" * (self.UNIX_PATH_MAX - 1)
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
s.bind(address)
self.assertEqual(s.getsockname(), address)
def testNameOverflow(self):
address = "\x00" + "h" * self.UNIX_PATH_MAX
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
self.assertRaises(OSError, s.bind, address)
def testStrName(self):
# Check that an abstract name can be passed as a string.
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
s.bind("\x00python\x00test\x00")
self.assertEqual(s.getsockname(), b"\x00python\x00test\x00")
finally:
s.close()
def testBytearrayName(self):
# Check that an abstract name can be passed as a bytearray.
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
s.bind(bytearray(b"\x00python\x00test\x00"))
self.assertEqual(s.getsockname(), b"\x00python\x00test\x00")
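def _sketch_abstract_name():
    # A minimal, hypothetical sketch, Linux-only: an AF_UNIX name beginning
    # with a NUL byte lives in the kernel's abstract namespace rather than on
    # the filesystem, which is what TestLinuxAbstractNamespace above relies on.
    import socket
    with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
        s.bind(b"\x00example-abstract-name")
        assert s.getsockname() == b"\x00example-abstract-name"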
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'test needs socket.AF_UNIX')
class TestUnixDomain(unittest.TestCase):
def setUp(self):
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
def tearDown(self):
self.sock.close()
def encoded(self, path):
# Return the given path encoded in the file system encoding,
# or skip the test if this is not possible.
try:
return os.fsencode(path)
except UnicodeEncodeError:
self.skipTest(
"Pathname {0!a} cannot be represented in file "
"system encoding {1!r}".format(
path, sys.getfilesystemencoding()))
def bind(self, sock, path):
# Bind the socket
try:
support.bind_unix_socket(sock, path)
except OSError as e:
if str(e) == "AF_UNIX path too long":
self.skipTest(
"Pathname {0!a} is too long to serve as an AF_UNIX path"
.format(path))
else:
raise
def testUnbound(self):
# Issue #30205 (note getsockname() can return None on OS X)
self.assertIn(self.sock.getsockname(), ('', None))
def testStrAddr(self):
# Test binding to and retrieving a normal string pathname.
path = os.path.abspath(support.TESTFN)
self.bind(self.sock, path)
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testBytesAddr(self):
# Test binding to a bytes pathname.
path = os.path.abspath(support.TESTFN)
self.bind(self.sock, self.encoded(path))
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testSurrogateescapeBind(self):
# Test binding to a valid non-ASCII pathname, with the
# non-ASCII bytes supplied using surrogateescape encoding.
path = os.path.abspath(support.TESTFN_UNICODE)
b = self.encoded(path)
self.bind(self.sock, b.decode("ascii", "surrogateescape"))
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testUnencodableAddr(self):
# Test binding to a pathname that cannot be encoded in the
# file system encoding.
if support.TESTFN_UNENCODABLE is None:
self.skipTest("No unencodable filename available")
path = os.path.abspath(support.TESTFN_UNENCODABLE)
self.bind(self.sock, path)
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
class BufferIOTest(SocketConnectedTest):
"""
Test the buffer versions of socket.recv() and socket.send().
"""
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def testRecvIntoArray(self):
buf = array.array("B", [0] * len(MSG))
nbytes = self.cli_conn.recv_into(buf)
self.assertEqual(nbytes, len(MSG))
buf = buf.tobytes()
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
def _testRecvIntoArray(self):
buf = bytes(MSG)
self.serv_conn.send(buf)
def testRecvIntoBytearray(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvIntoBytearray = _testRecvIntoArray
def testRecvIntoMemoryview(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(memoryview(buf))
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvIntoMemoryview = _testRecvIntoArray
def testRecvFromIntoArray(self):
buf = array.array("B", [0] * len(MSG))
nbytes, addr = self.cli_conn.recvfrom_into(buf)
self.assertEqual(nbytes, len(MSG))
buf = buf.tobytes()
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
def _testRecvFromIntoArray(self):
buf = bytes(MSG)
self.serv_conn.send(buf)
def testRecvFromIntoBytearray(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvFromIntoBytearray = _testRecvFromIntoArray
def testRecvFromIntoMemoryview(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(memoryview(buf))
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvFromIntoMemoryview = _testRecvFromIntoArray
def testRecvFromIntoSmallBuffer(self):
# See issue #20246.
buf = bytearray(8)
self.assertRaises(ValueError, self.cli_conn.recvfrom_into, buf, 1024)
def _testRecvFromIntoSmallBuffer(self):
self.serv_conn.send(MSG)
def testRecvFromIntoEmptyBuffer(self):
buf = bytearray()
self.cli_conn.recvfrom_into(buf)
self.cli_conn.recvfrom_into(buf, 0)
_testRecvFromIntoEmptyBuffer = _testRecvFromIntoArray
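def _sketch_recv_into():
    # A minimal, hypothetical sketch of the buffer-based receive path that
    # BufferIOTest above covers: recv_into() fills a caller-supplied buffer
    # and returns the byte count, avoiding an intermediate bytes object.
    import socket
    a, b = socket.socketpair()
    with a, b:
        a.sendall(b"payload")
        buf = bytearray(1024)
        n = b.recv_into(buf)
        assert buf[:n] == b"payload"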
TIPC_STYPE = 2000
TIPC_LOWER = 200
TIPC_UPPER = 210
def isTipcAvailable():
"""Check if the TIPC module is loaded
The TIPC module is not loaded automatically on Ubuntu and probably
other Linux distros.
"""
if not hasattr(socket, "AF_TIPC"):
return False
try:
f = open("/proc/modules")
except (FileNotFoundError, IsADirectoryError, PermissionError):
        # It's ok if the file does not exist, is a directory, or if we
        # don't have permission to read it.
return False
with f:
for line in f:
if line.startswith("tipc "):
return True
return False
@unittest.skipUnless(isTipcAvailable(),
"TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCTest(unittest.TestCase):
def testRDM(self):
srv = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
cli = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
self.addCleanup(srv.close)
self.addCleanup(cli.close)
srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
TIPC_LOWER, TIPC_UPPER)
srv.bind(srvaddr)
sendaddr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
cli.sendto(MSG, sendaddr)
msg, recvaddr = srv.recvfrom(1024)
self.assertEqual(cli.getsockname(), recvaddr)
self.assertEqual(msg, MSG)
@unittest.skipUnless(isTipcAvailable(),
"TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCThreadableTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName = 'runTest'):
unittest.TestCase.__init__(self, methodName = methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.srv = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
self.addCleanup(self.srv.close)
self.srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
TIPC_LOWER, TIPC_UPPER)
self.srv.bind(srvaddr)
self.srv.listen()
self.serverExplicitReady()
self.conn, self.connaddr = self.srv.accept()
self.addCleanup(self.conn.close)
def clientSetUp(self):
# There is a hittable race between serverExplicitReady() and the
# accept() call; sleep a little while to avoid it, otherwise
# we could get an exception
time.sleep(0.1)
self.cli = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
self.addCleanup(self.cli.close)
addr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
self.cli.connect(addr)
self.cliaddr = self.cli.getsockname()
def testStream(self):
msg = self.conn.recv(1024)
self.assertEqual(msg, MSG)
self.assertEqual(self.cliaddr, self.connaddr)
def _testStream(self):
self.cli.send(MSG)
self.cli.close()
class ContextManagersTest(ThreadedTCPSocketTest):
def _testSocketClass(self):
# base test
with socket.socket() as sock:
self.assertFalse(sock._closed)
self.assertTrue(sock._closed)
# close inside with block
with socket.socket() as sock:
sock.close()
self.assertTrue(sock._closed)
# exception inside with block
with socket.socket() as sock:
self.assertRaises(OSError, sock.sendall, b'foo')
self.assertTrue(sock._closed)
def testCreateConnectionBase(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
data = conn.recv(1024)
conn.sendall(data)
def _testCreateConnectionBase(self):
address = self.serv.getsockname()
with socket.create_connection(address) as sock:
self.assertFalse(sock._closed)
sock.sendall(b'foo')
self.assertEqual(sock.recv(1024), b'foo')
self.assertTrue(sock._closed)
def testCreateConnectionClose(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
data = conn.recv(1024)
conn.sendall(data)
def _testCreateConnectionClose(self):
address = self.serv.getsockname()
with socket.create_connection(address) as sock:
sock.close()
self.assertTrue(sock._closed)
self.assertRaises(OSError, sock.sendall, b'foo')
class InheritanceTest(unittest.TestCase):
@unittest.skipUnless(hasattr(socket, "SOCK_CLOEXEC"),
"SOCK_CLOEXEC not defined")
@support.requires_linux_version(2, 6, 28)
def test_SOCK_CLOEXEC(self):
with socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_CLOEXEC) as s:
self.assertEqual(s.type, socket.SOCK_STREAM)
self.assertFalse(s.get_inheritable())
def test_default_inheritable(self):
sock = socket.socket()
with sock:
self.assertEqual(sock.get_inheritable(), False)
def test_dup(self):
sock = socket.socket()
with sock:
newsock = sock.dup()
sock.close()
with newsock:
self.assertEqual(newsock.get_inheritable(), False)
def test_set_inheritable(self):
sock = socket.socket()
with sock:
sock.set_inheritable(True)
self.assertEqual(sock.get_inheritable(), True)
sock.set_inheritable(False)
self.assertEqual(sock.get_inheritable(), False)
@unittest.skipIf(fcntl is None, "need fcntl")
def test_get_inheritable_cloexec(self):
sock = socket.socket()
with sock:
fd = sock.fileno()
self.assertEqual(sock.get_inheritable(), False)
# clear FD_CLOEXEC flag
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
flags &= ~fcntl.FD_CLOEXEC
fcntl.fcntl(fd, fcntl.F_SETFD, flags)
self.assertEqual(sock.get_inheritable(), True)
@unittest.skipIf(fcntl is None, "need fcntl")
def test_set_inheritable_cloexec(self):
sock = socket.socket()
with sock:
fd = sock.fileno()
self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
fcntl.FD_CLOEXEC)
sock.set_inheritable(True)
self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
0)
def test_socketpair(self):
s1, s2 = socket.socketpair()
self.addCleanup(s1.close)
self.addCleanup(s2.close)
self.assertEqual(s1.get_inheritable(), False)
self.assertEqual(s2.get_inheritable(), False)
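def _sketch_inheritable_flag():
    # A minimal, hypothetical sketch of the behaviour InheritanceTest above
    # checks: sockets are created non-inheritable by default (PEP 446) and
    # set_inheritable() flips the flag.
    import socket
    with socket.socket() as s:
        assert s.get_inheritable() is False
        s.set_inheritable(True)
        assert s.get_inheritable() is True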
@unittest.skipUnless(hasattr(socket, "SOCK_NONBLOCK"),
"SOCK_NONBLOCK not defined")
class NonblockConstantTest(unittest.TestCase):
def checkNonblock(self, s, nonblock=True, timeout=0.0):
if nonblock:
self.assertEqual(s.type, socket.SOCK_STREAM)
self.assertEqual(s.gettimeout(), timeout)
self.assertTrue(
fcntl.fcntl(s, fcntl.F_GETFL, os.O_NONBLOCK) & os.O_NONBLOCK)
if timeout == 0:
# timeout == 0: means that getblocking() must be False.
self.assertFalse(s.getblocking())
else:
# If timeout > 0, the socket will be in a "blocking" mode
# from the standpoint of the Python API. For Python socket
# object, "blocking" means that operations like 'sock.recv()'
# will block. Internally, file descriptors for
# "blocking" Python sockets *with timeouts* are in a
# *non-blocking* mode, and 'sock.recv()' uses 'select()'
# and handles EWOULDBLOCK/EAGAIN to enforce the timeout.
self.assertTrue(s.getblocking())
else:
self.assertEqual(s.type, socket.SOCK_STREAM)
self.assertEqual(s.gettimeout(), None)
self.assertFalse(
fcntl.fcntl(s, fcntl.F_GETFL, os.O_NONBLOCK) & os.O_NONBLOCK)
self.assertTrue(s.getblocking())
@support.requires_linux_version(2, 6, 28)
def test_SOCK_NONBLOCK(self):
# a lot of it seems silly and redundant, but I wanted to test that
# changing back and forth worked ok
with socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_NONBLOCK) as s:
self.checkNonblock(s)
s.setblocking(1)
self.checkNonblock(s, nonblock=False)
s.setblocking(0)
self.checkNonblock(s)
s.settimeout(None)
self.checkNonblock(s, nonblock=False)
s.settimeout(2.0)
self.checkNonblock(s, timeout=2.0)
s.setblocking(1)
self.checkNonblock(s, nonblock=False)
# defaulttimeout
t = socket.getdefaulttimeout()
socket.setdefaulttimeout(0.0)
with socket.socket() as s:
self.checkNonblock(s)
socket.setdefaulttimeout(None)
with socket.socket() as s:
self.checkNonblock(s, False)
socket.setdefaulttimeout(2.0)
with socket.socket() as s:
self.checkNonblock(s, timeout=2.0)
socket.setdefaulttimeout(None)
with socket.socket() as s:
self.checkNonblock(s, False)
socket.setdefaulttimeout(t)
@unittest.skipUnless(os.name == "nt", "Windows specific")
@unittest.skipUnless(multiprocessing, "need multiprocessing")
class TestSocketSharing(SocketTCPTest):
# This must be classmethod and not staticmethod or multiprocessing
# won't be able to bootstrap it.
@classmethod
def remoteProcessServer(cls, q):
# Recreate socket from shared data
sdata = q.get()
message = q.get()
s = socket.fromshare(sdata)
s2, c = s.accept()
# Send the message
s2.sendall(message)
s2.close()
s.close()
def testShare(self):
# Transfer the listening server socket to another process
# and service it from there.
# Create process:
q = multiprocessing.Queue()
p = multiprocessing.Process(target=self.remoteProcessServer, args=(q,))
p.start()
# Get the shared socket data
data = self.serv.share(p.pid)
# Pass the shared socket to the other process
addr = self.serv.getsockname()
self.serv.close()
q.put(data)
# The data that the server will send us
message = b"slapmahfro"
q.put(message)
# Connect
s = socket.create_connection(addr)
# listen for the data
m = []
while True:
data = s.recv(100)
if not data:
break
m.append(data)
s.close()
received = b"".join(m)
self.assertEqual(received, message)
p.join()
def testShareLength(self):
data = self.serv.share(os.getpid())
self.assertRaises(ValueError, socket.fromshare, data[:-1])
self.assertRaises(ValueError, socket.fromshare, data+b"foo")
def compareSockets(self, org, other):
        # Socket sharing is expected to work only for blocking sockets,
        # since the internal Python timeout value isn't transferred.
self.assertEqual(org.gettimeout(), None)
self.assertEqual(org.gettimeout(), other.gettimeout())
self.assertEqual(org.family, other.family)
self.assertEqual(org.type, other.type)
# If the user specified "0" for proto, then
# internally windows will have picked the correct value.
# Python introspection on the socket however will still return
# 0. For the shared socket, the python value is recreated
# from the actual value, so it may not compare correctly.
if org.proto != 0:
self.assertEqual(org.proto, other.proto)
def testShareLocal(self):
data = self.serv.share(os.getpid())
s = socket.fromshare(data)
try:
self.compareSockets(self.serv, s)
finally:
s.close()
def testTypes(self):
families = [socket.AF_INET, socket.AF_INET6]
types = [socket.SOCK_STREAM, socket.SOCK_DGRAM]
for f in families:
for t in types:
try:
source = socket.socket(f, t)
except OSError:
continue # This combination is not supported
try:
data = source.share(os.getpid())
shared = socket.fromshare(data)
try:
self.compareSockets(source, shared)
finally:
shared.close()
finally:
source.close()
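def _sketch_share_roundtrip():
    # A minimal, hypothetical sketch, Windows-only, of the share()/fromshare()
    # round trip that TestSocketSharing above drives: serialize a socket for a
    # target process id and rebuild an equivalent socket from the blob.
    import os
    import socket
    with socket.socket() as original:
        blob = original.share(os.getpid())     # bytes understood by fromshare()
        with socket.fromshare(blob) as clone:
            assert clone.family == original.family
            assert clone.type == original.type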
class SendfileUsingSendTest(ThreadedTCPSocketTest):
"""
Test the send() implementation of socket.sendfile().
"""
FILESIZE = (10 * 1024 * 1024) # 10 MiB
BUFSIZE = 8192
FILEDATA = b""
TIMEOUT = 2
@classmethod
def setUpClass(cls):
def chunks(total, step):
assert total >= step
while total > step:
yield step
total -= step
if total:
yield total
chunk = b"".join([random.choice(string.ascii_letters).encode()
for i in range(cls.BUFSIZE)])
with open(support.TESTFN, 'wb') as f:
for csize in chunks(cls.FILESIZE, cls.BUFSIZE):
f.write(chunk)
with open(support.TESTFN, 'rb') as f:
cls.FILEDATA = f.read()
assert len(cls.FILEDATA) == cls.FILESIZE
@classmethod
def tearDownClass(cls):
support.unlink(support.TESTFN)
def accept_conn(self):
self.serv.settimeout(MAIN_TIMEOUT)
conn, addr = self.serv.accept()
conn.settimeout(self.TIMEOUT)
self.addCleanup(conn.close)
return conn
def recv_data(self, conn):
received = []
while True:
chunk = conn.recv(self.BUFSIZE)
if not chunk:
break
received.append(chunk)
return b''.join(received)
def meth_from_sock(self, sock):
        # Depending on the mixin class being run, return either the send()-based
        # or the sendfile()-based implementation.
return getattr(sock, "_sendfile_use_send")
# regular file
def _testRegularFile(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, self.FILESIZE)
self.assertEqual(file.tell(), self.FILESIZE)
def testRegularFile(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# non regular file
def _testNonRegularFile(self):
address = self.serv.getsockname()
file = io.BytesIO(self.FILEDATA)
with socket.create_connection(address) as sock, file as file:
sent = sock.sendfile(file)
self.assertEqual(sent, self.FILESIZE)
self.assertEqual(file.tell(), self.FILESIZE)
self.assertRaises(socket._GiveupOnSendfile,
sock._sendfile_use_sendfile, file)
def testNonRegularFile(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# empty file
def _testEmptyFileSend(self):
address = self.serv.getsockname()
filename = support.TESTFN + "2"
with open(filename, 'wb'):
self.addCleanup(support.unlink, filename)
file = open(filename, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, 0)
self.assertEqual(file.tell(), 0)
def testEmptyFileSend(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(data, b"")
# offset
def _testOffset(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file, offset=5000)
self.assertEqual(sent, self.FILESIZE - 5000)
self.assertEqual(file.tell(), self.FILESIZE)
def testOffset(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE - 5000)
self.assertEqual(data, self.FILEDATA[5000:])
# count
def _testCount(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
count = 5000007
meth = self.meth_from_sock(sock)
sent = meth(file, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count)
def testCount(self):
count = 5000007
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[:count])
# count small
def _testCountSmall(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
count = 1
meth = self.meth_from_sock(sock)
sent = meth(file, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count)
def testCountSmall(self):
count = 1
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[:count])
# count + offset
def _testCountWithOffset(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
count = 100007
meth = self.meth_from_sock(sock)
sent = meth(file, offset=2007, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count + 2007)
def testCountWithOffset(self):
count = 100007
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[2007:count+2007])
    # non-blocking sockets are not supposed to work
def _testNonBlocking(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
sock.setblocking(False)
meth = self.meth_from_sock(sock)
self.assertRaises(ValueError, meth, file)
self.assertRaises(ValueError, sock.sendfile, file)
def testNonBlocking(self):
conn = self.accept_conn()
if conn.recv(8192):
self.fail('was not supposed to receive any data')
# timeout (non-triggered)
def _testWithTimeout(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, self.FILESIZE)
def testWithTimeout(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# timeout (triggered)
def _testWithTimeoutTriggeredSend(self):
address = self.serv.getsockname()
with open(support.TESTFN, 'rb') as file:
with socket.create_connection(address) as sock:
sock.settimeout(0.01)
meth = self.meth_from_sock(sock)
self.assertRaises(socket.timeout, meth, file)
def testWithTimeoutTriggeredSend(self):
conn = self.accept_conn()
conn.recv(88192)
# errors
def _test_errors(self):
pass
def test_errors(self):
with open(support.TESTFN, 'rb') as file:
with socket.socket(type=socket.SOCK_DGRAM) as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(
ValueError, "SOCK_STREAM", meth, file)
with open(support.TESTFN, 'rt') as file:
with socket.socket() as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(
ValueError, "binary mode", meth, file)
with open(support.TESTFN, 'rb') as file:
with socket.socket() as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(TypeError, "positive integer",
meth, file, count='2')
self.assertRaisesRegex(TypeError, "positive integer",
meth, file, count=0.1)
self.assertRaisesRegex(ValueError, "positive integer",
meth, file, count=0)
self.assertRaisesRegex(ValueError, "positive integer",
meth, file, count=-1)
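def _sketch_sendfile(path, address):
    # A minimal, hypothetical sketch (path and address are illustrative
    # parameters) of the public socket.sendfile() API whose two internal
    # paths, _sendfile_use_send and _sendfile_use_sendfile, the classes above
    # and below exercise; it returns the number of bytes sent.
    import socket
    with open(path, "rb") as f, socket.create_connection(address) as sock:
        return sock.sendfile(f)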
@unittest.skipUnless(hasattr(os, "sendfile"),
'os.sendfile() required for this test.')
class SendfileUsingSendfileTest(SendfileUsingSendTest):
"""
Test the sendfile() implementation of socket.sendfile().
"""
def meth_from_sock(self, sock):
return getattr(sock, "_sendfile_use_sendfile")
@unittest.skipUnless(HAVE_SOCKET_ALG, 'AF_ALG required')
class LinuxKernelCryptoAPI(unittest.TestCase):
# tests for AF_ALG
def create_alg(self, typ, name):
sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
try:
sock.bind((typ, name))
except FileNotFoundError as e:
# type / algorithm is not available
sock.close()
raise unittest.SkipTest(str(e), typ, name)
else:
return sock
    # bpo-31705: On kernels older than 4.5, sendto() failed with ENOKEY,
    # at least on the ppc64le architecture
@support.requires_linux_version(4, 5)
def test_sha256(self):
expected = bytes.fromhex("ba7816bf8f01cfea414140de5dae2223b00361a396"
"177a9cb410ff61f20015ad")
with self.create_alg('hash', 'sha256') as algo:
op, _ = algo.accept()
with op:
op.sendall(b"abc")
self.assertEqual(op.recv(512), expected)
op, _ = algo.accept()
with op:
op.send(b'a', socket.MSG_MORE)
op.send(b'b', socket.MSG_MORE)
op.send(b'c', socket.MSG_MORE)
op.send(b'')
self.assertEqual(op.recv(512), expected)
def test_hmac_sha1(self):
expected = bytes.fromhex("effcdf6ae5eb2fa2d27416d5f184df9c259a7c79")
with self.create_alg('hash', 'hmac(sha1)') as algo:
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, b"Jefe")
op, _ = algo.accept()
with op:
op.sendall(b"what do ya want for nothing?")
self.assertEqual(op.recv(512), expected)
    # Although it should work with kernel 3.19 and newer, the test blocks on
    # Ubuntu 15.10 with kernel 4.2.0-19.
@support.requires_linux_version(4, 3)
def test_aes_cbc(self):
key = bytes.fromhex('06a9214036b8a15b512e03d534120006')
iv = bytes.fromhex('3dafba429d9eb430b422da802c9fac41')
msg = b"Single block msg"
ciphertext = bytes.fromhex('e353779c1079aeb82708942dbe77181a')
msglen = len(msg)
with self.create_alg('skcipher', 'cbc(aes)') as algo:
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, key)
op, _ = algo.accept()
with op:
op.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, iv=iv,
flags=socket.MSG_MORE)
op.sendall(msg)
self.assertEqual(op.recv(msglen), ciphertext)
op, _ = algo.accept()
with op:
op.sendmsg_afalg([ciphertext],
op=socket.ALG_OP_DECRYPT, iv=iv)
self.assertEqual(op.recv(msglen), msg)
# long message
multiplier = 1024
longmsg = [msg] * multiplier
op, _ = algo.accept()
with op:
op.sendmsg_afalg(longmsg,
op=socket.ALG_OP_ENCRYPT, iv=iv)
enc = op.recv(msglen * multiplier)
self.assertEqual(len(enc), msglen * multiplier)
self.assertEqual(enc[:msglen], ciphertext)
op, _ = algo.accept()
with op:
op.sendmsg_afalg([enc],
op=socket.ALG_OP_DECRYPT, iv=iv)
dec = op.recv(msglen * multiplier)
self.assertEqual(len(dec), msglen * multiplier)
self.assertEqual(dec, msg * multiplier)
@support.requires_linux_version(4, 9) # see issue29324
def test_aead_aes_gcm(self):
key = bytes.fromhex('c939cc13397c1d37de6ae0e1cb7c423c')
iv = bytes.fromhex('b3d8cc017cbb89b39e0f67e2')
plain = bytes.fromhex('c3b3c41f113a31b73d9a5cd432103069')
assoc = bytes.fromhex('24825602bd12a984e0092d3e448eda5f')
expected_ct = bytes.fromhex('93fe7d9e9bfd10348a5606e5cafa7354')
expected_tag = bytes.fromhex('0032a1dc85f1c9786925a2e71d8272dd')
taglen = len(expected_tag)
assoclen = len(assoc)
with self.create_alg('aead', 'gcm(aes)') as algo:
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, key)
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_AEAD_AUTHSIZE,
None, taglen)
# send assoc, plain and tag buffer in separate steps
op, _ = algo.accept()
with op:
op.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, iv=iv,
assoclen=assoclen, flags=socket.MSG_MORE)
op.sendall(assoc, socket.MSG_MORE)
op.sendall(plain)
res = op.recv(assoclen + len(plain) + taglen)
self.assertEqual(expected_ct, res[assoclen:-taglen])
self.assertEqual(expected_tag, res[-taglen:])
# now with msg
op, _ = algo.accept()
with op:
msg = assoc + plain
op.sendmsg_afalg([msg], op=socket.ALG_OP_ENCRYPT, iv=iv,
assoclen=assoclen)
res = op.recv(assoclen + len(plain) + taglen)
self.assertEqual(expected_ct, res[assoclen:-taglen])
self.assertEqual(expected_tag, res[-taglen:])
# create anc data manually
pack_uint32 = struct.Struct('I').pack
op, _ = algo.accept()
with op:
msg = assoc + plain
op.sendmsg(
[msg],
([socket.SOL_ALG, socket.ALG_SET_OP, pack_uint32(socket.ALG_OP_ENCRYPT)],
[socket.SOL_ALG, socket.ALG_SET_IV, pack_uint32(len(iv)) + iv],
[socket.SOL_ALG, socket.ALG_SET_AEAD_ASSOCLEN, pack_uint32(assoclen)],
)
)
res = op.recv(len(msg) + taglen)
self.assertEqual(expected_ct, res[assoclen:-taglen])
self.assertEqual(expected_tag, res[-taglen:])
# decrypt and verify
op, _ = algo.accept()
with op:
msg = assoc + expected_ct + expected_tag
op.sendmsg_afalg([msg], op=socket.ALG_OP_DECRYPT, iv=iv,
assoclen=assoclen)
res = op.recv(len(msg) - taglen)
self.assertEqual(plain, res[assoclen:])
@support.requires_linux_version(4, 3) # see test_aes_cbc
def test_drbg_pr_sha256(self):
# deterministic random bit generator, prediction resistance, sha256
with self.create_alg('rng', 'drbg_pr_sha256') as algo:
extra_seed = os.urandom(32)
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, extra_seed)
op, _ = algo.accept()
with op:
rn = op.recv(32)
self.assertEqual(len(rn), 32)
def test_sendmsg_afalg_args(self):
sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
with sock:
with self.assertRaises(TypeError):
sock.sendmsg_afalg()
with self.assertRaises(TypeError):
sock.sendmsg_afalg(op=None)
with self.assertRaises(TypeError):
sock.sendmsg_afalg(1)
with self.assertRaises(TypeError):
sock.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, assoclen=None)
with self.assertRaises(TypeError):
sock.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, assoclen=-1)
def test_length_restriction(self):
# bpo-35050, off-by-one error in length check
sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
self.addCleanup(sock.close)
# salg_type[14]
with self.assertRaises(FileNotFoundError):
sock.bind(("t" * 13, "name"))
with self.assertRaisesRegex(ValueError, "type too long"):
sock.bind(("t" * 14, "name"))
# salg_name[64]
with self.assertRaises(FileNotFoundError):
sock.bind(("type", "n" * 63))
with self.assertRaisesRegex(ValueError, "name too long"):
sock.bind(("type", "n" * 64))
@unittest.skipUnless(sys.platform.startswith("win"), "requires Windows")
class TestMSWindowsTCPFlags(unittest.TestCase):
knownTCPFlags = {
        # available for a long time
'TCP_MAXSEG',
'TCP_NODELAY',
# available starting with Windows 10 1607
'TCP_FASTOPEN',
# available starting with Windows 10 1703
'TCP_KEEPCNT',
# available starting with Windows 10 1709
'TCP_KEEPIDLE',
'TCP_KEEPINTVL'
}
def test_new_tcp_flags(self):
provided = [s for s in dir(socket) if s.startswith('TCP')]
unknown = [s for s in provided if s not in self.knownTCPFlags]
self.assertEqual([], unknown,
"New TCP flags were discovered. See bpo-32394 for more information")
class CreateServerTest(unittest.TestCase):
def test_address(self):
port = support.find_unused_port()
with socket.create_server(("127.0.0.1", port)) as sock:
self.assertEqual(sock.getsockname()[0], "127.0.0.1")
self.assertEqual(sock.getsockname()[1], port)
if support.IPV6_ENABLED:
with socket.create_server(("::1", port),
family=socket.AF_INET6) as sock:
self.assertEqual(sock.getsockname()[0], "::1")
self.assertEqual(sock.getsockname()[1], port)
def test_family_and_type(self):
with socket.create_server(("127.0.0.1", 0)) as sock:
self.assertEqual(sock.family, socket.AF_INET)
self.assertEqual(sock.type, socket.SOCK_STREAM)
if support.IPV6_ENABLED:
with socket.create_server(("::1", 0), family=socket.AF_INET6) as s:
self.assertEqual(s.family, socket.AF_INET6)
                self.assertEqual(s.type, socket.SOCK_STREAM)
def test_reuse_port(self):
if not hasattr(socket, "SO_REUSEPORT"):
with self.assertRaises(ValueError):
socket.create_server(("localhost", 0), reuse_port=True)
else:
with socket.create_server(("localhost", 0)) as sock:
opt = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT)
self.assertEqual(opt, 0)
with socket.create_server(("localhost", 0), reuse_port=True) as sock:
opt = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT)
self.assertNotEqual(opt, 0)
@unittest.skipIf(not hasattr(_socket, 'IPPROTO_IPV6') or
not hasattr(_socket, 'IPV6_V6ONLY'),
"IPV6_V6ONLY option not supported")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test')
def test_ipv6_only_default(self):
with socket.create_server(("::1", 0), family=socket.AF_INET6) as sock:
assert sock.getsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY)
@unittest.skipIf(not socket.has_dualstack_ipv6(),
"dualstack_ipv6 not supported")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test')
def test_dualstack_ipv6_family(self):
with socket.create_server(("::1", 0), family=socket.AF_INET6,
dualstack_ipv6=True) as sock:
self.assertEqual(sock.family, socket.AF_INET6)
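def _sketch_create_server_echo():
    # A minimal, hypothetical sketch of socket.create_server(), the helper
    # covered by CreateServerTest above and CreateServerFunctionalTest below:
    # bind a listening socket, connect to it, and echo a short payload.
    import socket
    with socket.create_server(("127.0.0.1", 0)) as srv:
        host, port = srv.getsockname()
        with socket.create_connection((host, port)) as cli:
            conn, _ = srv.accept()
            with conn:
                cli.sendall(b"ping")
                assert conn.recv(4) == b"ping"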
class CreateServerFunctionalTest(unittest.TestCase):
timeout = 3
def setUp(self):
self.thread = None
def tearDown(self):
if self.thread is not None:
self.thread.join(self.timeout)
def echo_server(self, sock):
def run(sock):
with sock:
conn, _ = sock.accept()
with conn:
event.wait(self.timeout)
msg = conn.recv(1024)
if not msg:
return
conn.sendall(msg)
event = threading.Event()
sock.settimeout(self.timeout)
self.thread = threading.Thread(target=run, args=(sock, ))
self.thread.start()
event.set()
def echo_client(self, addr, family):
with socket.socket(family=family) as sock:
sock.settimeout(self.timeout)
sock.connect(addr)
sock.sendall(b'foo')
self.assertEqual(sock.recv(1024), b'foo')
def test_tcp4(self):
port = support.find_unused_port()
with socket.create_server(("", port)) as sock:
self.echo_server(sock)
self.echo_client(("127.0.0.1", port), socket.AF_INET)
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test')
def test_tcp6(self):
port = support.find_unused_port()
with socket.create_server(("", port),
family=socket.AF_INET6) as sock:
self.echo_server(sock)
self.echo_client(("::1", port), socket.AF_INET6)
# --- dual stack tests
@unittest.skipIf(not socket.has_dualstack_ipv6(),
"dualstack_ipv6 not supported")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test')
def test_dual_stack_client_v4(self):
port = support.find_unused_port()
with socket.create_server(("", port), family=socket.AF_INET6,
dualstack_ipv6=True) as sock:
self.echo_server(sock)
self.echo_client(("127.0.0.1", port), socket.AF_INET)
@unittest.skipIf(not socket.has_dualstack_ipv6(),
"dualstack_ipv6 not supported")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test')
def test_dual_stack_client_v6(self):
port = support.find_unused_port()
with socket.create_server(("", port), family=socket.AF_INET6,
dualstack_ipv6=True) as sock:
self.echo_server(sock)
self.echo_client(("::1", port), socket.AF_INET6)
def test_main():
tests = [GeneralModuleTests, BasicTCPTest, TCPCloserTest, TCPTimeoutTest,
TestExceptions, BufferIOTest, BasicTCPTest2, BasicUDPTest,
UDPTimeoutTest, CreateServerTest, CreateServerFunctionalTest]
tests.extend([
NonBlockingTCPTests,
FileObjectClassTestCase,
UnbufferedFileObjectClassTestCase,
LineBufferedFileObjectClassTestCase,
SmallBufferedFileObjectClassTestCase,
UnicodeReadFileObjectClassTestCase,
UnicodeWriteFileObjectClassTestCase,
UnicodeReadWriteFileObjectClassTestCase,
NetworkConnectionNoServer,
NetworkConnectionAttributesTest,
NetworkConnectionBehaviourTest,
ContextManagersTest,
InheritanceTest,
NonblockConstantTest
])
tests.append(BasicSocketPairTest)
tests.append(TestUnixDomain)
tests.append(TestLinuxAbstractNamespace)
tests.extend([TIPCTest, TIPCThreadableTest])
tests.extend([BasicCANTest, CANTest])
tests.extend([BasicRDSTest, RDSTest])
tests.append(LinuxKernelCryptoAPI)
tests.append(BasicQIPCRTRTest)
tests.extend([
BasicVSOCKTest,
ThreadedVSOCKSocketStreamTest,
])
tests.extend([
CmsgMacroTests,
SendmsgUDPTest,
RecvmsgUDPTest,
RecvmsgIntoUDPTest,
SendmsgUDP6Test,
RecvmsgUDP6Test,
RecvmsgRFC3542AncillaryUDP6Test,
RecvmsgIntoRFC3542AncillaryUDP6Test,
RecvmsgIntoUDP6Test,
SendmsgTCPTest,
RecvmsgTCPTest,
RecvmsgIntoTCPTest,
SendmsgSCTPStreamTest,
RecvmsgSCTPStreamTest,
RecvmsgIntoSCTPStreamTest,
SendmsgUnixStreamTest,
RecvmsgUnixStreamTest,
RecvmsgIntoUnixStreamTest,
RecvmsgSCMRightsStreamTest,
RecvmsgIntoSCMRightsStreamTest,
# These are slow when setitimer() is not available
InterruptedRecvTimeoutTest,
InterruptedSendTimeoutTest,
TestSocketSharing,
SendfileUsingSendTest,
SendfileUsingSendfileTest,
])
tests.append(TestMSWindowsTCPFlags)
thread_info = support.threading_setup()
support.run_unittest(*tests)
support.threading_cleanup(*thread_info)
if __name__ == "__main__":
test_main()
|
ReducePNGFolder.py
|
# -*- coding: utf-8 -*-
import os
import multiprocessing
import threading
from PIL import Image
import sys
class ReducePNGFolder:
#Author: Juan Pablo Toledo Gavagnin
'''
# How to use #
python scriptName.py originalFolderURI destinationFolderURI
# Dependencies #
    This script requires Pillow to be installed beforehand:
pip install pillow
'''
# Attributes
FOLDER_ORIGINALFILES = ""
FOLDER_REDUCEDFILES = ""
CORES = multiprocessing.cpu_count()
# Global lists
listOriginalFiles = []
listReducedFiles = []
listFilesToProcess = []
def __init__(self):
self.FOLDER_ORIGINALFILES = sys.argv[1]
self.FOLDER_REDUCEDFILES = sys.argv[2]
self.checkFolder(self.FOLDER_ORIGINALFILES)
self.checkFolder(self.FOLDER_REDUCEDFILES)
self.CORES = multiprocessing.cpu_count()
        print(self.FOLDER_ORIGINALFILES + " " + self.FOLDER_REDUCEDFILES)
def main(self):
self.listOriginalFiles = self.listFolder(self.FOLDER_ORIGINALFILES)
self.listReducedFiles = self.listFolder(self.FOLDER_REDUCEDFILES)
self.listFilesToProcess = self.compareLists()
self.divide_jobs_multithreading(self.listFilesToProcess)
def checkFolder(self, uri):
if not(os.path.isdir(uri)):
print("The folder '" + uri + "' doesn't exist")
exit()
def listFolder(self, ROUTE):
listFiles = os.listdir(ROUTE)
print("There are " + str(len(listFiles)) + " files in " + ROUTE)
return listFiles
    # List handling
def compareLists(self):
lista = []
for fileItem in self.listOriginalFiles:
if fileItem not in self.listReducedFiles:
print("The file " + fileItem + " is not reduced")
lista.append(fileItem)
return lista
    def divide_jobs_multithreading(self, lista):
        # 1 - Divide the elements into separate lists, one per thread
        # 2 - The list is split according to the number of CPU cores
        listSize = len(lista)
        listOfList = []
        sliceSize = listSize // self.CORES
        remain = listSize % self.CORES
        iterator = iter(lista)
        elemento = 1
        for i in range(self.CORES):
            listOfList.append([])
            for j in range(sliceSize):
                listOfList[i].append(next(iterator))
                elemento += 1
            if remain:
                listOfList[i].append(next(iterator))
                elemento += 1
                remain -= 1
        # With each list, create a new thread so the work gets processed faster
        for listThread in listOfList:
            threading.Thread(target=self.reducePicture, args=(listThread,)).start()
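    # Worked split (a sketch): with 8 files on a 3-core machine, sliceSize = 8 // 3 = 2
    # and remain = 8 % 3 = 2, so the three threads receive 3, 3 and 2 files
    # respectively and every file is handled exactly once.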
    # Generate the reduced image
    def reducePicture(self, listFiles):
        for item in listFiles:
            print("Reducing " + item)
            imagenOriginal = Image.open(self.FOLDER_ORIGINALFILES + os.path.sep + item)
            # resize() returns a new image, so keep the result before saving
            imagenReducida = imagenOriginal.resize((400, 400), Image.ANTIALIAS)
            imagenReducida.save(self.FOLDER_REDUCEDFILES + os.path.sep + item, quality=20, optimize=True)
objeto = ReducePNGFolder()
objeto.main()
|
call.py
|
# Copyright (c) 2014-2015, Ruslan Baratov
# All rights reserved.
# Adapted to python3 version of: http://stackoverflow.com/questions/4984428
import os
import platform
import subprocess
import sys
import threading
import time
# Tests:
#
# Windows:
# * Control Panel -> Region -> Administrative -> Current language for non-Unicode programs: "Russian (Russia)"
# * cd to directory with name like 'привет' and run 'polly.py --verbose'
def tee(infile, discard, logging, console=None):
"""Print `infile` to `files` in a separate thread."""
def fanout():
discard_counter = 0
for line in iter(infile.readline, b''):
# use the same encoding as stdout/stderr
s = line.decode(
encoding=sys.stdout.encoding,
errors='replace'
)
s = s.replace('\r', '')
s = s.replace('\t', ' ')
s = s.rstrip() # strip spaces and EOL
s += '\n' # append stripped EOL back
logging.write(s)
if console is None:
continue
if discard is None:
console.write(s)
console.flush()
continue
if discard_counter == 0:
console.write(s)
console.flush()
discard_counter += 1
if discard_counter == discard:
discard_counter = 0
infile.close()
t = threading.Thread(target=fanout)
t.daemon = True
t.start()
return t
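# Minimal usage sketch (assuming a hypothetical `proc` subprocess and a `log`
# object exposing the `write()` method that tee() expects): forward the child's
# stdout to the log while echoing only every 100th line to the console:
#   t = tee(proc.stdout, discard=100, logging=log, console=sys.stdout)
#   t.join()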
def teed_call(cmd_args, logging, output_filter=None):
p = subprocess.Popen(
cmd_args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=os.environ,
bufsize=0
)
threads = []
output_pipe = p.stdout
if output_filter:
filter_p = subprocess.Popen(
output_filter,
stdin=output_pipe,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=os.environ,
bufsize=0
)
# also pipe filter error to stderr and log
threads.append(tee(filter_p.stderr, logging.discard, logging, sys.stderr))
output_pipe = filter_p.stdout
if logging.verbosity != 'silent':
threads.append(tee(output_pipe, logging.discard, logging, sys.stdout))
threads.append(tee(p.stderr, logging.discard, logging, sys.stderr))
else:
threads.append(tee(output_pipe, logging.discard, logging))
threads.append(tee(p.stderr, logging.discard, logging))
for t in threads:
t.join() # wait for IO completion
return p.wait()
def call(call_args,logging, cache_file='', ignore=False, sleep=0, output_filter=None):
pretty = 'Execute command: [\n'
for i in call_args:
pretty += ' `{}`\n'.format(i)
pretty += ']\n'
print(pretty)
logging.write(pretty)
# print one line version
oneline = ''
for i in call_args:
oneline += ' "{}"'.format(i)
oneline = "[{}]>{}\n".format(os.getcwd(), oneline)
if logging.verbosity != 'silent':
print(oneline)
logging.write(oneline)
x = teed_call(call_args, logging, output_filter)
if x == 0 or ignore:
time.sleep(sleep)
return
if os.path.exists(cache_file):
os.unlink(cache_file)
logging.log_file.close()
    print('Command exited with status "{}": {}'.format(x, oneline))
print('Log: {}'.format(logging.log_path))
logging.print_last_lines()
print('*** FAILED ***')
sys.exit(1)
|
client.py
|
# coding: utf-8
import time
import pickle
import socket
import random
import logging
import argparse
import threading
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
datefmt='%m-%d %H:%M:%S')
def main(name, port, ring, timeout):
# Create a logger for the client
logger = logging.getLogger('Client {}'.format(name))
# UDP Socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.settimeout(timeout)
sock.bind(('localhost', port))
# Generate a request
order = {'hamburger': 0, 'fries': 0, 'drink': 0}
quantity = random.randint(1,5)
for i in range(quantity):
order[random.choice(['hamburger', 'fries', 'drink'])] += 1
# Wait for a random time
delta = random.gauss(2, 0.5)
logger.info('Wait for %f seconds', delta)
time.sleep(delta)
# Request some food
logger.info('Request some food: %s', order)
p = pickle.dumps({'method': 'ORDER', 'args': order})
# p = pickle.dumps({'method': 'ORDER', 'args':{'hamburger': 1, 'fries': 0, 'drink': 0}})
sock.sendto(p, ring)
# Wait for Ticket
p, addr = sock.recvfrom(1024)
o = pickle.loads(p)
logger.info('Received ticket %s', o['args'])
# Pickup order
logger.info('Pickup order %s', o['args'])
p = pickle.dumps({"method": 'PICKUP', "args": o['args']})
sock.sendto(p, ring)
# Wait for order
p, addr = sock.recvfrom(1024)
o = pickle.loads(p)
logger.info('Got order %s', o['args'])
return 0
#Close socket
#thread.join()
#socket.close()
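# Wire format used above (a sketch): every message is a pickled dict carrying a
# 'method' and an 'args' field, e.g.
#   pickle.dumps({'method': 'ORDER', 'args': {'hamburger': 1, 'fries': 0, 'drink': 2}})
#   pickle.dumps({'method': 'PICKUP', 'args': ticket})  # ticket comes from the ORDER reply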
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Food order client')
parser.add_argument('-p', dest='port', type=int, help='client port', default=5004)
parser.add_argument('-r', dest='ring', type=int, help='ring ports ', default=5000)
parser.add_argument('-t', dest='timeout', type=int, help='socket timeout', default=60)
parser.add_argument('-c', dest='clients', type=int, help='num clients ', default=3)
args = parser.parse_args()
names=['Batman','IronMan','WonderWoman','BlackWidow','Nakia','CamilaUachave','DiogoGomes','JeanBrito','MarioAntunes']
for i in range(args.clients):
client_name="{} {}".format(random.choice(names),i)
thread = threading.Thread(target=main, args=(client_name, args.port+i, ('localhost', args.ring), args.timeout))
thread.start()
|
wsdump.py
|
#!/Users/eo/Dev/pyProj/vectorbt/.venv/bin/python3
import argparse
import code
import sys
import threading
import time
import ssl
import gzip
import zlib
import six
from six.moves.urllib.parse import urlparse
import websocket
try:
import readline
except ImportError:
pass
def get_encoding():
encoding = getattr(sys.stdin, "encoding", "")
if not encoding:
return "utf-8"
else:
return encoding.lower()
OPCODE_DATA = (websocket.ABNF.OPCODE_TEXT, websocket.ABNF.OPCODE_BINARY)
ENCODING = get_encoding()
class VAction(argparse.Action):
def __call__(self, parser, args, values, option_string=None):
if values is None:
values = "1"
try:
values = int(values)
except ValueError:
values = values.count("v") + 1
setattr(args, self.dest, values)
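# Verbosity parsing sketch: "-v" yields 1, "-v 2" yields 2 and "-vvv" yields 3
# (when the value is not an integer, the fallback counts the letters).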
def parse_args():
parser = argparse.ArgumentParser(description="WebSocket Simple Dump Tool")
parser.add_argument("url", metavar="ws_url",
help="websocket url. ex. ws://echo.websocket.org/")
parser.add_argument("-p", "--proxy",
help="proxy url. ex. http://127.0.0.1:8080")
parser.add_argument("-v", "--verbose", default=0, nargs='?', action=VAction,
dest="verbose",
help="set verbose mode. If set to 1, show opcode. "
"If set to 2, enable to trace websocket module")
parser.add_argument("-n", "--nocert", action='store_true',
help="Ignore invalid SSL cert")
parser.add_argument("-r", "--raw", action="store_true",
help="raw output")
parser.add_argument("-s", "--subprotocols", nargs='*',
help="Set subprotocols")
parser.add_argument("-o", "--origin",
help="Set origin")
parser.add_argument("--eof-wait", default=0, type=int,
help="wait time(second) after 'EOF' received.")
parser.add_argument("-t", "--text",
help="Send initial text")
parser.add_argument("--timings", action="store_true",
help="Print timings in seconds")
parser.add_argument("--headers",
help="Set custom headers. Use ',' as separator")
return parser.parse_args()
class RawInput:
def raw_input(self, prompt):
if six.PY3:
line = input(prompt)
else:
line = raw_input(prompt)
if ENCODING and ENCODING != "utf-8" and not isinstance(line, six.text_type):
line = line.decode(ENCODING).encode("utf-8")
elif isinstance(line, six.text_type):
line = line.encode("utf-8")
return line
class InteractiveConsole(RawInput, code.InteractiveConsole):
def write(self, data):
sys.stdout.write("\033[2K\033[E")
# sys.stdout.write("\n")
sys.stdout.write("\033[34m< " + data + "\033[39m")
sys.stdout.write("\n> ")
sys.stdout.flush()
def read(self):
return self.raw_input("> ")
class NonInteractive(RawInput):
def write(self, data):
sys.stdout.write(data)
sys.stdout.write("\n")
sys.stdout.flush()
def read(self):
return self.raw_input("")
def main():
start_time = time.time()
args = parse_args()
if args.verbose > 1:
websocket.enableTrace(True)
options = {}
if args.proxy:
p = urlparse(args.proxy)
options["http_proxy_host"] = p.hostname
options["http_proxy_port"] = p.port
if args.origin:
options["origin"] = args.origin
if args.subprotocols:
options["subprotocols"] = args.subprotocols
opts = {}
if args.nocert:
opts = {"cert_reqs": ssl.CERT_NONE, "check_hostname": False}
if args.headers:
options['header'] = list(map(str.strip, args.headers.split(',')))
ws = websocket.create_connection(args.url, sslopt=opts, **options)
if args.raw:
console = NonInteractive()
else:
console = InteractiveConsole()
print("Press Ctrl+C to quit")
def recv():
try:
frame = ws.recv_frame()
except websocket.WebSocketException:
return websocket.ABNF.OPCODE_CLOSE, None
if not frame:
raise websocket.WebSocketException("Not a valid frame %s" % frame)
elif frame.opcode in OPCODE_DATA:
return frame.opcode, frame.data
elif frame.opcode == websocket.ABNF.OPCODE_CLOSE:
ws.send_close()
return frame.opcode, None
elif frame.opcode == websocket.ABNF.OPCODE_PING:
ws.pong(frame.data)
return frame.opcode, frame.data
return frame.opcode, frame.data
def recv_ws():
while True:
opcode, data = recv()
msg = None
if six.PY3 and opcode == websocket.ABNF.OPCODE_TEXT and isinstance(data, bytes):
data = str(data, "utf-8")
            if isinstance(data, bytes) and len(data) > 2 and data[:2] == b'\037\213':  # gzip magic number
try:
data = "[gzip] " + str(gzip.decompress(data), "utf-8")
except:
pass
elif isinstance(data, bytes):
try:
data = "[zlib] " + str(zlib.decompress(data, -zlib.MAX_WBITS), "utf-8")
except:
pass
if isinstance(data, bytes):
data = repr(data)
if args.verbose:
msg = "%s: %s" % (websocket.ABNF.OPCODE_MAP.get(opcode), data)
else:
msg = data
if msg is not None:
if args.timings:
console.write(str(time.time() - start_time) + ": " + msg)
else:
console.write(msg)
if opcode == websocket.ABNF.OPCODE_CLOSE:
break
thread = threading.Thread(target=recv_ws)
thread.daemon = True
thread.start()
if args.text:
ws.send(args.text)
while True:
try:
message = console.read()
ws.send(message)
except KeyboardInterrupt:
return
except EOFError:
time.sleep(args.eof_wait)
return
if __name__ == "__main__":
try:
main()
except Exception as e:
print(e)
|
Network.py
|
from pyramid.response import Response
from pyramid.view import view_config
import os
import sys
import time
import ujson
from datetime import datetime, timedelta
from lxml import etree, html
from .config import Config
import logging
log = logging.getLogger(__name__)
import networkx as nx
from networkx.readwrite import json_graph
from .Helpers import *
import multiprocessing
from pymongo.errors import (
OperationFailure
)
import pymongo
class Network:
def __init__(self, request):
"""For a given site - assemble the entity graph
@params:
request.matchdict: code, the site of interest
request.matchdict: explore, the type of graph being requested
"""
self.request = request
self.db = mdb(request)
self.site = request.matchdict['code']
self.graph_type = request.matchdict['explore']
claims, site = verify_access(request, site=self.site)
self.eac_path = site['eac']
self.source_map = site['map']
self.name = site['name']
self.url = site['url']
log.debug("Processing site: %s, data path: %s" % (self.site, self.eac_path))
def build(self) :
# is the data available? return now; nothing to do
doc = self.db.network.find_one({ 'site': self.site })
if doc is not None:
log.debug('Graph already built. No need to build it again')
return
# are we in the process of building it? return now; nothing to do
doc = self.db.network_progress.find_one({ 'site': self.site })
#print doc
if doc is not None:
log.debug('Graph currently being built')
return
# OTHERWISE:
# generate the list of datafiles and build the graph
for (dirpath, dirnames, filenames) in os.walk(self.eac_path):
if dirpath == self.eac_path:
datafiles = dict((fname, "%s/%s" % (dirpath, fname)) for fname in filenames)
# store a trace that indicates we're counting
total = len(list(datafiles.items()))
log.debug("Total number of entities in dataset: %s" % total)
# remove any previous progress traces that might exist
self.db.network_progress.insert({
'processed': 0,
'total': total,
'site': self.site,
'createdAt': datetime.utcnow()
})
data_age = self.request.registry.app_config['general']['data_age']
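        # ensure_index() raises OperationFailure when the TTL index already exists
        # with a different expireAfterSeconds value; in that case it is dropped and
        # recreated with the configured data_age.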
try:
self.db.network_progress.ensure_index('createdAt', expireAfterSeconds = int(data_age))
except OperationFailure:
self.db.network_progress.drop_index('createdAt_1')
self.db.network_progress.ensure_index('createdAt', expireAfterSeconds = int(data_age))
j = multiprocessing.Process(target=self.build_graph, args=(self.graph_type, datafiles, total))
j.start()
def build_graph(self, graph_type, datafiles, total):
log.debug('Building the graph.')
t1 = time.time()
graph = nx.Graph()
count = 0
save_counter = 0
nodes = {}
        for fname, fpath in list(datafiles.items()):
            log.debug("Processing: %s" % fpath)
            count += 1
            try:
                tree = etree.parse(fpath)
            except (TypeError, etree.XMLSyntaxError):
                log.error("Invalid XML file: %s. %s." % (fpath, sys.exc_info()[1]))
                continue
if self.graph_type == 'byEntity':
self.entities_as_nodes(graph, tree)
elif self.graph_type == 'byFunction':
self.functions_as_nodes(graph, tree)
if save_counter >= 20:
# save a progress count
self.db.network_progress.update(
{ 'site': self.site },
{ '$set': { 'processed': count }}
)
# reset the counter
save_counter = 0
save_counter +=1
# count the number of connections
for n in graph:
graph.node[n]['connections'] = len(list(graph.neighbors(n)))
# save the graph
self.db.network.insert({
'site': self.site,
'graph_type': self.graph_type,
'graph_data': json_graph.node_link_data(graph),
'createdAt': datetime.utcnow()
})
data_age = self.request.registry.app_config['general']['data_age']
try:
self.db.network.ensure_index('createdAt', expireAfterSeconds = int(data_age))
except OperationFailure:
self.db.network.drop_index('createdAt_1')
self.db.network.ensure_index('createdAt', expireAfterSeconds = int(data_age))
# all done
t2 = time.time()
log.debug("Time taken to prepare data '/site': %s" % (t2 - t1))
return
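    # Shape of the stored 'graph_data' field (a sketch of networkx node_link_data output):
    #   {'directed': False, 'multigraph': False, 'graph': {},
    #    'nodes': [{'id': '<recordId>', 'type': ..., 'name': ..., 'connections': ...}, ...],
    #    'links': [{'source': ..., 'target': ..., 'sid': ..., 'tid': ...}, ...]}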
def functions_as_nodes(self, graph, tree):
node_id = get(tree, '/e:eac-cpf/e:control/e:recordId')
ntype = get(tree, "/e:eac-cpf/e:control/e:localControl[@localType='typeOfEntity']/e:term")
url = get(tree, "/e:eac-cpf/e:cpfDescription/e:identity/e:entityId[1]")
df = get(tree, "/e:eac-cpf/e:cpfDescription/e:description/e:existDates/e:dateRange/e:fromDate", attrib="standardDate")
dt = get(tree, "/e:eac-cpf/e:cpfDescription/e:description/e:existDates/e:dateRange/e:toDate", attrib="standardDate")
name = self.get_entity_name(tree, ntype)
if len(df) == 0:
df = None
if len(dt) == 0:
dt = None
graph.add_node(node_id)
graph.node[node_id]['type'] = ntype
graph.node[node_id]['name'] = name
graph.node[node_id]['url'] = url
graph.node[node_id]['df'] = df
graph.node[node_id]['dt'] = dt
if tree.xpath('/e:eac-cpf/e:cpfDescription/e:description/e:functions/e:function/e:term', namespaces={ 'e': 'urn:isbn:1-931666-33-4' } ):
for function in get(tree, '/e:eac-cpf/e:cpfDescription/e:description/e:functions/e:function/e:term', element=True):
## graph.add_node(function.text, { 'type': function.text, 'name': function.text, 'url': None, 'df': None, 'dt': None })
graph.add_node(function.text)
                graph.node[function.text]['type'] = function.text
                graph.node[function.text]['name'] = function.text
                graph.node[function.text]['url'] = None
                graph.node[function.text]['df'] = None
                graph.node[function.text]['dt'] = None
graph.add_edge(node_id, function.text, sid=node_id, tid=function.text)
else:
for function in get(tree, '/e:eac-cpf/e:cpfDescription/e:description/e:occupations/e:occupation/e:term', element=True):
##graph.add_node(function.text, { 'type': function.text, 'name': function.text, 'url': None, 'df': None, 'dt': None })
graph.add_node(function.text)
                graph.node[function.text]['type'] = function.text
                graph.node[function.text]['name'] = function.text
                graph.node[function.text]['url'] = None
                graph.node[function.text]['df'] = None
                graph.node[function.text]['dt'] = None
graph.add_edge(node_id, function.text, sid=node_id, tid=function.text)
def entities_as_nodes(self, graph, tree):
"""
:type graph: object
"""
node_id = get(tree, '/e:eac-cpf/e:control/e:recordId')
ntype = get(tree, "/e:eac-cpf/e:control/e:localControl[@localType='typeOfEntity']/e:term")
core_type = get(tree, "/e:eac-cpf/e:cpfDescription/e:identity/e:entityType")
url = get(tree, "/e:eac-cpf/e:cpfDescription/e:identity/e:entityId[1]")
df = get(tree, "/e:eac-cpf/e:cpfDescription/e:description/e:existDates/e:dateRange/e:fromDate", attrib="standardDate")
dt = get(tree, "/e:eac-cpf/e:cpfDescription/e:description/e:existDates/e:dateRange/e:toDate", attrib="standardDate")
name = self.get_entity_name(tree, ntype)
if len(df) == 0:
df = None
if len(dt) == 0:
dt = None
# is the node_id an empty list or some other bit of nonsense
# if it is: skip to the next one
if not node_id:
return
if node_id not in graph.node:
try:
graph.add_node(node_id)
except:
                # something seriously wrong happened; this should raise an exception so we can clean up the network_progress
e = sys.exc_info()[0]
log.error("Failed to insert node %s" % e)
return
#if we get here we have a valid node
graph.node[node_id]['type'] = ntype
graph.node[node_id]['coreType'] = core_type
graph.node[node_id]['name'] = name
graph.node[node_id]['url'] = url
graph.node[node_id]['df'] = df
graph.node[node_id]['dt'] = dt
related_entities = len(get(tree, '/e:eac-cpf/e:cpfDescription/e:relations/e:cpfRelation', element=True))
related_resources = get(tree, '/e:eac-cpf/e:cpfDescription/e:relations/e:resourceRelation[@resourceRelationType="other"]', element=True)
related_publications = 0
related_dobjects = 0
for r in related_resources:
if get(r, 'e:relationEntry', attrib='localType') == "published":
related_publications += 1
elif get(r, 'e:relationEntry', attrib='localType') == "digitalObject":
related_dobjects += 1
graph.node[node_id]['relatedEntities'] = related_entities
graph.node[node_id]['relatedPublications'] = related_publications
graph.node[node_id]['relatedDobjects'] = related_dobjects
neighbours = get(tree, '/e:eac-cpf/e:cpfDescription/e:relations/e:cpfRelation', element=True)
for node in neighbours:
try:
neighbour_ref = node.attrib['{http://www.w3.org/1999/xlink}href']
neighbour_id = os.path.basename(neighbour_ref).split('b.htm')[0]
graph.add_node(neighbour_id)
graph.add_edge(node_id, neighbour_id, sid=node_id, tid=neighbour_id)
except KeyError:
pass
#print node_id, node_source, node_type
def get_entity_name(self, tree, ntype):
if ntype == 'Person':
if get(tree, "/e:eac-cpf/e:cpfDescription/e:identity/e:nameEntry/e:part[@localType='familyname']"):
ln = get(tree, "/e:eac-cpf/e:cpfDescription/e:identity/e:nameEntry/e:part[@localType='familyname']")
gn = get(tree, "/e:eac-cpf/e:cpfDescription/e:identity/e:nameEntry/e:part[@localType='givenname']")
return "%s, %s" % (ln, gn)
else:
fn = get(tree, "/e:eac-cpf/e:cpfDescription/e:identity/e:nameEntry[position() = 1]/e:part")
fn = [e for e in fn if e is not None]
if type(fn) == list:
return ', '.join(fn)
return fn
else:
fn = get(tree, "/e:eac-cpf/e:cpfDescription/e:identity/e:nameEntry[position() = 1]/e:part")
if type(fn) == list:
fn = [e for e in fn if e is not None]
return ', '.join(fn)
return fn
    def calculate_average_degree(self):
        # read the graph stored by build_graph() in the 'network' collection
        doc = self.db.network.find_one({'site': self.site, 'graph_type': self.graph_type})
        g = json_graph.node_link_graph(doc['graph_data'], directed=False, multigraph=False)
        return nx.degree_centrality(g)
|
example_stream_buffer.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# File: example_stream_buffer.py
#
# Part of ‘UNICORN Binance WebSocket API’
# Project website: https://github.com/oliver-zehentleitner/unicorn-binance-websocket-api
# Documentation: https://oliver-zehentleitner.github.io/unicorn-binance-websocket-api
# PyPI: https://pypi.org/project/unicorn-binance-websocket-api/
#
# Author: Oliver Zehentleitner
# https://about.me/oliver-zehentleitner
#
# Copyright (c) 2019-2020, Oliver Zehentleitner
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from __future__ import print_function
from unicorn_binance_websocket_api.unicorn_binance_websocket_api_manager import BinanceWebSocketApiManager
import logging
import time
import threading
import os
# https://docs.python.org/3/library/logging.html#logging-levels
logging.basicConfig(level=logging.INFO,
filename=os.path.basename(__file__) + '.log',
format="{asctime} [{levelname:8}] {process} {thread} {module}: {message}",
style="{")
# create instance of BinanceWebSocketApiManager
binance_websocket_api_manager = BinanceWebSocketApiManager()
markets = ['xrpbearbusd', 'zeceth', 'cndbtc', 'dashbtc', 'atompax', 'perlbtc', 'ardreth', 'zecbnb', 'bchabctusd',
'usdsbusdt', 'winbnb', 'xzcxrp', 'bchusdc', 'wavesbnb', 'kavausdt', 'btsusdt', 'chzbnb', 'tusdbnb',
'xtzbusd', 'bcptusdc', 'dogebnb', 'eosbearusdt', 'ambbnb', 'wrxbnb', 'poabtc', 'wanbtc', 'ardrbtc', 'icnbtc',
'tusdusdt', 'atombusd', 'nxseth', 'bnbusdt', 'trxxrp', 'erdpax', 'erdbtc', 'icxbusd', 'nulsbtc', 'hotusdt',
'wavespax', 'zilbnb', 'arnbtc', 'nulsusdt', 'wintrx', 'npxsbtc', 'busdtry', 'qtumbnb', 'eosbtc', 'xlmpax',
'tomobnb', 'eosbnb', 'engbtc', 'linketh', 'xrpbtc', 'fetbtc', 'stratusdt', 'navbnb', 'bcneth', 'yoyobtc',
'nanobnb', 'saltbtc', 'tfuelusdc', 'skybnb', 'fuelbtc', 'bnbusdc', 'inseth', 'btcpax', 'batbtc', 'rlceth',
'arketh', 'ltcpax', 'ltcbusd', 'duskbtc', 'mftusdt', 'bntusdt', 'mdabtc', 'enjbtc', 'poabnb', 'nanobusd',
'paxtusd', 'hotbtc', 'bcdbtc', 'beambnb', 'trxeth', 'omgbnb', 'cdtbtc', 'eosusdc', 'dashbusd', 'cocosbtc',
'dasheth', 'xrptusd', 'atomtusd', 'rcneth', 'rpxeth', 'xlmusdc', 'aionbusd', 'nxsbtc', 'chateth', 'repbtc',
'tctusdt', 'linkusdt', 'nasbtc', 'usdsusdc', 'xvgbtc', 'elfeth', 'ctxcbtc', 'cmteth', 'gnteth', 'usdspax',
'zilbtc', 'batpax', 'stratbtc', 'xzcbtc', 'iotausdt', 'etcbnb', 'ankrusdt', 'xlmeth', 'loombtc', 'erdusdc',
'rdnbnb', 'icneth', 'vetbtc', 'cvcusdt', 'ftmpax', 'ethbullusdt', 'edoeth', 'steemeth', 'gobnb', 'hsrbtc',
'ambbtc', 'bchabcbtc', 'dntbtc', 'btctusd', 'denteth', 'snglsbtc', 'eosbullusdt', 'xlmtusd', 'tnteth',
'sysbnb', 'renusdt', 'zrxusdt', 'xlmbtc', 'stormbtc', 'ncashbnb', 'omgusdt', 'troyusdt', 'venbtc', 'modbtc',
'dogepax', 'ontusdc', 'eurbusd', 'tctbnb', 'gxsbtc', 'celrbnb', 'adausdt', 'beambtc', 'elfbtc', 'celrbtc',
'rvnusdt', 'poaeth', 'wavesusdc', 'trxbnb', 'trxusdc', 'ethbearusdt', 'ethpax', 'bateth', 'kavabtc',
'paxbtc', 'trigbnb', 'btcusdc', 'oneusdc', 'xrptry', 'stxusdt', 'strateth', 'lendeth', 'neousdc',
'mithusdt', 'btcngn', 'blzeth', 'evxeth', 'dnteth', 'grsbtc', 'arneth', 'iotabnb', 'waneth', 'xtzbnb',
'subeth', 'btsbtc', 'cvceth', 'ethusdc', 'etctusd', 'cloakbtc', 'grseth', 'eospax', 'cdteth', 'bchusdt',
'lskusdt', 'enjbusd', 'drepbtc', 'manaeth', 'tomousdt', 'algobnb', 'wtceth', 'linkpax', 'batbnb', 'sceth',
'rvnbusd', 'cvcbnb', 'manabtc', 'gasbtc', 'stxbtc', 'cloaketh', 'neotusd', 'lrceth', 'thetabtc', 'dogeusdt',
'aionbnb', 'viabtc', 'keyeth', 'nanoeth', 'ncasheth', 'bgbpusdc', 'ltobnb', 'snmeth', 'adabtc', 'btseth',
'qtumbusd', 'wtcbnb', 'dcrbtc', 'fttbnb', 'paxbnb', 'insbtc', 'gntbnb', 'etheur', 'dashusdt', 'rcnbtc',
'btcusdt', 'wanusdt', 'powrbnb', 'xmrbnb', 'trigeth', 'xzceth', 'bchbtc', 'qspbnb', 'scbnb', 'mcoeth',
'powrbtc', 'algotusd', 'ankrbtc', 'tusdeth', 'keybtc', 'usdcusdt', 'ftmusdc', 'atombnb', 'zenbtc', 'dockbtc',
'neobtc', 'phbbnb', 'bnbpax', 'brdbnb', 'trxusdt', 'trxbusd', 'mtlbtc', 'ftmtusd', 'perlusdc', 'mithbnb',
'eosbullbusd', 'reqeth', 'bccbnb', 'veneth', 'loombnb', 'trxpax', 'usdcpax', 'stormusdt', 'ognbtc', 'gvtbtc',
'iotaeth', 'naseth', 'drepusdt', 'gvteth', 'wrxusdt', 'bchabcpax', 'ongbtc', 'usdcbnb', 'dgdeth', 'salteth',
'mtleth', 'bcnbnb', 'neblbnb', 'wanbnb', 'ontusdt', 'npxsusdt', 'mftbtc', 'eosbearbusd', 'bntbtc', 'gtoeth',
'modeth', 'etcusdc', 'veteth', 'bcptpax', 'atomusdc', 'duskpax', 'kavabnb', 'lunbtc', 'adxbtc', 'bnteth',
'funbtc', 'knceth', 'dogebtc', 'bchsvpax', 'bcpttusd', 'osteth', 'oaxeth', 'wabibtc', 'appcbtc', 'qkcbtc',
'nanousdt', 'wingsbtc', 'hbarusdt', 'eurusdt', 'waveseth', 'asteth', 'linkbusd', 'btttusd', 'zecusdc',
'bnbusds', 'linkbtc', 'venusdt', 'hotbnb', 'usdtrub', 'tctbtc', 'ankrpax', 'btctry', 'adabnb', 'polybtc',
'bcceth', 'enjeth', 'bnbbusd', 'repbnb', 'bullusdt', 'vitebtc', 'btgbtc', 'renbtc', 'thetausdt', 'troybtc',
'dentbtc', 'ostbtc', 'nxsbnb', 'mithbtc', 'xmrbtc', 'tomobtc', 'nulseth', 'phbbtc', 'duskbnb', 'yoyoeth',
'ontbusd', 'btgeth', 'etcusdt', 'atomusdt', 'hcbtc', 'brdbtc', 'fttbtc', 'celrusdt', 'lskbnb', 'phbpax',
'xtzbtc', 'batusdt', 'viteusdt', 'trxbtc', 'bchtusd', 'xtzusdt', 'ftmbtc', 'enjbnb', 'arkbtc', 'wavesusdt',
'ftmusdt', 'neobusd', 'stormbnb', 'luneth', 'gntbtc', 'gtousdt', 'chzusdt', 'sntbtc', 'bandbnb', 'hoteth',
'wingseth', 'mcobtc', 'docketh', 'drepbnb', 'eosusdt', 'eostusd', 'npxseth', 'thetaeth', 'iotxbtc', 'phxbnb',
'enjusdt', 'tfuelbnb', 'mcobnb', 'ontpax', 'dcrbnb', 'batusdc', 'snglseth', 'qlcbtc', 'qspeth', 'cndeth',
'appcbnb', 'wprbtc', 'sysbtc', 'iostusdt', 'btceur', 'mtlusdt', 'ethrub', 'tfuelpax', 'maticusdt', 'ftmbnb',
'xrpbusd', 'iotxusdt', 'tusdbtusd', 'trigbtc', 'atombtc', 'bchpax', 'eosbusd', 'zileth', 'gtotusd',
'xrpbullusdt', 'onetusd', 'algobtc', 'bchsvusdt', 'gtopax', 'etceth', 'vibebtc', 'bttusdt', 'repeth',
'iostbnb', 'usdttry', 'btsbnb', 'ankrbnb', 'dltbnb', 'snteth', 'linktusd', 'nknusdt', 'rpxbtc', 'rdneth',
'cocosusdt', 'etcbusd', 'btttrx', 'bandbtc', 'steembnb', 'zecpax', 'viabnb', 'cosbnb', 'mtheth', 'xrpusdc',
'xemeth', 'pivxbnb', 'phxbtc', 'zilusdt', 'poeeth', 'bnbeur', 'bandusdt', 'vetbnb', 'lendbtc', 'xlmbnb',
'duskusdt', 'mfteth', 'funusdt', 'adabusd', 'perlbnb', 'btcbusd', 'ltobtc', 'nasbnb', 'algousdt', 'zeneth',
'bchsvusdc', 'mcousdt', 'venbnb', 'hceth', 'fetusdt', 'edobtc', 'mftbnb', 'cosusdt', 'arpausdt', 'xmrusdt',
'ctxcusdt', 'bqxbtc', 'npxsusdc', 'icxbnb', 'bchbnb', 'phbusdc', 'tomousdc', 'nulsbnb', 'rcnbnb', 'arpabnb',
'qtumbtc', 'keyusdt', 'agibtc', 'mblbtc', 'eoseth', 'tusdbtc', 'aioneth', 'storjbtc', 'lsketh', 'bchsvbtc',
'bntbusd', 'ncashbtc', 'mblbnb', 'polybnb', 'aebnb', 'ltceth', 'dogeusdc', 'wpreth', 'syseth', 'bcnbtc',
'ognusdt', 'nanobtc', 'astbtc', 'zrxeth', 'adxeth', 'gxseth', 'ethbearbusd', 'onepax', 'scbtc', 'icxbtc',
'ontbnb', 'qlceth', 'btsbusd', 'rlcbtc', 'chatbtc', 'wabibnb', 'renbnb', 'xrpbullbusd', 'wavesbtc', 'funeth',
'rlcbnb', 'phxeth', 'winbtc', 'storjeth', 'wavesbusd', 'iostbtc', 'icxeth', 'adatusd', 'nknbnb', 'btcrub',
'pivxbtc', 'perlusdt', 'bullbusd', 'bttusdc', 'bcptbtc', 'aebtc', 'ethusdt', 'ltousdt', 'subbtc', 'thetabnb',
'blzbtc', 'tfuelusdt', 'evxbtc', 'hbarbtc', 'ambeth', 'winusdt', 'qtumeth', 'dgdbtc', 'adaeth', 'busdusdt',
'xrpbnb', 'adapax', 'usdsbusds', 'cocosbnb', 'navbtc', 'rvnbtc', 'tnbbtc', 'bnbbtc', 'neopax', 'bearusdt',
'usdstusd', 'snmbtc', 'rvnbnb', 'gtobnb', 'phbtusd', 'hcusdt', 'btcusds', 'reqbtc', 'ognbnb', 'lrcbtc',
'xrpeth', 'loometh', 'zectusd', 'vibeeth', 'gobtc', 'bnbtry', 'bcdeth', 'qkceth', 'neoeth', 'paxusdt',
'bchsvtusd', 'fetbnb', 'yoyobnb', 'xlmbusd', 'skyeth', 'paxeth', 'ltcbtc', 'xvgeth', 'tnbeth', 'stratbusd',
'agieth', 'xlmusdt', 'lskbtc', 'bearbusd', 'hsreth', 'ctxcbnb', 'oaxbtc', 'qspbtc', 'iotxeth', 'qlcbnb',
'algousdc', 'etcpax', 'fueleth', 'aionusdt', 'xmreth', 'maticbtc', 'dashbnb', 'oneusdt', 'brdeth', 'viaeth',
'omgeth', 'ankrtusd', 'usdsusdt', 'ethtusd', 'wavestusd', 'iosteth', 'cmtbnb', 'ostbnb', 'ltcusdt', 'ethtry',
'zrxbtc', 'bchabcusdt', 'onebnb', 'beamusdt', 'nebleth', 'bcptbnb', 'adxbnb', 'ontbtc', 'bttbnb', 'dockusdt',
'bccbtc', 'omgbtc', 'algopax', 'neousdt', 'xrprub', 'busdngn', 'appceth', 'dentusdt', 'xzcbnb', 'tfueltusd',
'xembnb', 'arpabtc', 'ankrusdc', 'adausdc', 'kmdeth', 'troybnb', 'bnbeth', 'ltcusdc', 'databtc', 'blzbnb',
'naveth', 'btcbbtc', 'battusd', 'bnbngn', 'bchbusd', 'busdrub', 'ltctusd', 'vetbusd', 'ongbnb', 'fttusdt',
'bccusdt', 'ongusdt', 'engeth', 'usdctusd', 'etcbtc', 'gtousdc', 'mdaeth', 'vitebnb', 'erdusdt', 'dltbtc',
'bnbtusd', 'wtcbtc', 'xrpusdt', 'xrpeur', 'agibnb', 'trxtusd', 'ethbullbusd', 'iotabtc', 'xembtc',
'bchabcusdc', 'duskusdc', 'xrppax', 'mblusdt', 'kmdbtc', 'neblbtc', 'maticbnb', 'bnbrub', 'bcpteth',
'bttbtc', 'stxbnb', 'dlteth', 'onteth', 'vetusdt', 'ppteth', 'ethbtc', 'onebtc', 'ethbusd', 'zecbtc',
'erdbnb', 'xrpbearusdt', 'stratbnb', 'cmtbtc', 'cvcbtc', 'kncbtc', 'rpxbnb', 'zenbnb', 'cndbnb', 'ardrbnb',
'bchabcbusd', 'ltcbnb', 'pivxeth', 'skybtc', 'tntbtc', 'poebtc', 'steembtc', 'icxusdt', 'tfuelbtc', 'chzbtc',
'vibeth', 'winusdc', 'gtobtc', 'linkusdc', 'batbusd', 'rdnbtc', 'dataeth', 'bttpax', 'zrxbnb', 'vibbtc',
'neobnb', 'cosbtc', 'powreth', 'rlcusdt', 'hbarbnb', 'wabieth', 'bqxeth', 'aionbtc', 'aeeth', 'mthbtc',
'wrxbtc', 'pptbtc', 'nknbtc', 'zecusdt', 'stormeth', 'qtumusdt']
channels = ['kline_1m', 'kline_5m', 'kline_15m', 'kline_30m', 'kline_1h', 'kline_12h', 'kline_1w', 'trade',
'miniTicker', 'depth20']
for channel in channels:
binance_websocket_api_manager.create_stream(channel, markets)
def print_stream_data_from_stream_buffer(binance_websocket_api_manager):
print("waiting 30 seconds, then we start flushing the stream_buffer")
time.sleep(30)
while True:
if binance_websocket_api_manager.is_manager_stopping():
exit(0)
oldest_stream_data_from_stream_buffer = binance_websocket_api_manager.pop_stream_data_from_stream_buffer()
if oldest_stream_data_from_stream_buffer is False:
time.sleep(0.01)
else:
try:
                    # print the oldest entry taken from the stream_buffer
                    print(oldest_stream_data_from_stream_buffer)
except KeyError:
# Any kind of error...
# not able to process the data? write it back to the stream_buffer
binance_websocket_api_manager.add_to_stream_buffer(oldest_stream_data_from_stream_buffer)
# start a worker thread that moves the received stream_data from the stream_buffer to the print function
worker_thread = threading.Thread(target=print_stream_data_from_stream_buffer, args=(binance_websocket_api_manager,))
worker_thread.start()
time.sleep(5)
while True:
binance_websocket_api_manager.print_summary()
time.sleep(1)
|
demo.py
|
# Copyright 2020-2022 OpenDR European Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from data_generators import camera_image_generator, disk_image_generator, disk_image_with_detections_generator
import threading
import time
import numpy as np
import cv2
from flask import Flask, Response, render_template
from imutils.video import VideoStream
from opendr.engine.target import (
TrackingAnnotationList,
)
# OpenDR imports
from opendr.perception.object_tracking_2d import ObjectTracking2DFairMotLearner
from opendr.perception.object_tracking_2d import ObjectTracking2DDeepSortLearner
TEXT_COLOR = (255, 0, 255) # B G R
# Initialize the output frame and a lock used to ensure thread-safe
# exchanges of the output frames (useful when multiple browsers/tabs
# are viewing the stream)
output_frame = None
image_generator = None
lock = threading.Lock()
colors = [
(255, 0, 255),
(0, 0, 255),
(0, 255, 0),
(255, 0, 0),
(35, 69, 55),
(43, 63, 54),
(37, 70, 54),
(50, 67, 54),
(51, 66, 49),
(43, 75, 64),
(55, 65, 42),
(53, 63, 42),
(43, 46, 38),
(41, 41, 36),
(70, 54, 35),
(70, 54, 41),
(65, 54, 40),
(63, 55, 38),
(63, 54, 35),
(83, 73, 49),
(81, 65, 45),
(75, 65, 42),
(85, 74, 60),
(79, 64, 55),
(75, 67, 59),
(74, 75, 70),
(70, 71, 62),
(57, 62, 46),
(68, 54, 45),
(66, 52, 43),
(69, 54, 43),
(73, 59, 47),
(30, 52, 66),
(41, 55, 65),
(36, 54, 64),
(44, 87, 120),
(124, 129, 124),
(109, 120, 118),
(119, 132, 142),
(105, 125, 137),
(108, 94, 83),
(93, 78, 70),
(90, 76, 66),
(90, 76, 66),
(90, 77, 65),
(91, 82, 68),
(85, 77, 66),
(84, 79, 58),
(133, 113, 88),
(130, 127, 121),
(120, 109, 95),
(112, 110, 102),
(113, 110, 97),
(103, 109, 99),
(122, 124, 118),
(198, 234, 221),
(194, 230, 236),
]
# initialize a flask object
app = Flask(__name__)
@app.route("/")
def index():
# return the rendered template
return render_template("index.html")
def runnig_fps(alpha=0.1):
t0 = time.time()
fps_avg = 10
def wrapped():
nonlocal t0, alpha, fps_avg
t1 = time.time()
delta = t1 - t0
t0 = t1
fps_avg = alpha * (1 / delta) + (1 - alpha) * fps_avg
return fps_avg
return wrapped
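# Usage sketch: runnig_fps() returns a closure that keeps an exponential moving
# average of the instantaneous frame rate, where alpha weighs the newest sample:
#   fps = runnig_fps(alpha=0.1)
#   current_fps = fps()  # call once per processed frame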
def draw_fps(frame, fps):
cv2.putText(
frame,
f"{fps:.1f} FPS",
(10, frame.shape[0] - 10),
cv2.FONT_HERSHEY_SIMPLEX,
1,
TEXT_COLOR,
1,
)
def draw_dict(frame, dict, scale=5):
i = 0
for k, v in dict.items():
cv2.putText(
frame,
f"{k}: {v}",
(10, frame.shape[0] - 10 - 30 * scale * i),
cv2.FONT_HERSHEY_SIMPLEX,
scale,
TEXT_COLOR,
scale,
)
i += 1
def draw_predictions(frame, predictions: TrackingAnnotationList, is_centered=False, is_flipped_xy=True):
global colors
w, h, _ = frame.shape
for prediction in predictions.boxes:
prediction = prediction
if not hasattr(prediction, "id"):
prediction.id = 0
color = colors[int(prediction.id) * 7 % len(colors)]
x = prediction.left
y = prediction.top
if is_flipped_xy:
x = prediction.top
y = prediction.left
if is_centered:
x -= prediction.width
y -= prediction.height
cv2.rectangle(
frame,
(int(x), int(y)),
(
int(x + prediction.width),
int(y + prediction.height),
),
color,
2,
)
def fair_mot_tracking(model_name, device):
global vs, output_frame, lock
# Prep stats
fps = runnig_fps()
predict = model_name is not None and model_name != "None"
if predict:
# Init model
learner = ObjectTracking2DFairMotLearner(device=device)
if not os.path.exists("./models/" + model_name):
learner.download(model_name, "./models")
learner.load("./models/" + model_name, verbose=True)
print("Learner created")
else:
learner = None
# Loop over frames from the video stream
while True:
try:
t = time.time()
image = next(image_generator)
image_time = time.time() - t
t = time.time()
if predict:
predictions = learner.infer(image)
print("Found", len(predictions), "objects")
predict_time = time.time() - t
t = time.time()
frame = np.ascontiguousarray(
np.moveaxis(image.data, [0, 1, 2], [2, 0, 1]).copy()
)
if predict:
draw_predictions(frame, predictions)
frame = cv2.flip(frame, 1)
draw_time = time.time() - t
total_time = predict_time + image_time + draw_time
draw_dict(
frame,
{
"FPS": fps(),
"predict": str(int(predict_time * 100 / total_time)) + "%",
"get data": str(int(image_time * 100 / total_time)) + "%",
"draw": str(int(draw_time * 100 / total_time)) + "%",
# "tvec": tvec, "rvec": rvec, "f": [fx, fy],
},
1,
)
with lock:
output_frame = frame.copy()
except Exception as e:
print(e)
raise e
def deep_sort_tracking(model_name, device):
global vs, output_frame, lock
# Prep stats
fps = runnig_fps()
predict = model_name is not None and model_name != "None"
if predict:
learner = ObjectTracking2DDeepSortLearner(device=device)
if not os.path.exists("./models/" + model_name):
learner.download(model_name, "./models")
learner.load("./models/" + model_name, verbose=True)
print("Learner created")
else:
learner = None
# Loop over frames from the video stream
while True:
try:
t = time.time()
image_with_detections = next(image_generator)
image_time = time.time() - t
t = time.time()
if predict:
predictions = learner.infer(image_with_detections)
print("Found", len(predictions), "objects")
predict_time = time.time() - t
t = time.time()
frame = np.ascontiguousarray(
np.moveaxis(image_with_detections.data, [0, 1, 2], [2, 0, 1]).copy()
)
if predict:
draw_predictions(frame, predictions, is_centered=False, is_flipped_xy=False)
frame = cv2.flip(frame, 1)
draw_time = time.time() - t
total_time = predict_time + image_time + draw_time
draw_dict(
frame,
{
"FPS": fps(),
"predict": str(int(predict_time * 100 / total_time)) + "%",
"get data": str(int(image_time * 100 / total_time)) + "%",
"draw": str(int(draw_time * 100 / total_time)) + "%",
},
1,
)
with lock:
output_frame = frame.copy()
except Exception as e:
print(e)
raise e
def generate():
# grab global references to the output frame and lock variables
global output_frame, lock
# loop over frames from the output stream
while True:
# wait until the lock is acquired
with lock:
# check if the output frame is available, otherwise skip
# the iteration of the loop
if output_frame is None:
continue
# encode the frame in JPEG format
(flag, encodedImage) = cv2.imencode(".jpg", output_frame)
# ensure the frame was successfully encoded
if not flag:
continue
# yield the output frame in the byte format
yield (
b"--frame\r\n"
b"Content-Type: image/jpeg\r\n\r\n" +
bytearray(encodedImage) +
b"\r\n"
)
@app.route("/video_feed")
def video_feed():
# return the response generated along with the specific media
# type (mime type)
return Response(
generate(), mimetype="multipart/x-mixed-replace; boundary=frame"
)
# check to see if this is the main thread of execution
if __name__ == "__main__":
# construct the argument parser and parse command line arguments
ap = argparse.ArgumentParser()
ap.add_argument(
"-i", "--ip", type=str, required=True, help="IP address of the device"
)
ap.add_argument(
"-o",
"--port",
type=int,
required=True,
help="Ephemeral port number of the server (1024 to 65535)",
)
ap.add_argument(
"-m",
"--model_name",
type=str,
default="fairmot_dla34",
help="Model identifier",
)
ap.add_argument(
"-dp",
"--data_path",
type=str,
default="",
help="Path for disk-based data generators",
)
ap.add_argument(
"-ds",
"--data_splits",
type=str,
default="",
help="Path for mot dataset splits",
)
ap.add_argument(
"-s", "--source", type=str, default="disk", help="Data source",
)
ap.add_argument(
"-v",
"--video_source",
type=int,
default=0,
help="ID of the video source to use",
)
ap.add_argument(
"-a",
"--algorithm",
type=str,
default="fair_mot",
help="Which algortihm to run",
choices=["fair_mot", "deep_sort"],
)
ap.add_argument(
"-dev",
"--device",
type=str,
default="cuda",
help="Which device to use",
)
args = vars(ap.parse_args())
image_generator = {
"disk": lambda: disk_image_generator(
args["data_path"], {"mot20": args["data_splits"]}, count=None
),
"disk_with_detections": lambda: disk_image_with_detections_generator(
args["data_path"], {"mot20": args["data_splits"]}, count=None
),
"camera": lambda: camera_image_generator(
VideoStream(src=args["video_source"]).start()
),
}[args["source"]]()
time.sleep(2.0)
algorithm = {
"fair_mot": fair_mot_tracking,
"deep_sort": deep_sort_tracking,
}[args["algorithm"]]
# start a thread that will perform motion detection
t = threading.Thread(
target=algorithm, args=(args["model_name"], args["device"])
)
t.daemon = True
t.start()
# start the flask app
app.run(
host=args["ip"],
port=args["port"],
debug=True,
threaded=True,
use_reloader=False,
)
|
chef.py
|
import asyncio
import atexit
import multiprocessing as mp
import os
import re
import subprocess
import sys
from asyncio.events import get_running_loop
from pathlib import Path
from queue import Queue
from signal import SIGINT, SIGKILL, SIGTERM, SIGUSR1, signal
from threading import Timer
from time import perf_counter
import inotify_simple
from aiohttp import ClientSession, web
from rich import print
from rich.console import Console
from rich.traceback import install
PORT = 10043
console = Console()
install(console=console)
here = Path(__file__).parent
requests = Queue()
logo = """
⣠⣴⠖⠛⠉⠙⠓⠒⠦⢤⣀⡀ ⣀⣤⡶⠖⠛⠛⠳⡄ ⣠⡶⠟⡆ ⢀⣴⡾⢻⠄
⢾⠟⠁ ⠈⢙⣷⣾⣿⣥⣀⣀⣀⣤⠞⠁ ⢠⣾⢟⣠⠜⠁ ⣰⣿⣋⡠⠎
⣠⣿⠟ ⠉⠉⣴⣶⠿⠛⠉⠉⠉⣹⣿⠋⠉ ⢀⣴⣾⠟⠛⠉⠉⢉⣿⣿⣉⠁
⣼⡿⠃ ⠉⠁ ⣼⣿⠃⢀⣤⣤ ⢀⣩⣤⣤ ⢠⢚⣿⡿⠉⠉
⣼⡿⠁ ⣰⣿⣣⠞⣹⣿⠏ ⣰⣿⠟⠁⢨⠇⠈⣼⡿
⣸⣿⠁ ⢠⣿⡷⠁⢰⣿⠏⢀⣼⣿⠃⣀⠴⠋⢀⣾⡿⠁
⣿⡇ ⡴⠻⣿ ⢸⣏⡠⠊⢻⣯⠉⢀⣀⠔⢫⡿⠁
⣿⡇ ⣠⠞ ⠉⠉⠉ ⢠⡿⠁
⠹⣷⡀ ⣀⡤⠚⠁ ⣠⠟
⠈⠙⠓⠶⠶⠶⠒⠋⠁ ⣀⣀⣀⣤⠞⠁
⠛⠛⠋
"""
cpp20_flags = [
"g++",
"-std=c++20",
"-Wshadow",
"-Wall",
"-Wfloat-equal",
"-fsanitize=address",
"-fno-omit-frame-pointer",
"-pedantic",
]
def run_cpp(file_path: Path, inputs):
os.setpgrp()
out_dir = here / "out"
out_dir.mkdir(exist_ok=True)
out_path = out_dir / file_path.stem
print(f"-> Compiling {file_path.name}: ", end="", flush=True)
compile_start = perf_counter()
compile_proc = subprocess.run([*cpp20_flags, file_path, "-o", out_path])
compile_time = perf_counter() - compile_start
compile_time_out = f"[grey70]{compile_time * 1000:.0f}ms[/grey70]"
if compile_proc.returncode == 0:
print(f"[bold green]OK[/bold green]", compile_time_out)
else:
print(f"[bold red]ERROR[/bold red]", compile_time_out)
return
if inputs:
for inp in inputs:
console.print("[yellow]-> Input:[/yellow]", inp)
if subprocess.run(out_path, input=inp[0].encode()).returncode:
break
else:
subprocess.run(out_path)
def run_py(file_path: Path):
os.setpgrp()
print(f"-> Running {file_path.name}: ")
subprocess.run(["python3", file_path], cwd=file_path.parent)
def prepareCpp(problemInfo, templateContent):
preamble = "/*\n"
preamble += f" * {problemInfo['name']}\n"
preamble += f" *\n"
preamble += f" * Time Limit: {problemInfo['timeLimit'] / 1000}s\n"
preamble += f" * Problem URL: {problemInfo['url']}\n"
preamble += f" */\n"
tests = "".join([f"/*\n{test['input']}*/" for test in problemInfo["tests"]])
return f"{preamble}\n{templateContent}\n{tests}\n"
selected_lang = "cpp20"
langOptions = {
"cpp20": {
"runner": run_cpp,
"suffix": ".cpp",
"template": "templates/default.cpp",
"special_templates": {
"codingcompetitions.withgoogle.com": "templates/google.cpp",
},
"prepareTemplate": prepareCpp,
}
}
async def createProblemFile(problemInfo):
lang = langOptions[selected_lang]
def getTemplate():
for substr, templFileName in lang.get("special_templates", {}).items():
if substr in problemInfo["url"]:
return Path(templFileName).read_text()
if "template" in lang:
return Path(lang["template"]).read_text()
return ""
problemFilePath = here / f"{problemInfo['name']}{lang['suffix']}"
if problemFilePath.exists():
print("->", problemFilePath, "already exists")
return problemFilePath
problemFilePath.touch()
fileContent = getTemplate()
if "prepareTemplate" in lang:
fileContent = prepareCpp(problemInfo, fileContent)
problemFilePath.write_text(fileContent)
return problemFilePath
async def openFileInEditor(filePath):
subprocess.run(["code", here.as_posix(), filePath.as_posix()])
async def handleRequest(request):
console.log(request)
problemInfo = await request.json()
print(problemInfo)
problemFilePath = await createProblemFile(problemInfo)
await openFileInEditor(problemFilePath)
return web.Response(text="Thanks :)")
async def sendSigTermToSelf():
os.kill(os.getpid(), SIGTERM)
async def handleKillRequest(request):
console.log("Received Exit request")
asyncio.create_task(sendSigTermToSelf())
return web.Response(text="Request received.")
app = web.Application()
app.add_routes([web.post("/", handleRequest), web.get("/exit", handleKillRequest)])
class TimedSet:
"""A set that automatically removes elements after a specified TTL"""
def __init__(self, ttl):
self.set = set()
self.ttl = ttl
def __contains__(self, item):
if item in self.set:
return True
self.set.add(item)
Timer(self.ttl, self.set.remove, args=[item]).start()
return False
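# Debounce sketch: the first membership test inserts the item and reports False,
# repeated tests within `ttl` seconds report True, so duplicate inotify events
# can be skipped:
#   seen = TimedSet(1)
#   "a.cpp" in seen   # False -> handle the event
#   "a.cpp" in seen   # True while the 1 s timer is pending -> skip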
def getCommentedInput(filePath):
with open(filePath) as f:
fileContent = f.read()
lastMatchPos = None
matches = []
for match in re.finditer(r"/\*\n(.+?)\*/", fileContent, re.MULTILINE | re.DOTALL):
if lastMatchPos and lastMatchPos + 3 < match.start():
matches.clear()
matches.append([match.group(1)])
lastMatchPos = match.end()
return matches if lastMatchPos and lastMatchPos + 3 > len(fileContent) else []
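# Sketch: for a source file ending in
#   /*
#   3 1 2
#   */
#   /*
#   5 5
#   */
# getCommentedInput() returns [['3 1 2\n'], ['5 5\n']]; commented blocks that do
# not sit at the very end of the file are discarded.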
def watcher():
inotify = inotify_simple.INotify()
watch_paths = [here, *[p for p in here.glob("AoC*") if p.is_dir()]]
watch_descriptors = {
inotify.add_watch(watch_path, inotify_simple.flags.CLOSE_WRITE): watch_path
for watch_path in watch_paths
}
changed_events = TimedSet(1)
current_subproc = mp.Process()
def kill_children(*args):
if current_subproc.is_alive():
os.killpg(current_subproc.pid, SIGKILL)
print("[red]\n-> Terminating current process [/red]")
return True
return False
@atexit.register
def cleanup(*args):
kill_children()
console.log("Closing watch descriptor")
for watch_desc in watch_descriptors.keys():
inotify.rm_watch(watch_desc)
inotify.close()
sys.exit()
def handle_sigint(*args):
if not kill_children():
os.kill(os.getppid(), SIGTERM)
signal(SIGINT, handle_sigint)
signal(SIGTERM, cleanup)
print("-> Started watching directory for changes")
while True:
for event in inotify.read():
if event.name in changed_events:
continue
console.log(event)
file_path = watch_descriptors[event.wd] / event.name
kill_children()
if file_path.name == Path(__file__).name:
# Send SIGUSR1 to parent process requesting a restart
os.kill(os.getppid(), SIGUSR1)
elif file_path.suffix == ".cpp":
inputs = getCommentedInput(file_path)
current_subproc = mp.Process(target=run_cpp, args=(file_path, inputs), daemon=True)
current_subproc.start()
elif file_path.suffix == ".py":
current_subproc = mp.Process(target=run_py, args=(file_path,), daemon=True)
current_subproc.start()
async def precompile_headers():
"""Precompile bits/stdc++.h for faster compilation"""
def get_header():
"""Find bits/stdc++.h"""
for path in Path("/usr/include").glob("**/bits/stdc++.h"):
# Ignore 32 bit version
if "32/bits" in path.as_posix():
continue
return path
dest_dir = here / "bits"
dest_dir.mkdir(exist_ok=True)
dest_header = dest_dir / "stdc++.h"
if not (headerPath := get_header()):
print("Could not find bits/stdc++.h")
return
dest_header.write_text(headerPath.read_text())
start_time = perf_counter()
compiling_proc = await asyncio.create_subprocess_exec(*cpp20_flags, "stdc++.h", cwd=dest_dir)
print(
"-> Precompiling headers:",
"[bold green]OK[/bold green]"
if await compiling_proc.wait() == 0
else "[bold red]ERROR[bold red]",
f"[grey70]{perf_counter() - start_time:.2f}s[/grey70]",
)
async def killExistingInstance():
try:
async with ClientSession() as session:
async with session.get(f"http://localhost:{PORT}/exit"):
print("-> Another instance was detected: Exit requested")
except:
pass
async def main():
print(logo)
await killExistingInstance()
runner = web.AppRunner(app)
await runner.setup()
site = web.TCPSite(runner, "localhost", PORT, reuse_address=True, reuse_port=True)
asyncio.create_task(site.start())
print(f"-> Listening on port {PORT}")
watch_proc = mp.Process(target=watcher)
kill_sig = asyncio.Event()
def prepareExit():
watch_proc.terminate()
watch_proc.join()
kill_sig.set()
def restart():
prepareExit()
console.log("Restarting Chef")
os.execl(sys.executable, sys.executable, __file__)
asyncio.create_task(precompile_headers())
watch_proc.start()
loop = get_running_loop()
loop.add_signal_handler(SIGINT, lambda *args: None)
loop.add_signal_handler(SIGTERM, prepareExit)
loop.add_signal_handler(SIGUSR1, restart)
await kill_sig.wait()
if __name__ == "__main__":
asyncio.run(main())
|
client.py
|
from base64 import b64encode
import logging
try:
import queue
except ImportError: # pragma: no cover
import Queue as queue
import signal
import ssl
import threading
import time
import six
from six.moves import urllib
try:
import requests
except ImportError: # pragma: no cover
requests = None
try:
import websocket
except ImportError: # pragma: no cover
websocket = None
from . import exceptions
from . import packet
from . import payload
default_logger = logging.getLogger('engineio.client')
connected_clients = []
if six.PY2: # pragma: no cover
ConnectionError = OSError
def signal_handler(sig, frame):
"""SIGINT handler.
Disconnect all active clients and then invoke the original signal handler.
"""
for client in connected_clients[:]:
if not client.is_asyncio_based():
client.disconnect()
if callable(original_signal_handler):
return original_signal_handler(sig, frame)
else: # pragma: no cover
# Handle case where no original SIGINT handler was present.
return signal.default_int_handler(sig, frame)
original_signal_handler = None
class Client(object):
"""An Engine.IO client.
This class implements a fully compliant Engine.IO web client with support
for websocket and long-polling transports.
:param logger: To enable logging set to ``True`` or pass a logger object to
use. To disable logging set to ``False``. The default is
``False``. Note that fatal errors are logged even when
``logger`` is ``False``.
:param json: An alternative json module to use for encoding and decoding
packets. Custom json modules must have ``dumps`` and ``loads``
functions that are compatible with the standard library
versions.
:param request_timeout: A timeout in seconds for requests. The default is
5 seconds.
:param http_session: an initialized ``requests.Session`` object to be used
when sending requests to the server. Use it if you
need to add special client options such as proxy
servers, SSL certificates, etc.
:param ssl_verify: ``True`` to verify SSL certificates, or ``False`` to
skip SSL certificate verification, allowing
connections to servers with self signed certificates.
The default is ``True``.
"""
event_names = ['connect', 'disconnect', 'message']
def __init__(self,
logger=False,
json=None,
request_timeout=5,
http_session=None,
ssl_verify=True):
global original_signal_handler
if original_signal_handler is None and \
threading.current_thread() == threading.main_thread():
original_signal_handler = signal.signal(signal.SIGINT,
signal_handler)
self.handlers = {}
self.base_url = None
self.transports = None
self.current_transport = None
self.sid = None
self.upgrades = None
self.ping_interval = None
self.ping_timeout = None
self.pong_received = True
self.http = http_session
self.ws = None
self.read_loop_task = None
self.write_loop_task = None
self.ping_loop_task = None
self.ping_loop_event = None
self.queue = None
self.state = 'disconnected'
self.ssl_verify = ssl_verify
if json is not None:
packet.Packet.json = json
if not isinstance(logger, bool):
self.logger = logger
else:
self.logger = default_logger
if not logging.root.handlers and \
self.logger.level == logging.NOTSET:
if logger:
self.logger.setLevel(logging.INFO)
else:
self.logger.setLevel(logging.ERROR)
self.logger.addHandler(logging.StreamHandler())
self.request_timeout = request_timeout
def is_asyncio_based(self):
return False
def on(self, event, handler=None):
"""Register an event handler.
:param event: The event name. Can be ``'connect'``, ``'message'`` or
``'disconnect'``.
:param handler: The function that should be invoked to handle the
event. When this parameter is not given, the method
acts as a decorator for the handler function.
Example usage::
# as a decorator:
@eio.on('connect')
def connect_handler():
print('Connection request')
# as a method:
def message_handler(msg):
print('Received message: ', msg)
eio.send('response')
eio.on('message', message_handler)
"""
if event not in self.event_names:
raise ValueError('Invalid event')
def set_handler(handler):
self.handlers[event] = handler
return handler
if handler is None:
return set_handler
set_handler(handler)
def connect(self, url, headers=None, transports=None,
engineio_path='engine.io'):
"""Connect to an Engine.IO server.
:param url: The URL of the Engine.IO server. It can include custom
query string parameters if required by the server.
:param headers: A dictionary with custom headers to send with the
connection request.
:param transports: The list of allowed transports. Valid transports
are ``'polling'`` and ``'websocket'``. If not
given, the polling transport is connected first,
then an upgrade to websocket is attempted.
:param engineio_path: The endpoint where the Engine.IO server is
installed. The default value is appropriate for
most cases.
Example usage::
eio = engineio.Client()
eio.connect('http://localhost:5000')
"""
if self.state != 'disconnected':
raise ValueError('Client is not in a disconnected state')
valid_transports = ['polling', 'websocket']
if transports is not None:
if isinstance(transports, six.string_types):
transports = [transports]
transports = [transport for transport in transports
if transport in valid_transports]
if not transports:
raise ValueError('No valid transports provided')
self.transports = transports or valid_transports
self.queue = self.create_queue()
return getattr(self, '_connect_' + self.transports[0])(
url, headers or {}, engineio_path)
def wait(self):
"""Wait until the connection with the server ends.
Client applications can use this function to block the main thread
during the life of the connection.
"""
if self.read_loop_task:
self.read_loop_task.join()
def send(self, data, binary=None):
"""Send a message to a client.
:param data: The data to send to the client. Data can be of type
``str``, ``bytes``, ``list`` or ``dict``. If a ``list``
or ``dict``, the data will be serialized as JSON.
:param binary: ``True`` to send packet as binary, ``False`` to send
as text. If not given, unicode (Python 2) and str
(Python 3) are sent as text, and str (Python 2) and
bytes (Python 3) are sent as binary.
"""
self._send_packet(packet.Packet(packet.MESSAGE, data=data,
binary=binary))
def disconnect(self, abort=False):
"""Disconnect from the server.
:param abort: If set to ``True``, do not wait for background tasks
associated with the connection to end.
"""
if self.state == 'connected':
self._send_packet(packet.Packet(packet.CLOSE))
self.queue.put(None)
self.state = 'disconnecting'
self._trigger_event('disconnect', run_async=False)
if self.current_transport == 'websocket':
self.ws.close()
if not abort:
self.read_loop_task.join()
self.state = 'disconnected'
try:
connected_clients.remove(self)
except ValueError: # pragma: no cover
pass
self._reset()
def transport(self):
"""Return the name of the transport currently in use.
The possible values returned by this function are ``'polling'`` and
``'websocket'``.
"""
return self.current_transport
def start_background_task(self, target, *args, **kwargs):
"""Start a background task.
This is a utility function that applications can use to start a
background task.
:param target: the target function to execute.
:param args: arguments to pass to the function.
:param kwargs: keyword arguments to pass to the function.
This function returns an object compatible with the `Thread` class in
the Python standard library. The `start()` method on this object is
already called by this function.
"""
th = threading.Thread(target=target, args=args, kwargs=kwargs)
th.start()
return th
def sleep(self, seconds=0):
"""Sleep for the requested amount of time."""
return time.sleep(seconds)
def create_queue(self, *args, **kwargs):
"""Create a queue object."""
q = queue.Queue(*args, **kwargs)
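        # Expose the Empty exception on the queue instance so that callers
        # (e.g. the write loop) can catch self.queue.Empty without importing
        # the queue module themselves.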
q.Empty = queue.Empty
return q
def create_event(self, *args, **kwargs):
"""Create an event object."""
return threading.Event(*args, **kwargs)
def _reset(self):
self.state = 'disconnected'
self.sid = None
def _connect_polling(self, url, headers, engineio_path):
"""Establish a long-polling connection to the Engine.IO server."""
if requests is None: # pragma: no cover
# not installed
self.logger.error('requests package is not installed -- cannot '
'send HTTP requests!')
return
self.base_url = self._get_engineio_url(url, engineio_path, 'polling')
self.logger.info('Attempting polling connection to ' + self.base_url)
r = self._send_request(
'GET', self.base_url + self._get_url_timestamp(), headers=headers,
timeout=self.request_timeout)
if r is None:
self._reset()
raise exceptions.ConnectionError(
'Connection refused by the server')
if r.status_code < 200 or r.status_code >= 300:
self._reset()
raise exceptions.ConnectionError(
'Unexpected status code {} in server response'.format(
r.status_code), r.json())
try:
p = payload.Payload(encoded_payload=r.content)
except ValueError:
six.raise_from(exceptions.ConnectionError(
'Unexpected response from server'), None)
open_packet = p.packets[0]
if open_packet.packet_type != packet.OPEN:
raise exceptions.ConnectionError(
'OPEN packet not returned by server')
self.logger.info(
'Polling connection accepted with ' + str(open_packet.data))
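        # A typical OPEN payload looks like {'sid': '...', 'upgrades': ['websocket'],
        # 'pingInterval': 25000, 'pingTimeout': 60000}; the intervals arrive in
        # milliseconds, hence the division by 1000 below.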
self.sid = open_packet.data['sid']
self.upgrades = open_packet.data['upgrades']
self.ping_interval = open_packet.data['pingInterval'] / 1000.0
self.ping_timeout = open_packet.data['pingTimeout'] / 1000.0
self.current_transport = 'polling'
self.base_url += '&sid=' + self.sid
self.state = 'connected'
connected_clients.append(self)
self._trigger_event('connect', run_async=False)
for pkt in p.packets[1:]:
self._receive_packet(pkt)
if 'websocket' in self.upgrades and 'websocket' in self.transports:
# attempt to upgrade to websocket
if self._connect_websocket(url, headers, engineio_path):
# upgrade to websocket succeeded, we're done here
return
# start background tasks associated with this client
self.ping_loop_task = self.start_background_task(self._ping_loop)
self.write_loop_task = self.start_background_task(self._write_loop)
self.read_loop_task = self.start_background_task(
self._read_loop_polling)
def _connect_websocket(self, url, headers, engineio_path):
"""Establish or upgrade to a WebSocket connection with the server."""
if websocket is None: # pragma: no cover
# not installed
self.logger.warning('websocket-client package not installed, only '
'polling transport is available')
return False
websocket_url = self._get_engineio_url(url, engineio_path, 'websocket')
if self.sid:
self.logger.info(
'Attempting WebSocket upgrade to ' + websocket_url)
upgrade = True
websocket_url += '&sid=' + self.sid
else:
upgrade = False
self.base_url = websocket_url
self.logger.info(
'Attempting WebSocket connection to ' + websocket_url)
# get cookies and other settings from the long-polling connection
# so that they are preserved when connecting to the WebSocket route
cookies = None
extra_options = {}
if self.http:
# cookies
cookies = '; '.join(["{}={}".format(cookie.name, cookie.value)
for cookie in self.http.cookies])
for header, value in headers.items():
if header.lower() == 'cookie':
if cookies:
cookies += '; '
cookies += value
del headers[header]
break
# auth
if 'Authorization' not in headers and self.http.auth is not None:
if not isinstance(self.http.auth, tuple): # pragma: no cover
raise ValueError('Only basic authentication is supported')
basic_auth = '{}:{}'.format(
self.http.auth[0], self.http.auth[1]).encode('utf-8')
basic_auth = b64encode(basic_auth).decode('utf-8')
headers['Authorization'] = 'Basic ' + basic_auth
# cert
# this can be given as ('certfile', 'keyfile') or just 'certfile'
if isinstance(self.http.cert, tuple):
extra_options['sslopt'] = {
'certfile': self.http.cert[0],
'keyfile': self.http.cert[1]}
elif self.http.cert:
extra_options['sslopt'] = {'certfile': self.http.cert}
# proxies
if self.http.proxies:
proxy_url = None
if websocket_url.startswith('ws://'):
proxy_url = self.http.proxies.get(
'ws', self.http.proxies.get('http'))
else: # wss://
proxy_url = self.http.proxies.get(
'wss', self.http.proxies.get('https'))
if proxy_url:
parsed_url = urllib.parse.urlparse(
proxy_url if '://' in proxy_url
else 'scheme://' + proxy_url)
                    self.logger.debug('Proxy URL parsed as %s', parsed_url)
extra_options['http_proxy_host'] = parsed_url.hostname
extra_options['http_proxy_port'] = parsed_url.port
extra_options['http_proxy_auth'] = (
(parsed_url.username, parsed_url.password)
if parsed_url.username or parsed_url.password
else None)
# verify
if not self.http.verify:
self.ssl_verify = False
if not self.ssl_verify:
extra_options['sslopt'] = {"cert_reqs": ssl.CERT_NONE}
try:
ws = websocket.create_connection(
websocket_url + self._get_url_timestamp(), header=headers,
cookie=cookies, enable_multithread=True, **extra_options)
except (ConnectionError, IOError, websocket.WebSocketException):
if upgrade:
self.logger.warning(
'WebSocket upgrade failed: connection error')
return False
else:
raise exceptions.ConnectionError('Connection error')
if upgrade:
p = packet.Packet(packet.PING,
data=six.text_type('probe')).encode()
try:
ws.send(p)
except Exception as e: # pragma: no cover
self.logger.warning(
'WebSocket upgrade failed: unexpected send exception: %s',
str(e))
return False
try:
p = ws.recv()
except Exception as e: # pragma: no cover
self.logger.warning(
'WebSocket upgrade failed: unexpected recv exception: %s',
str(e))
return False
pkt = packet.Packet(encoded_packet=p)
if pkt.packet_type != packet.PONG or pkt.data != 'probe':
self.logger.warning(
'WebSocket upgrade failed: no PONG packet')
return False
p = packet.Packet(packet.UPGRADE).encode()
try:
ws.send(p)
except Exception as e: # pragma: no cover
self.logger.warning(
'WebSocket upgrade failed: unexpected send exception: %s',
str(e))
return False
self.current_transport = 'websocket'
self.logger.info('WebSocket upgrade was successful')
else:
try:
p = ws.recv()
except Exception as e: # pragma: no cover
raise exceptions.ConnectionError(
'Unexpected recv exception: ' + str(e))
open_packet = packet.Packet(encoded_packet=p)
if open_packet.packet_type != packet.OPEN:
raise exceptions.ConnectionError('no OPEN packet')
self.logger.info(
'WebSocket connection accepted with ' + str(open_packet.data))
self.sid = open_packet.data['sid']
self.upgrades = open_packet.data['upgrades']
self.ping_interval = open_packet.data['pingInterval'] / 1000.0
self.ping_timeout = open_packet.data['pingTimeout'] / 1000.0
self.current_transport = 'websocket'
self.state = 'connected'
connected_clients.append(self)
self._trigger_event('connect', run_async=False)
self.ws = ws
# start background tasks associated with this client
self.ping_loop_task = self.start_background_task(self._ping_loop)
self.write_loop_task = self.start_background_task(self._write_loop)
self.read_loop_task = self.start_background_task(
self._read_loop_websocket)
return True
def _receive_packet(self, pkt):
"""Handle incoming packets from the server."""
packet_name = packet.packet_names[pkt.packet_type] \
if pkt.packet_type < len(packet.packet_names) else 'UNKNOWN'
self.logger.info(
'Received packet %s data %s', packet_name,
pkt.data if not isinstance(pkt.data, bytes) else '<binary>')
if pkt.packet_type == packet.MESSAGE:
self._trigger_event('message', pkt.data, run_async=True)
elif pkt.packet_type == packet.PONG:
self.pong_received = True
elif pkt.packet_type == packet.CLOSE:
self.disconnect(abort=True)
elif pkt.packet_type == packet.NOOP:
pass
else:
self.logger.error('Received unexpected packet of type %s',
pkt.packet_type)
def _send_packet(self, pkt):
"""Queue a packet to be sent to the server."""
if self.state != 'connected':
return
self.queue.put(pkt)
self.logger.info(
'Sending packet %s data %s',
packet.packet_names[pkt.packet_type],
pkt.data if not isinstance(pkt.data, bytes) else '<binary>')
def _send_request(
self, method, url, headers=None, body=None,
timeout=None): # pragma: no cover
if self.http is None:
self.http = requests.Session()
try:
return self.http.request(method, url, headers=headers, data=body,
timeout=timeout, verify=self.ssl_verify)
except requests.exceptions.RequestException as exc:
self.logger.info('HTTP %s request to %s failed with error %s.',
method, url, exc)
def _trigger_event(self, event, *args, **kwargs):
"""Invoke an event handler."""
run_async = kwargs.pop('run_async', False)
if event in self.handlers:
if run_async:
return self.start_background_task(self.handlers[event], *args)
else:
try:
return self.handlers[event](*args)
except:
self.logger.exception(event + ' handler error')
def _get_engineio_url(self, url, engineio_path, transport):
"""Generate the Engine.IO connection URL."""
engineio_path = engineio_path.strip('/')
parsed_url = urllib.parse.urlparse(url)
if transport == 'polling':
scheme = 'http'
elif transport == 'websocket':
scheme = 'ws'
else: # pragma: no cover
raise ValueError('invalid transport')
if parsed_url.scheme in ['https', 'wss']:
scheme += 's'
return ('{scheme}://{netloc}/{path}/?{query}'
'{sep}transport={transport}&EIO=3').format(
scheme=scheme, netloc=parsed_url.netloc,
path=engineio_path, query=parsed_url.query,
sep='&' if parsed_url.query else '',
transport=transport)
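    # Example (illustration only): _get_engineio_url('https://example.com/app?x=1',
    # 'engine.io', 'polling') returns
    # 'https://example.com/engine.io/?x=1&transport=polling&EIO=3'.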
def _get_url_timestamp(self):
"""Generate the Engine.IO query string timestamp."""
return '&t=' + str(time.time())
def _ping_loop(self):
"""This background task sends a PING to the server at the requested
interval.
"""
self.pong_received = True
if self.ping_loop_event is None:
self.ping_loop_event = self.create_event()
else:
self.ping_loop_event.clear()
while self.state == 'connected':
if not self.pong_received:
self.logger.info(
'PONG response has not been received, aborting')
if self.ws:
self.ws.close(timeout=0)
self.queue.put(None)
break
            self.pong_received = False
            self._send_packet(packet.Packet(packet.PING))
            self.ping_loop_event.wait(timeout=self.ping_interval)
self.logger.info('Exiting ping task')
def _read_loop_polling(self):
"""Read packets by polling the Engine.IO server."""
self.logger.info('Read loop polling started.')
while self.state == 'connected':
url_timestamp = self._get_url_timestamp()
self.logger.info(
'Sending polling GET request to ' + self.base_url + url_timestamp)
r = self._send_request(
'GET', self.base_url + url_timestamp,
timeout=max(self.ping_interval, self.ping_timeout) + 5)
self.logger.info('Response: ')
self.logger.info(r)
if r is None:
self.logger.warning(
'Connection refused by the server, aborting')
self.queue.put(None)
break
self.logger.info(r.status_code)
self.logger.info(r.content)
if r.status_code < 200 or r.status_code >= 300:
self.logger.warning('Unexpected status code %s in server '
'response, aborting', r.status_code)
self.queue.put(None)
break
try:
p = payload.Payload(encoded_payload=r.content)
except ValueError:
self.logger.warning(
'Unexpected packet from server, aborting')
self.queue.put(None)
break
for pkt in p.packets:
self._receive_packet(pkt)
self.logger.info('Waiting for write loop task to end')
self.write_loop_task.join()
self.logger.info('Waiting for ping loop task to end')
if self.ping_loop_event: # pragma: no cover
self.ping_loop_event.set()
self.ping_loop_task.join()
if self.state == 'connected':
self._trigger_event('disconnect', run_async=False)
try:
connected_clients.remove(self)
except ValueError: # pragma: no cover
pass
self._reset()
self.logger.info('Exiting read loop task')
def _read_loop_websocket(self):
"""Read packets from the Engine.IO WebSocket connection."""
while self.state == 'connected':
p = None
try:
p = self.ws.recv()
except websocket.WebSocketConnectionClosedException:
self.logger.warning(
'WebSocket connection was closed, aborting')
self.queue.put(None)
break
except Exception as e:
self.logger.info(
'Unexpected error receiving packet: "%s", aborting',
str(e))
self.queue.put(None)
break
if isinstance(p, six.text_type): # pragma: no cover
p = p.encode('utf-8')
try:
pkt = packet.Packet(encoded_packet=p)
except Exception as e: # pragma: no cover
self.logger.info(
'Unexpected error decoding packet: "%s", aborting', str(e))
self.queue.put(None)
break
self._receive_packet(pkt)
self.logger.info('Waiting for write loop task to end')
self.write_loop_task.join()
self.logger.info('Waiting for ping loop task to end')
if self.ping_loop_event: # pragma: no cover
self.ping_loop_event.set()
self.ping_loop_task.join()
if self.state == 'connected':
self._trigger_event('disconnect', run_async=False)
try:
connected_clients.remove(self)
except ValueError: # pragma: no cover
pass
self._reset()
self.logger.info('Exiting read loop task')
def _write_loop(self):
"""This background task sends packages to the server as they are
pushed to the send queue.
"""
self.logger.info('Write loop started')
while self.state == 'connected':
# to simplify the timeout handling, use the maximum of the
# ping interval and ping timeout as timeout, with an extra 5
# seconds grace period
timeout = max(self.ping_interval, self.ping_timeout) + 5
packets = None
try:
packets = [self.queue.get(timeout=timeout)]
except self.queue.Empty:
self.logger.error('packet queue is empty, aborting')
break
if packets == [None]:
self.queue.task_done()
packets = []
else:
while True:
try:
packets.append(self.queue.get(block=False))
except self.queue.Empty:
break
if packets[-1] is None:
packets = packets[:-1]
self.queue.task_done()
break
if not packets:
# empty packet list returned -> connection closed
break
if self.current_transport == 'polling':
p = payload.Payload(packets=packets)
r = self._send_request(
'POST', self.base_url, body=p.encode(),
headers={'Content-Type': 'application/octet-stream'},
timeout=self.request_timeout)
for pkt in packets:
self.queue.task_done()
if r is None:
self.logger.warning(
'Connection refused by the server, aborting')
break
if r.status_code < 200 or r.status_code >= 300:
self.logger.warning('Unexpected status code %s in server '
'response, aborting', r.status_code)
self._reset()
break
else:
# websocket
try:
for pkt in packets:
encoded_packet = pkt.encode(always_bytes=False)
if pkt.binary:
self.ws.send_binary(encoded_packet)
else:
self.ws.send(encoded_packet)
self.queue.task_done()
except websocket.WebSocketConnectionClosedException:
self.logger.warning(
'WebSocket connection was closed, aborting')
break
self.logger.info('Exiting write loop task')
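# ---------------------------------------------------------------------------
# Usage sketch (not part of the upstream module): a minimal round trip built
# only from the public methods documented above.  The URL below is a
# placeholder for a reachable Engine.IO server.
# ---------------------------------------------------------------------------
if __name__ == '__main__':  # pragma: no cover
    eio = Client(logger=True)

    @eio.on('connect')
    def on_connect():
        print('connected, sid =', eio.sid)
        eio.send('hello')

    @eio.on('message')
    def on_message(data):
        print('received:', data)
        eio.disconnect()

    eio.connect('http://localhost:5000')  # placeholder endpoint
    eio.wait()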
|
BotConcurrentModule.py
|
import asyncio
import threading
import importlib
from multiprocessing import Process, Queue
from botsdk.Bot import Bot
from botsdk.BotRequest import BotRequest
from botsdk.tool.BotException import BotException
from botsdk.tool.JsonConfig import getConfig
from botsdk.tool.TimeTest import *
from botsdk.tool.Error import printTraceBack,debugPrint
from botsdk.tool.HandlePacket import *
# Default entry point for worker threads
def workThreadRun(loop):
    try:
        debugPrint("Worker thread initializing")
        asyncio.set_event_loop(loop)
        debugPrint("Worker thread entering its event loop")
        loop.run_forever()
    except Exception as e:
        printTraceBack("Worker thread exited with an exception")
# Main loop of a worker process
async def workProcessRun(queue, threadList):
    loop = asyncio.get_event_loop()
    debugPrint("Worker process entering its loop")
useThreadCount = 0
while True:
try:
try:
event = queue.get_nowait()
except Exception as e:
await asyncio.sleep(0.05)
continue
bot = Bot(*event[0])
request = BotRequest(event[1][0], bot)
module = importlib.reload(__import__(f"plugins.{event[2][0]}", fromlist=(event[2][0],)))
plugin = getattr(module, "handle")()
handle = None
for i in plugin.getListenTarget():
if i[1] == event[2][1]:
handle = i[2]
break
if handle is not None:
                debugPrint("Dispatching handler to a worker coroutine")
                asyncio.run_coroutine_threadsafe(asyncHandlePacket(handle, request), threadList[useThreadCount][1])
                debugPrint("Dispatch complete")
useThreadCount += 1
if useThreadCount == len(threadList):
useThreadCount = 0
else:
                debugPrint("Worker process received an unknown target")
except Exception as e:
printTraceBack()
# Worker-process initializer
def workProcessInit(queue, threadSize):
    debugPrint("New worker process initializing")
    threadList = []
    debugPrint("Creating worker threads")
for i in range(threadSize):
loop = asyncio.new_event_loop()
threadList.append([threading.Thread(target = workThreadRun, args = (loop, )), loop])
for i in threadList:
i[0].start()
    debugPrint("Worker threads created")
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    debugPrint("Worker process initialization complete")
loop.run_until_complete(workProcessRun(queue, threadList))
class BotConcurrentModule:
def __init__(self):
pass
def __del__(self):
pass
def addTask(self, data):
pass
class defaultBotConcurrentModule(BotConcurrentModule):
def __init__(self, processSize, threadSize):
if processSize * threadSize == 0:
            raise BotException("Invalid process/thread count")
self.processSize = processSize
self.threadSize = threadSize
self.processList = []
self.queue = Queue()
        for i in range(self.processSize):
self.processList.append(Process(target=workProcessInit, args=(self.queue, threadSize)))
for i in self.processList:
i.start()
def addTask(self,data):
self.queue.put(data)
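# Usage sketch (assumptions labelled): each item put on the queue is consumed
# by workProcessRun as
#     event[0]    -> positional args for Bot(...)
#     event[1][0] -> the raw packet handed to BotRequest(packet, bot)
#     event[2]    -> (plugin_name, listen_target) used to locate the handler
# so a caller might do roughly:
#     pool = defaultBotConcurrentModule(processSize=2, threadSize=4)
#     pool.addTask((bot_args, (packet,), ("echo", "GroupMessage")))
# where bot_args, packet, "echo" and "GroupMessage" are hypothetical values.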
|
tk_agent.py
|
import logging
import time
import threading
import copy
import os
import chess
import chess.pgn
# from tkinter import *
# from tkinter.ttk import *
# from tkinter import filedialog, font
import tkinter as tk
import tkinter.ttk as ttk
from tkinter import font, filedialog
import PIL
from PIL import ImageTk, Image, ImageOps
# By en:User:Cburnett - File:Chess klt45.svg, CC BY-SA 3.0,
# https://commons.wikimedia.org/w/index.php?curid=20363779
# https://commons.wikimedia.org/wiki/Template:SVG_chess_pieces
# convert -background none -density 128 -resize 128x Chess_bdt45.svg cbd.gif
class GameBoard(ttk.Frame):
def __init__(self, parent, size=64, r=0, c=0, color1="white", color2="gray",
bg_color="black", ol_color="black", log=None):
'''size is the size of a square, in pixels'''
self.rows = 8
self.log = log
self.columns = 8
self.size = size
self.color1 = color1
self.color2 = color2
self.bg_color = bg_color
self.ol_color = ol_color
self.height = None
self.width = None
self.pieces = {}
self.figrep = {"png60": ["wp60.png", "wn60.png", "wb60.png", "wr60.png", "wq60.png",
"wk60.png", "bp60.png", "bn60.png", "bb60.png", "br60.png",
"bq60.png", "bk60.png"]}
self.position = []
self.valid_move_list = []
self.move_part = 0
self.move_actor = None
self.cur_move = ""
for _ in range(8):
row = []
for _ in range(8):
row.append(-1)
self.position.append(row)
canvas_width = self.columns * size
canvas_height = self.rows * size
ttk.Frame.__init__(self, parent)
self.canvas = tk.Canvas(parent, borderwidth=0, highlightthickness=0,
width=canvas_width, height=canvas_height, background=bg_color)
self.canvas.grid(row=r, column=c, sticky="news")
# self.canvas.grid_columnconfigure(0, weight=1)
# self.canvas.grid_rowconfigure(0, weight=1)
self.load_figures(size)
self.canvas.bind("<Configure>", self.refresh)
self.canvas.bind("<Button-1>", self.mouse_click)
def load_figures(self, size):
self.png60s = []
img_size = size-4
for fn in self.figrep['png60']:
fp = os.path.join('resources/pieces', fn)
img = Image.open(fp).convert('RGBA').resize(
(img_size, img_size), Image.ANTIALIAS)
self.png60s.append(ImageTk.PhotoImage(img))
def mouse_click(self, event):
x = chr(event.x//self.size+ord('a'))
y = chr(7-(event.y//self.size)+ord('1'))
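        # event.x/event.y are canvas pixel coordinates; dividing by the square
        # size yields the file ('a'-'h') and rank ('1'-'8'), with rank 8 on the
        # top row of the canvas.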
if self.move_part == 0:
cc = f"{x}{y}"
self.cur_move = ""
else:
cc = f"{self.cur_move}{x}{y}"
if len(self.valid_move_list) > 0:
f = []
for mv in self.valid_move_list:
if mv[0:self.move_part*2+2] == cc:
f.append(mv)
if len(f) > 0:
if self.move_part == 0:
self.cur_move = cc
self.move_part += 1
return
else:
if len(f) > 1 and self.log is not None:
self.log.error("This is non-implemented situation")
# XXX: select pawn upgrade GUI
self.move_actor(f[0])
else:
if self.log is not None:
self.log.warning("Invalid entry!")
self.move_part = 0
self.cur_move = ""
else:
if self.log is not None:
self.log.warning(
"You are not allowed to click on the board at this time!")
self.move_part = 0
            self.cur_move = ""
print(f"Click at {cc}")
def register_moves(self, move_list, move_actor=None):
print(move_list)
self.move_actor = move_actor
self.move_part = 0
self.valid_move_list = move_list
def refresh(self, event=None):
redraw_fields = False
if event is not None:
if self.height != event.height or self.width != event.width:
redraw_fields = True
self.width = event.width
self.height = event.height
# Redraw the board, possibly in response to window being resized
xsize = int((self.width-1) / self.columns)
ysize = int((self.height-1) / self.rows)
self.size = min(xsize, ysize)
self.load_figures(self.size)
if redraw_fields is True:
self.canvas.delete("square")
self.canvas.delete("piece")
color = self.color2
for row in range(self.rows):
color = self.color1 if color == self.color2 else self.color2
for col in range(self.columns):
x1 = (col * self.size)
y1 = (row * self.size)
x2 = x1 + self.size
y2 = y1 + self.size
if redraw_fields is True:
self.canvas.create_rectangle(x1, y1, x2, y2, outline=self.ol_color,
fill=color, tags="square")
color = self.color1 if color == self.color2 else self.color2
img_ind = self.position[row][col]
if img_ind != -1:
self.canvas.create_image(x1, y1, image=self.png60s[img_ind],
tags=("piece"), anchor="nw")
self.canvas.tag_raise("piece")
self.canvas.tag_lower("square")
class TkAgent:
def __init__(self, appque, prefs):
self.figrep = {"int": [1, 2, 3, 4, 5, 6, 0, -1, -2, -3, -4, -5, -6],
"pythc": [(chess.PAWN, chess.WHITE), (chess.KNIGHT, chess.WHITE),
(chess.BISHOP, chess.WHITE), (chess.ROOK, chess.WHITE),
(chess.QUEEN, chess.WHITE), (chess.KING, chess.WHITE),
(chess.PAWN, chess.BLACK), (chess.KNIGHT, chess.BLACK),
(chess.BISHOP, chess.BLACK), (chess.ROOK, chess.BLACK),
(chess.QUEEN, chess.BLACK), (chess.KING, chess.BLACK)],
"unic": "♟♞♝♜♛♚ ♙♘♗♖♕♔",
"png60": ["wp60.png", "wn60.png", "wb60.png", "wr60.png", "wq60.png",
"wk60.png", "bp60.png", "bn60.png", "bb60.png", "br60.png",
"bq60.png", "bk60.png"],
"ascii": "PNBRQK.pnbrqk"}
        self.turquoise = {
            "light": "#D8DBE2", # Gainsboro
            "dlight": "#A9BCC0", # Pastel Blue
            "turquoise": "#58A4B0", # Cadet Blue
            "silver": "#C0C0C0", # Silver
            "darkgray": "#A9A9A9", # Darkgray
            "ldark": "#373F41", # Charcoal
            "dark": "#2E3532", # Jet
            "ddark": "#282A32", # Charleston Green
            "dddark": "#1B1B1E", # Eerie Black
            "xdddark": "#202022", # X Black
        }
self.chesssym = {"unic": ["-", "×", "†", "‡", "½"],
"ascii": ["-", "x", "+", "#", "1/2"]}
self.name = 'TkAgent'
self.prefs = prefs
self.log = logging.getLogger("TkAgent")
self.appque = appque
self.orientation = True
self.active = False
self.agent_state_cache = {}
self.tk_moves = []
self.png60s = None
self.title_text = None
self.board = None
self.tk_board = None
self.tk_board2 = None
self.title = None
self.movelist = None
self.analist = None
self.gui_init = False
self.tkapp_thread_active = True
self.tkapp_thread = threading.Thread(
target=self.tkapp_worker_thread, args=(self.appque, self.log))
        self.tkapp_thread.daemon = True
self.tkapp_thread.start()
t0 = time.time()
warned = False
while self.gui_init is False:
time.sleep(0.1)
if time.time()-t0 > 2 and warned is False:
warned = True
self.log.error("Tk GUI is not responding in time!")
if time.time()-t0 > 5:
return
self.log.info("GUI online.")
self.active = True
def agent_ready(self):
return self.active
def quit(self):
self.tkapp_thread_active = False
def board2pos(self, board):
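        # Convert a python-chess Board into the 8x8 matrix of indices (into
        # self.figrep / GameBoard.png60s) that GameBoard.position expects;
        # -1 marks an empty square and row 0 corresponds to rank 8.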
pos = []
for y in reversed(range(8)):
row = []
for x in range(8):
fig = board.piece_at(chess.square(x, y))
if fig is not None:
ind = 0
for f0 in self.figrep['pythc']:
if fig.piece_type == f0[0] and fig.color == f0[1]:
break
ind += 1
if ind < len(self.figrep['pythc']):
row.append(ind)
else:
row.append(-1)
self.log.error(f'Figure conversion error at {x}{y}')
else:
row.append(-1)
pos.append(row)
return pos
def display_board(self, board, attribs={'unicode': True, 'invert': False,
'white_name': 'white', 'black_name': 'black'}):
self.log.info("display_board")
if self.gui_init is False:
return
self.title_text.set(attribs["white_name"] +
" - " + attribs["black_name"])
self.tk_board.position = self.board2pos(board)
self.tk_board.refresh()
try:
game = chess.pgn.Game().from_board(board)
game.headers["White"] = attribs["white_name"]
game.headers["Black"] = attribs["black_name"]
pgntxt = str(game)
            pgntxt = ' '.join(pgntxt.splitlines()[8:])
except Exception as e:
self.log.error(f"Invalid PGN position, {e}")
return
self.movelist.delete("1.0", tk.END)
self.movelist.insert("1.0", pgntxt)
def display_move(self, move_msg):
pass
def display_info(self, board, info, max_board_preview_hmoves=6):
# if info['multipv_ind'] != 1:
# return
mpv_ind = info['multipv_ind']
ninfo = copy.deepcopy(info)
nboard = copy.deepcopy(board)
nboard_cut = copy.deepcopy(nboard)
max_cut = max_board_preview_hmoves
if 'variant' in ninfo:
ml = []
mv = ''
if nboard.turn is False:
mv = (nboard.fullmove_number,)
mv += ("..",)
rel_mv = 0
for move in ninfo['variant']:
if move is None:
self.log.error(f"None-move in variant: {ninfo}")
if nboard.turn is True:
mv = (nboard.fullmove_number,)
try:
san = nboard.san(move)
except Exception as e:
self.log.warning(
f"Internal error '{e}' at san conversion.")
san = None
if san is not None:
mv += (san,)
else:
self.log.info(
f"Variant cut off due to san-conversion-error: '{mv}'")
break
if nboard.turn is False:
ml.append(mv)
mv = ""
nboard.push(move)
if rel_mv < max_cut:
nboard_cut.push(move)
rel_mv += 1
if mv != "":
ml.append(mv)
mv = ""
ninfo['variant'] = ml
self.analist.delete(f"{mpv_ind}.0", f"{mpv_ind+1}.0")
self.analist.insert(f"{mpv_ind}.0", f"[{mpv_ind}]: " + str(ml) + "\n")
if mpv_ind == 1:
self.tk_board2.position = self.board2pos(nboard_cut)
self.tk_board2.refresh()
def agent_states(self, msg):
self.agent_state_cache[msg['actor']] = msg
def do_move(self, move):
self.appque.put({'move': {'uci': move, 'actor': self.name}})
def set_valid_moves(self, board, vals):
tk_moves = []
self.board = board
if vals is not None:
for v in vals:
tk_moves.append(vals[v])
self.tk_board.register_moves(tk_moves, self.do_move)
def tkapp_worker_thread(self, appque, log):
root = tk.Tk()
default_font = font.nametofont("TkDefaultFont")
default_font.configure(size=10)
text_font = font.nametofont("TkTextFont")
text_font.configure(size=10)
fixed_font = font.nametofont("TkFixedFont")
fixed_font.configure(size=10)
# self.frame = Frame(root)
for i in range(3):
tk.Grid.columnconfigure(root, i, weight=1)
# if i>0:
tk.Grid.rowconfigure(root, i, weight=1)
# for i in range(3):
# Grid.columnconfigure(self.frame, i, weight=1)
# Grid.rowconfigure(self.frame, i, weight=1)
# self.frame.grid(sticky=N+S+W+E)
self.bof = ttk.Frame(root)
for i in range(3):
tk.Grid.columnconfigure(self.bof, i, weight=1)
# if i>0:
tk.Grid.rowconfigure(self.bof, i, weight=1)
self.bof.grid(row=1, column=0, sticky="news")
self.tk_board = GameBoard(self.bof, log=self.log, r=1, c=0,
color1=self.turquoise['dlight'],
color2=self.turquoise['turquoise'],
bg_color=self.turquoise['ldark'],
ol_color=self.turquoise['darkgray'])
self.tk_board.grid(row=1, column=0, sticky="news")
s = 20
self.bfr = ttk.Frame(self.bof)
self.bfr.grid(row=2, column=0, sticky="news")
img = Image.open(
'web/images/bb.png').convert('RGBA').resize((s, s), Image.ANTIALIAS)
bbackimg = ImageTk.PhotoImage(img)
self.button_bback = ttk.Button(
self.bfr, image=bbackimg, command=self.on_fast_back)
# background=self.turquoise['dlight'], , relief=FLAT)
# self.button_bback.configure(padx=15, pady=15)
self.button_bback.grid(
row=0, column=0, sticky="ew", padx=(5, 5), pady=(7, 7))
img = Image.open(
'web/images/b.png').convert('RGBA').resize((s, s), Image.ANTIALIAS)
backimg = ImageTk.PhotoImage(img)
self.button_back = ttk.Button(
self.bfr, image=backimg, command=self.on_back)
# , relief=FLAT)
self.button_back.grid(row=0, column=1, sticky="ew",
padx=(5, 5), pady=(7, 7))
img = Image.open(
'web/images/stop.png').convert('RGBA').resize((s, s), Image.ANTIALIAS)
stopimg = ImageTk.PhotoImage(img)
self.button_stop = ttk.Button(
self.bfr, image=stopimg, command=self.on_stop)
# , relief=FLAT)
self.button_stop.grid(row=0, column=2, sticky="ew",
padx=(8, 8), pady=(7, 7))
img = Image.open(
'web/images/f.png').convert('RGBA').resize((s, s), Image.ANTIALIAS)
forimg = ImageTk.PhotoImage(img)
self.button_forward = ttk.Button(
self.bfr, image=forimg, command=self.on_forward)
# , relief=FLAT)
self.button_forward.grid(
row=0, column=3, sticky="ew", padx=(5, 5), pady=(7, 7))
img = Image.open(
'web/images/ff.png').convert('RGBA').resize((s, s), Image.ANTIALIAS)
fforimg = ImageTk.PhotoImage(img)
self.button_fforward = ttk.Button(
self.bfr, image=fforimg, command=self.on_fast_forward)
# , relief=FLAT)
self.button_fforward.grid(
row=0, column=4, sticky="ew", padx=(5, 5), pady=(7, 7))
self.tk_board2 = GameBoard(root, log=self.log, r=1, c=2, color1=self.turquoise['dlight'],
color2=self.turquoise['turquoise'],
bg_color=self.turquoise['ldark'],
ol_color=self.turquoise['darkgray'])
self.movelist = tk.Text(root)
self.analist = tk.Text(root, height=10)
self.title_text = tk.StringVar()
self.title = ttk.Label(root, textvariable=self.title_text)
self.title.grid(row=0, column=0, sticky="ew")
self.movelist.grid(row=1, column=1, sticky="news")
self.tk_board2.grid(row=1, column=2, sticky="news")
self.analist.grid(row=2, column=2, sticky="ew")
menubar = tk.Menu(root)
root.config(menu=menubar)
file_menu = tk.Menu(menubar, tearoff=0)
file_menu.add_command(label="New Game", command=self.on_new, underline=0,
accelerator="Ctrl+n")
root.bind_all("<Control-n>", self.on_new)
file_menu.add_separator()
file_menu.add_command(label="Open PGN file...", command=self.on_pgn_open, underline=0,
accelerator="Ctrl+o")
root.bind_all("<Control-o>", self.on_pgn_open)
file_menu.add_command(label="Save PGN file...", command=self.on_pgn_save, underline=0,
accelerator="Ctrl+s")
root.bind_all("<Control-s>", self.on_pgn_save)
file_menu.add_separator()
file_menu.add_command(label="Exit", command=self.on_exit, underline=1,
accelerator="Ctrl+x")
root.bind_all("<Control-x>", self.on_exit)
game_menu = tk.Menu(menubar, tearoff=0)
submenu = tk.Menu(game_menu)
submenu.add_command(label="Player - Player", command=self.on_mode_pp)
submenu.add_command(label="Player - Engine", command=self.on_mode_pe)
submenu.add_command(label="Engine - Player", command=self.on_mode_ep)
        submenu.add_command(label="Engine - Engine", command=self.on_mode_ee)
game_menu.add_cascade(label="Game mode", menu=submenu, underline=6)
game_menu.add_separator()
game_menu.add_command(label="Go", command=self.on_go,
underline=0, accelerator="Ctrl+g")
root.bind_all("<Control-g>", self.on_go)
game_menu.add_command(
label="Beginning", command=self.on_fast_back, underline=0)
game_menu.add_command(label="Back", command=self.on_back, underline=0,
accelerator="Ctrl+b")
root.bind_all("<Control-b>", self.on_back)
game_menu.add_command(label="Forward", command=self.on_forward, underline=0,
accelerator="Ctrl+f")
root.bind_all("<Control-f>", self.on_forward)
game_menu.add_command(
label="End", command=self.on_fast_forward, underline=0)
game_menu.add_separator()
game_menu.add_command(label="Stop", command=self.on_stop, underline=1,
accelerator="Ctrl+t")
root.bind_all("<Control-t>", self.on_stop)
game_menu.add_separator()
game_menu.add_command(label="Analyse", command=self.on_analyse, underline=0,
accelerator="Ctrl+a")
root.bind_all("<Control-a>", self.on_analyse)
menubar.add_cascade(label="File", menu=file_menu, underline=0)
menubar.add_cascade(label="Game", menu=game_menu, underline=0)
self.gui_init = True
root.mainloop()
def on_new(self, event=None):
self.appque.put({'new game': '', 'actor': self.name})
def on_go(self, event=None):
self.appque.put({'go': 'current', 'actor': self.name})
def on_back(self, event=None):
self.appque.put({'back': '', 'actor': self.name})
def on_fast_back(self, event=None):
self.appque.put({'fast-back': '', 'actor': self.name})
def on_forward(self, event=None):
self.appque.put({'forward': '', 'actor': self.name})
def on_fast_forward(self, event=None):
self.appque.put({'fast-forward': '', 'actor': self.name})
def on_stop(self, event=None):
self.appque.put({'stop': '', 'actor': self.name})
def on_analyse(self, event=None):
self.appque.put({'analysis': '', 'actor': self.name})
def on_exit(self, event=None):
self.appque.put({'quit': '', 'actor': self.name})
def on_mode_pp(self, event=None):
self.appque.put({'game_mode': 'PLAYER_PLAYER'})
def on_mode_pe(self, event=None):
self.appque.put({'game_mode': 'PLAYER_ENGINE'})
def on_mode_ep(self, event=None):
self.appque.put({'game_mode': 'ENGINE_PLAYER'})
def on_mode_ee(self, event=None):
self.appque.put({'game_mode': 'ENGINE_ENGINE'})
def load_pgns(self, fn):
try:
with open(fn, 'r') as f:
d = f.read()
except Exception as e:
print(f"Failed to read {fn}: {e}")
return None
pt = d.split('\n\n')
if len(pt) % 2 != 0:
print("Bad structure or incomplete!")
return None
if len(pt) == 0:
print("Empty")
return None
games = []
for i in range(0, len(pt), 2):
gi = pt[i]+"\n\n"+pt[i+1]
games.append(gi)
return games
def on_pgn_open(self, event=None):
filename = filedialog.askopenfilename(initialdir=".", title="Select PGN file",
filetypes=(("pgn files", "*.pgn"),
("all files", "*.*")))
        games = self.load_pgns(filename)
        if games is None:
            return
        if len(games) > 1:
            self.log.warning(
                f'File contained {len(games)} games, only the first game was read.')
        self.appque.put({'pgn_game': {'pgn_data': games[0]}})
def on_pgn_save(self, event=None):
filename = filedialog.asksaveasfilename(initialdir=".",
title="Select PGN file",
filetypes=(("pgn files", "*.pgn"),
("all files", "*.*")))
print(filename)
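# Usage sketch (host-side wiring is an assumption, not part of this module):
# TkAgent talks to the rest of the application exclusively through `appque`,
# a queue of small dicts such as {'move': {'uci': 'e2e4', 'actor': 'TkAgent'}},
# {'go': 'current', 'actor': 'TkAgent'} or {'quit': '', 'actor': 'TkAgent'}.
#
#     import queue
#     appque = queue.Queue()
#     agent = TkAgent(appque, prefs={})
#     if agent.agent_ready():
#         msg = appque.get()   # block until the GUI emits an event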
|
test_content.py
|
from __future__ import print_function
import os
import re
import sys
import json
import time
import argparse
import threading
import subprocess
import traceback
from time import sleep
import datetime
from distutils.version import LooseVersion
import pytz
from google.cloud import storage
from google.api_core.exceptions import PreconditionFailed
from queue import Queue
from contextlib import contextmanager
import urllib3
import requests
import demisto_client.demisto_api
from slackclient import SlackClient
from Tests.mock_server import MITMProxy, AMIConnection
from Tests.test_integration import Docker, test_integration, disable_all_integrations
from Tests.test_dependencies import get_used_integrations, get_tests_allocation_for_threads
from demisto_sdk.commands.common.constants import RUN_ALL_TESTS_FORMAT, FILTER_CONF, PB_Status
from demisto_sdk.commands.common.tools import print_color, print_error, print_warning, \
LOG_COLORS, str2bool
# Disable insecure warnings
urllib3.disable_warnings()
SERVER_URL = "https://{}"
INTEGRATIONS_CONF = "./Tests/integrations_file.txt"
FAILED_MATCH_INSTANCE_MSG = "{} Failed to run.\n There are {} instances of {}, please select one of them by using " \
"the instance_name argument in conf.json. The options are:\n{}"
SERVICE_RESTART_TIMEOUT = 300
SERVICE_RESTART_POLLING_INTERVAL = 5
LOCKS_PATH = 'content-locks'
BUCKET_NAME = os.environ.get('GCS_ARTIFACTS_BUCKET')
CIRCLE_BUILD_NUM = os.environ.get('CIRCLE_BUILD_NUM')
WORKFLOW_ID = os.environ.get('CIRCLE_WORKFLOW_ID')
CIRCLE_STATUS_TOKEN = os.environ.get('CIRCLECI_STATUS_TOKEN')
SLACK_MEM_CHANNEL_ID = 'CM55V7J8K'
def options_handler():
parser = argparse.ArgumentParser(description='Utility for batch action on incidents')
parser.add_argument('-k', '--apiKey', help='The Demisto API key for the server', required=True)
parser.add_argument('-s', '--server', help='The server URL to connect to')
parser.add_argument('-c', '--conf', help='Path to conf file', required=True)
parser.add_argument('-e', '--secret', help='Path to secret conf file')
parser.add_argument('-n', '--nightly', type=str2bool, help='Run nightly tests')
parser.add_argument('-t', '--slack', help='The token for slack', required=True)
parser.add_argument('-a', '--circleci', help='The token for circleci', required=True)
parser.add_argument('-b', '--buildNumber', help='The build number', required=True)
parser.add_argument('-g', '--buildName', help='The build name', required=True)
parser.add_argument('-i', '--isAMI', type=str2bool, help='is AMI build or not', default=False)
parser.add_argument('-m', '--memCheck', type=str2bool,
help='Should trigger memory checks or not. The slack channel to check the data is: '
'dmst_content_nightly_memory_data', default=False)
    parser.add_argument('-d', '--serverVersion', help='Which server version to run the '
                                                      'tests on (valid only when using AMI)', default="NonAMI")
    parser.add_argument('-l', '--testsList', help='List of specific, comma separated '
                                                  'tests to run')
options = parser.parse_args()
tests_settings = TestsSettings(options)
return tests_settings
class TestsSettings:
def __init__(self, options):
self.api_key = options.apiKey
self.server = options.server
self.conf_path = options.conf
self.secret_conf_path = options.secret
self.nightly = options.nightly
self.slack = options.slack
self.circleci = options.circleci
self.buildNumber = options.buildNumber
self.buildName = options.buildName
self.isAMI = options.isAMI
self.memCheck = options.memCheck
self.serverVersion = options.serverVersion
self.serverNumericVersion = None
self.specific_tests_to_run = self.parse_tests_list_arg(options.testsList)
self.is_local_run = (self.server is not None)
@staticmethod
def parse_tests_list_arg(tests_list):
tests_to_run = tests_list.split(",") if tests_list else []
return tests_to_run
class PrintJob:
def __init__(self, message_to_print, print_function_to_execute, message_color=None):
self.print_function_to_execute = print_function_to_execute
self.message_to_print = message_to_print
self.message_color = message_color
def execute_print(self):
if self.message_color:
self.print_function_to_execute(self.message_to_print, self.message_color)
else:
self.print_function_to_execute(self.message_to_print)
class ParallelPrintsManager:
def __init__(self, number_of_threads):
self.threads_print_jobs = [[] for i in range(number_of_threads)]
self.print_lock = threading.Lock()
self.threads_last_update_times = [time.time() for i in range(number_of_threads)]
def should_update_thread_status(self, thread_index):
current_time = time.time()
thread_last_update = self.threads_last_update_times[thread_index]
return current_time - thread_last_update > 300
def add_print_job(self, message_to_print, print_function_to_execute, thread_index, message_color=None,
include_timestamp=False):
if include_timestamp:
message_to_print = f'[{datetime.datetime.now(datetime.timezone.utc)}] {message_to_print}'
print_job = PrintJob(message_to_print, print_function_to_execute, message_color=message_color)
self.threads_print_jobs[thread_index].append(print_job)
if self.should_update_thread_status(thread_index):
print("Thread {} is still running.".format(thread_index))
self.threads_last_update_times[thread_index] = time.time()
def execute_thread_prints(self, thread_index):
self.print_lock.acquire()
prints_to_execute = self.threads_print_jobs[thread_index]
for print_job in prints_to_execute:
print_job.execute_print()
self.print_lock.release()
self.threads_print_jobs[thread_index] = []
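# Usage sketch: worker thread `i` buffers its output with
#     prints_manager.add_print_job('some message', print, i)
# and flushes it in one locked batch with
#     prints_manager.execute_thread_prints(i)
# so that output from concurrently running test threads is not interleaved.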
class TestsDataKeeper:
def __init__(self):
self.succeeded_playbooks = []
self.failed_playbooks = []
self.skipped_tests = []
self.skipped_integrations = []
self.rerecorded_tests = []
self.empty_files = []
self.unmockable_integrations = {}
def add_tests_data(self, succeed_playbooks, failed_playbooks, skipped_tests, skipped_integration,
unmockable_integrations):
# Using multiple appends and not extend since append is guaranteed to be thread safe
for playbook in succeed_playbooks:
self.succeeded_playbooks.append(playbook)
for playbook in failed_playbooks:
self.failed_playbooks.append(playbook)
for playbook in skipped_tests:
self.skipped_tests.append(playbook)
for playbook in skipped_integration:
self.skipped_integrations.append(playbook)
for playbook_id, reason in unmockable_integrations.items():
self.unmockable_integrations[playbook_id] = reason
def add_proxy_related_test_data(self, proxy):
# Using multiple appends and not extend since append is guaranteed to be thread safe
for playbook_id in proxy.rerecorded_tests:
self.rerecorded_tests.append(playbook_id)
for playbook_id in proxy.empty_files:
self.empty_files.append(playbook_id)
def print_test_summary(tests_data_keeper, is_ami=True):
succeed_playbooks = tests_data_keeper.succeeded_playbooks
failed_playbooks = tests_data_keeper.failed_playbooks
skipped_tests = tests_data_keeper.skipped_tests
    unmockable_integrations = tests_data_keeper.unmockable_integrations
skipped_integration = tests_data_keeper.skipped_integrations
rerecorded_tests = tests_data_keeper.rerecorded_tests
empty_files = tests_data_keeper.empty_files
succeed_count = len(succeed_playbooks)
failed_count = len(failed_playbooks)
skipped_count = len(skipped_tests)
rerecorded_count = len(rerecorded_tests) if is_ami else 0
empty_mocks_count = len(empty_files) if is_ami else 0
    unmockable_integrations_count = len(unmockable_integrations)
print('\nTEST RESULTS:')
tested_playbooks_message = '\t Number of playbooks tested - ' + str(succeed_count + failed_count)
print(tested_playbooks_message)
succeeded_playbooks_message = '\t Number of succeeded tests - ' + str(succeed_count)
print_color(succeeded_playbooks_message, LOG_COLORS.GREEN)
if failed_count > 0:
failed_tests_message = '\t Number of failed tests - ' + str(failed_count) + ':'
print_error(failed_tests_message)
for playbook_id in failed_playbooks:
print_error('\t - ' + playbook_id)
if rerecorded_count > 0:
recording_warning = '\t Tests with failed playback and successful re-recording - ' + str(rerecorded_count) + ':'
print_warning(recording_warning)
for playbook_id in rerecorded_tests:
print_warning('\t - ' + playbook_id)
if empty_mocks_count > 0:
empty_mock_successes_msg = '\t Successful tests with empty mock files - ' + str(empty_mocks_count) + ':'
print(empty_mock_successes_msg)
proxy_explanation = '\t (either there were no http requests or no traffic is passed through the proxy.\n' \
'\t Investigate the playbook and the integrations.\n' \
'\t If the integration has no http traffic, add to unmockable_integrations in conf.json)'
print(proxy_explanation)
for playbook_id in empty_files:
print('\t - ' + playbook_id)
if len(skipped_integration) > 0:
        skipped_integrations_warning = '\t Number of skipped integrations - ' + str(len(skipped_integration)) + ':'
print_warning(skipped_integrations_warning)
for playbook_id in skipped_integration:
print_warning('\t - ' + playbook_id)
if skipped_count > 0:
skipped_tests_warning = '\t Number of skipped tests - ' + str(skipped_count) + ':'
print_warning(skipped_tests_warning)
for playbook_id in skipped_tests:
print_warning('\t - ' + playbook_id)
    if unmockable_integrations_count > 0:
        unmockable_warning = '\t Number of unmockable integrations - ' + str(unmockable_integrations_count) + ':'
        print_warning(unmockable_warning)
        for playbook_id, reason in unmockable_integrations.items():
            print_warning('\t - ' + playbook_id + ' - ' + reason)
def update_test_msg(integrations, test_message):
if integrations:
integrations_names = [integration['name'] for integration in
integrations]
test_message = test_message + ' with integration(s): ' + ','.join(
integrations_names)
return test_message
def turn_off_telemetry(server, demisto_api_key):
"""
Turn off telemetry on the AMI instance
:param server: demisto server to connect to
:param demisto_api_key: api key to use for connection
:return: None
"""
client = demisto_client.configure(base_url=server, api_key=demisto_api_key, verify_ssl=False)
body, status_code, _ = demisto_client.generic_request_func(self=client, method='POST',
path='/telemetry?status=notelemetry')
if status_code != 200:
print_error('Request to turn off telemetry failed with status code "{}"\n{}'.format(status_code, body))
sys.exit(1)
def reset_containers(server, demisto_api_key, prints_manager, thread_index):
prints_manager.add_print_job('Resetting containers', print, thread_index)
client = demisto_client.configure(base_url=server, api_key=demisto_api_key, verify_ssl=False)
body, status_code, _ = demisto_client.generic_request_func(self=client, method='POST',
path='/containers/reset')
if status_code != 200:
error_msg = 'Request to reset containers failed with status code "{}"\n{}'.format(status_code, body)
prints_manager.add_print_job(error_msg, print_error, thread_index)
prints_manager.execute_thread_prints(thread_index)
sys.exit(1)
sleep(10)
def has_unmockable_integration(integrations, unmockable_integrations):
return list(set(x['name'] for x in integrations).intersection(unmockable_integrations.keys()))
def get_docker_limit():
process = subprocess.Popen(['cat', '/sys/fs/cgroup/memory/memory.limit_in_bytes'], stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout, stderr = process.communicate()
return stdout, stderr
def get_docker_processes_data():
process = subprocess.Popen(['ps', 'aux'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = process.communicate()
return stdout, stderr
def get_docker_memory_data():
process = subprocess.Popen(['cat', '/sys/fs/cgroup/memory/memory.usage_in_bytes'], stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout, stderr = process.communicate()
return stdout, stderr
def send_slack_message(slack, channel, text, user_name, as_user):
    sc = SlackClient(slack)
    sc.api_call(
        "chat.postMessage",
        channel=channel,
        username=user_name,
        as_user=as_user,
        text=text,
        mrkdwn='true'
    )
def run_test_logic(conf_json_test_details, tests_queue, tests_settings, c, failed_playbooks, integrations, playbook_id,
succeed_playbooks, test_message, test_options, slack, circle_ci, build_number, server_url,
build_name, prints_manager, thread_index=0, is_mock_run=False):
with acquire_test_lock(integrations,
test_options.get('timeout'),
prints_manager,
thread_index,
tests_settings.conf_path) as lock:
if lock:
status, inc_id = test_integration(c, server_url, integrations, playbook_id, prints_manager, test_options,
is_mock_run, thread_index=thread_index)
# c.api_client.pool.close()
if status == PB_Status.COMPLETED:
prints_manager.add_print_job('PASS: {} succeed'.format(test_message), print_color, thread_index,
message_color=LOG_COLORS.GREEN)
succeed_playbooks.append(playbook_id)
elif status == PB_Status.NOT_SUPPORTED_VERSION:
not_supported_version_message = 'PASS: {} skipped - not supported version'.format(test_message)
prints_manager.add_print_job(not_supported_version_message, print, thread_index)
succeed_playbooks.append(playbook_id)
else:
error_message = 'Failed: {} failed'.format(test_message)
prints_manager.add_print_job(error_message, print_error, thread_index)
playbook_id_with_mock = playbook_id
if not is_mock_run:
playbook_id_with_mock += " (Mock Disabled)"
failed_playbooks.append(playbook_id_with_mock)
if not tests_settings.is_local_run:
notify_failed_test(slack, circle_ci, playbook_id, build_number, inc_id, server_url, build_name)
succeed = status in (PB_Status.COMPLETED, PB_Status.NOT_SUPPORTED_VERSION)
else:
tests_queue.put(conf_json_test_details)
succeed = False
return succeed
# run the test using a real instance, record traffic.
def run_and_record(conf_json_test_details, tests_queue, tests_settings, c, proxy, failed_playbooks, integrations,
playbook_id, succeed_playbooks, test_message, test_options, slack, circle_ci, build_number,
server_url, build_name, prints_manager, thread_index=0):
proxy.set_tmp_folder()
proxy.start(playbook_id, record=True, thread_index=thread_index, prints_manager=prints_manager)
succeed = run_test_logic(conf_json_test_details, tests_queue, tests_settings, c, failed_playbooks, integrations,
playbook_id, succeed_playbooks, test_message, test_options, slack, circle_ci, build_number,
server_url, build_name, prints_manager, thread_index=thread_index, is_mock_run=True)
proxy.stop(thread_index=thread_index, prints_manager=prints_manager)
if succeed:
proxy.clean_mock_file(playbook_id, thread_index=thread_index, prints_manager=prints_manager)
proxy.move_mock_file_to_repo(playbook_id, thread_index=thread_index, prints_manager=prints_manager)
proxy.set_repo_folder()
return succeed
def mock_run(conf_json_test_details, tests_queue, tests_settings, c, proxy, failed_playbooks, integrations,
playbook_id, succeed_playbooks, test_message, test_options, slack, circle_ci, build_number, server_url,
build_name, start_message, prints_manager, thread_index=0):
rerecord = False
if proxy.has_mock_file(playbook_id):
start_mock_message = '{} (Mock: Playback)'.format(start_message)
prints_manager.add_print_job(start_mock_message, print, thread_index, include_timestamp=True)
proxy.start(playbook_id, thread_index=thread_index, prints_manager=prints_manager)
# run test
status, _ = test_integration(c, server_url, integrations, playbook_id, prints_manager, test_options,
is_mock_run=True, thread_index=thread_index)
# use results
proxy.stop(thread_index=thread_index, prints_manager=prints_manager)
if status == PB_Status.COMPLETED:
succeed_message = 'PASS: {} succeed'.format(test_message)
prints_manager.add_print_job(succeed_message, print_color, thread_index, LOG_COLORS.GREEN)
succeed_playbooks.append(playbook_id)
end_mock_message = f'------ Test {test_message} end ------\n'
prints_manager.add_print_job(end_mock_message, print, thread_index, include_timestamp=True)
return
if status == PB_Status.NOT_SUPPORTED_VERSION:
not_supported_version_message = 'PASS: {} skipped - not supported version'.format(test_message)
prints_manager.add_print_job(not_supported_version_message, print, thread_index)
succeed_playbooks.append(playbook_id)
end_mock_message = f'------ Test {test_message} end ------\n'
prints_manager.add_print_job(end_mock_message, print, thread_index, include_timestamp=True)
return
if status == PB_Status.FAILED_DOCKER_TEST:
error_message = 'Failed: {} failed'.format(test_message)
prints_manager.add_print_job(error_message, print_error, thread_index)
failed_playbooks.append(playbook_id)
end_mock_message = f'------ Test {test_message} end ------\n'
prints_manager.add_print_job(end_mock_message, print, thread_index, include_timestamp=True)
return
mock_failed_message = "Test failed with mock, recording new mock file. (Mock: Recording)"
prints_manager.add_print_job(mock_failed_message, print, thread_index)
rerecord = True
else:
mock_recording_message = start_message + ' (Mock: Recording)'
prints_manager.add_print_job(mock_recording_message, print, thread_index, include_timestamp=True)
# Mock recording - no mock file or playback failure.
c = demisto_client.configure(base_url=c.api_client.configuration.host,
api_key=c.api_client.configuration.api_key, verify_ssl=False)
succeed = run_and_record(conf_json_test_details, tests_queue, tests_settings, c, proxy, failed_playbooks,
integrations, playbook_id, succeed_playbooks, test_message, test_options, slack, circle_ci,
build_number, server_url, build_name, prints_manager, thread_index=thread_index)
if rerecord and succeed:
proxy.rerecorded_tests.append(playbook_id)
test_end_message = f'------ Test {test_message} end ------\n'
prints_manager.add_print_job(test_end_message, print, thread_index, include_timestamp=True)
def run_test(conf_json_test_details, tests_queue, tests_settings, demisto_api_key, proxy, failed_playbooks,
integrations, unmockable_integrations, playbook_id, succeed_playbooks, test_message, test_options,
slack, circle_ci, build_number, server_url, build_name, prints_manager, is_ami=True, thread_index=0):
start_message = f'------ Test {test_message} start ------'
client = demisto_client.configure(base_url=server_url, api_key=demisto_api_key, verify_ssl=False)
if not is_ami or (not integrations or has_unmockable_integration(integrations, unmockable_integrations)):
prints_manager.add_print_job(start_message + ' (Mock: Disabled)', print, thread_index, include_timestamp=True)
run_test_logic(conf_json_test_details, tests_queue, tests_settings, client, failed_playbooks, integrations,
playbook_id, succeed_playbooks, test_message, test_options, slack, circle_ci, build_number,
server_url, build_name, prints_manager, thread_index=thread_index)
prints_manager.add_print_job('------ Test %s end ------\n' % (test_message,), print, thread_index,
include_timestamp=True)
return
mock_run(conf_json_test_details, tests_queue, tests_settings, client, proxy, failed_playbooks, integrations,
playbook_id, succeed_playbooks, test_message, test_options, slack, circle_ci, build_number,
server_url, build_name, start_message, prints_manager, thread_index=thread_index)
def http_request(url, params_dict=None):
    res = requests.request("GET",
                           url,
                           verify=True,
                           params=params_dict,
                           )
    res.raise_for_status()
    return res.json()
def get_user_name_from_circle(circleci_token, build_number):
url = "https://circleci.com/api/v1.1/project/github/demisto/content/{0}?circle-token={1}".format(build_number,
circleci_token)
res = http_request(url)
user_details = res.get('user', {})
return user_details.get('name', '')
def notify_failed_test(slack, circle_ci, playbook_id, build_number, inc_id, server_url, build_name):
circle_user_name = get_user_name_from_circle(circle_ci, build_number)
sc = SlackClient(slack)
user_id = retrieve_id(circle_user_name, sc)
text = "{0} - {1} Failed\n{2}".format(build_name, playbook_id, server_url) if inc_id == -1 \
else "{0} - {1} Failed\n{2}/#/WorkPlan/{3}".format(build_name, playbook_id, server_url, inc_id)
if user_id:
sc.api_call(
"chat.postMessage",
channel=user_id,
username="Content CircleCI",
as_user="False",
text=text
)
def retrieve_id(circle_user_name, sc):
user_id = ''
res = sc.api_call('users.list')
user_list = res.get('members', [])
for user in user_list:
profile = user.get('profile', {})
name = profile.get('real_name_normalized', '')
if name == circle_user_name:
user_id = user.get('id', '')
return user_id
def create_result_files(tests_data_keeper):
failed_playbooks = tests_data_keeper.failed_playbooks
skipped_integration = tests_data_keeper.skipped_integrations
skipped_tests = tests_data_keeper.skipped_tests
with open("./Tests/failed_tests.txt", "w") as failed_tests_file:
failed_tests_file.write('\n'.join(failed_playbooks))
with open('./Tests/skipped_tests.txt', "w") as skipped_tests_file:
skipped_tests_file.write('\n'.join(skipped_tests))
with open('./Tests/skipped_integrations.txt', "w") as skipped_integrations_file:
skipped_integrations_file.write('\n'.join(skipped_integration))
def change_placeholders_to_values(placeholders_map, config_item):
"""Replaces placeholders in the object to their real values
Args:
placeholders_map: (dict)
Dict that holds the real values to be replaced for each placeholder.
config_item: (json object)
Integration configuration object.
Returns:
dict. json object with the real configuration.
"""
item_as_string = json.dumps(config_item)
for key, value in placeholders_map.items():
item_as_string = item_as_string.replace(key, value)
return json.loads(item_as_string)
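# Illustrative example (hypothetical values): with
# placeholders_map = {'%%SERVER_HOST%%': '10.0.0.1'} and
# config_item = {'url': 'https://%%SERVER_HOST%%'},
# the function returns {'url': 'https://10.0.0.1'}.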
def set_integration_params(demisto_api_key, integrations, secret_params, instance_names, playbook_id,
prints_manager, placeholders_map, thread_index=0):
for integration in integrations:
integration_params = [change_placeholders_to_values(placeholders_map, item) for item
in secret_params if item['name'] == integration['name']]
if integration_params:
matched_integration_params = integration_params[0]
if len(integration_params) != 1:
found_matching_instance = False
for item in integration_params:
if item.get('instance_name', 'Not Found') in instance_names:
matched_integration_params = item
found_matching_instance = True
if not found_matching_instance:
optional_instance_names = [optional_integration.get('instance_name', 'None')
for optional_integration in integration_params]
error_msg = FAILED_MATCH_INSTANCE_MSG.format(playbook_id, len(integration_params),
integration['name'],
'\n'.join(optional_instance_names))
prints_manager.add_print_job(error_msg, print_error, thread_index)
return False
integration['params'] = matched_integration_params.get('params', {})
integration['byoi'] = matched_integration_params.get('byoi', True)
integration['instance_name'] = matched_integration_params.get('instance_name', integration['name'])
integration['validate_test'] = matched_integration_params.get('validate_test', True)
elif integration['name'] == 'Demisto REST API':
integration['params'] = {
'url': 'https://localhost',
'apikey': demisto_api_key,
'insecure': True,
}
return True
def collect_integrations(integrations_conf, skipped_integration, skipped_integrations_conf, nightly_integrations):
integrations = []
is_nightly_integration = False
test_skipped_integration = []
for integration in integrations_conf:
if integration in skipped_integrations_conf.keys():
skipped_integration.add("{0} - reason: {1}".format(integration, skipped_integrations_conf[integration]))
test_skipped_integration.append(integration)
if integration in nightly_integrations:
is_nightly_integration = True
# string description
integrations.append({
'name': integration,
'params': {}
})
return test_skipped_integration, integrations, is_nightly_integration
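# collect_integrations returns a tuple of (names of skipped integrations used by the test,
# integration dicts of the form {'name': <integration>, 'params': {}},
# whether any of the integrations is nightly-only).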
def extract_filtered_tests(is_nightly):
if is_nightly:
# TODO: verify this response
return [], False, True
with open(FILTER_CONF, 'r') as filter_file:
filtered_tests = filter_file.readlines()
filtered_tests = [line.strip('\n') for line in filtered_tests]
is_filter_configured = bool(filtered_tests)
run_all = RUN_ALL_TESTS_FORMAT in filtered_tests
return filtered_tests, is_filter_configured, run_all
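# FILTER_CONF is read as one playbook ID per line; if the RUN_ALL_TESTS_FORMAT
# sentinel line is present, all tests are run regardless of the filter.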
def load_conf_files(conf_path, secret_conf_path):
with open(conf_path) as data_file:
conf = json.load(data_file)
secret_conf = None
if secret_conf_path:
with open(secret_conf_path) as data_file:
secret_conf = json.load(data_file)
return conf, secret_conf
def run_test_scenario(tests_queue, tests_settings, t, proxy, default_test_timeout, skipped_tests_conf, nightly_integrations,
skipped_integrations_conf, skipped_integration, is_nightly, run_all_tests, is_filter_configured,
filtered_tests, skipped_tests, secret_params, failed_playbooks, playbook_skipped_integration,
unmockable_integrations, succeed_playbooks, slack, circle_ci, build_number, server, build_name,
server_numeric_version, demisto_api_key, prints_manager, thread_index=0, is_ami=True):
playbook_id = t['playbookID']
nightly_test = t.get('nightly', False)
integrations_conf = t.get('integrations', [])
instance_names_conf = t.get('instance_names', [])
test_message = 'playbook: ' + playbook_id
test_options = {
'timeout': t.get('timeout', default_test_timeout),
'memory_threshold': t.get('memory_threshold', Docker.DEFAULT_CONTAINER_MEMORY_USAGE),
'pid_threshold': t.get('pid_threshold', Docker.DEFAULT_CONTAINER_PIDS_USAGE)
}
if not isinstance(integrations_conf, list):
integrations_conf = [integrations_conf, ]
if not isinstance(instance_names_conf, list):
instance_names_conf = [instance_names_conf, ]
test_skipped_integration, integrations, is_nightly_integration = collect_integrations(
integrations_conf, skipped_integration, skipped_integrations_conf, nightly_integrations)
if playbook_id in filtered_tests:
playbook_skipped_integration.update(test_skipped_integration)
skip_nightly_test = (nightly_test or is_nightly_integration) and not is_nightly
# Skip nightly test
if skip_nightly_test:
prints_manager.add_print_job(f'\n------ Test {test_message} start ------', print, thread_index,
include_timestamp=True)
prints_manager.add_print_job('Skip test', print, thread_index)
prints_manager.add_print_job(f'------ Test {test_message} end ------\n', print, thread_index,
include_timestamp=True)
return
if not run_all_tests:
# Skip filtered test
if is_filter_configured and playbook_id not in filtered_tests:
return
# Skip bad test
if playbook_id in skipped_tests_conf:
skipped_tests.add(f'{playbook_id} - reason: {skipped_tests_conf[playbook_id]}')
return
# Skip integration
if test_skipped_integration:
return
# Skip version mismatch test
test_from_version = t.get('fromversion', '0.0.0')
test_to_version = t.get('toversion', '99.99.99')
if not (LooseVersion(test_from_version) <= LooseVersion(server_numeric_version) <= LooseVersion(test_to_version)):
prints_manager.add_print_job(f'\n------ Test {test_message} start ------', print, thread_index,
include_timestamp=True)
warning_message = 'Test {} ignored due to version mismatch (test versions: {}-{})'.format(test_message,
test_from_version,
test_to_version)
prints_manager.add_print_job(warning_message, print_warning, thread_index)
prints_manager.add_print_job(f'------ Test {test_message} end ------\n', print, thread_index,
include_timestamp=True)
return
placeholders_map = {'%%SERVER_HOST%%': server}
are_params_set = set_integration_params(demisto_api_key, integrations, secret_params, instance_names_conf,
playbook_id, prints_manager, placeholders_map, thread_index=thread_index)
if not are_params_set:
failed_playbooks.append(playbook_id)
return
test_message = update_test_msg(integrations, test_message)
options = options_handler()
stdout, stderr = get_docker_memory_data()
text = 'Memory Usage: {}'.format(stdout) if not stderr else stderr
if options.nightly and options.memCheck and not tests_settings.is_local_run:
send_slack_message(slack, SLACK_MEM_CHANNEL_ID, text, 'Content CircleCI', 'False')
stdout, stderr = get_docker_processes_data()
text = stdout if not stderr else stderr
send_slack_message(slack, SLACK_MEM_CHANNEL_ID, text, 'Content CircleCI', 'False')
run_test(t, tests_queue, tests_settings, demisto_api_key, proxy, failed_playbooks, integrations, unmockable_integrations,
playbook_id, succeed_playbooks, test_message, test_options, slack, circle_ci,
build_number, server, build_name, prints_manager, is_ami, thread_index=thread_index)
def get_server_numeric_version(ami_env, is_local_run=False):
"""
Gets the current server version
Arguments:
ami_env: (str)
AMI version name.
is_local_run: (bool)
when running locally, assume latest version.
Returns:
(str) Server numeric version
"""
default_version = '99.99.98'
env_results_path = './env_results.json'
if is_local_run:
print_color(f'Local run, assuming server version is {default_version}', LOG_COLORS.GREEN)
return default_version
if not os.path.isfile(env_results_path):
print_warning(f'Did not find {env_results_path} file, assuming server version is {default_version}.')
return default_version
with open(env_results_path, 'r') as json_file:
env_results = json.load(json_file)
instances_ami_names = set([env.get('AmiName') for env in env_results if ami_env in env.get('Role', '')])
if len(instances_ami_names) != 1:
print_warning(f'Did not get one AMI Name, got {instances_ami_names}.'
f' Assuming server version is {default_version}')
return default_version
instances_ami_name = list(instances_ami_names)[0]
extracted_version = re.findall(r'Demisto-(?:Circle-CI|MarketPlace)-Content-[\w-]+-([\d.]+)-[\d]{5}',
instances_ami_name)
if extracted_version:
server_numeric_version = extracted_version[0]
else:
server_numeric_version = default_version
# make sure version is three-part version
if server_numeric_version.count('.') == 1:
server_numeric_version += ".0"
print_color(f'Server version: {server_numeric_version}', LOG_COLORS.GREEN)
return server_numeric_version
def get_instances_ips_and_names(tests_settings):
if tests_settings.server:
return [tests_settings.server]
with open('./Tests/instance_ips.txt', 'r') as instance_file:
instance_ips = instance_file.readlines()
instance_ips = [line.strip('\n').split(":") for line in instance_ips]
return instance_ips
def get_test_records_of_given_test_names(tests_settings, tests_names_to_search):
conf, secret_conf = load_conf_files(tests_settings.conf_path, tests_settings.secret_conf_path)
tests_records = conf['tests']
test_records_with_supplied_names = []
for test_record in tests_records:
test_name = test_record.get("playbookID")
if test_name and test_name in tests_names_to_search:
test_records_with_supplied_names.append(test_record)
return test_records_with_supplied_names
def execute_testing(tests_settings, server_ip, mockable_tests_names, unmockable_tests_names,
tests_data_keeper, prints_manager, thread_index=0, is_ami=True):
server = SERVER_URL.format(server_ip)
server_numeric_version = tests_settings.serverNumericVersion
start_message = "Executing tests with the server {} - and the server ip {}".format(server, server_ip)
prints_manager.add_print_job(start_message, print, thread_index)
is_nightly = tests_settings.nightly
is_memory_check = tests_settings.memCheck
slack = tests_settings.slack
circle_ci = tests_settings.circleci
build_number = tests_settings.buildNumber
build_name = tests_settings.buildName
conf, secret_conf = load_conf_files(tests_settings.conf_path, tests_settings.secret_conf_path)
demisto_api_key = tests_settings.api_key
default_test_timeout = conf.get('testTimeout', 30)
tests = conf['tests']
skipped_tests_conf = conf['skipped_tests']
nightly_integrations = conf['nightly_integrations']
skipped_integrations_conf = conf['skipped_integrations']
unmockable_integrations = conf['unmockable_integrations']
secret_params = secret_conf['integrations'] if secret_conf else []
filtered_tests, is_filter_configured, run_all_tests = extract_filtered_tests(tests_settings.nightly)
if is_filter_configured and not run_all_tests:
is_nightly = True
if not tests or len(tests) == 0:
prints_manager.add_print_job('no tests are configured to run', print, thread_index)
prints_manager.execute_thread_prints(thread_index)
return
# turn off telemetry
turn_off_telemetry(server, demisto_api_key)
proxy = None
if is_ami:
ami = AMIConnection(server_ip)
ami.clone_mock_data()
proxy = MITMProxy(server_ip)
failed_playbooks = []
succeed_playbooks = []
skipped_tests = set([])
skipped_integration = set([])
playbook_skipped_integration = set([])
disable_all_integrations(demisto_api_key, server, prints_manager, thread_index=thread_index)
prints_manager.execute_thread_prints(thread_index)
mockable_tests = get_test_records_of_given_test_names(tests_settings, mockable_tests_names)
unmockable_tests = get_test_records_of_given_test_names(tests_settings, unmockable_tests_names)
if is_nightly and is_memory_check:
mem_lim, err = get_docker_limit()
send_slack_message(slack, SLACK_MEM_CHANNEL_ID,
f'Build Number: {build_number}\n Server Address: {server}\nMemory Limit: {mem_lim}',
'Content CircleCI', 'False')
try:
# first run the mock tests to avoid mockless side effects in container
if is_ami and mockable_tests:
proxy.configure_proxy_in_demisto(demisto_api_key, server, proxy.ami.docker_ip + ':' + proxy.PROXY_PORT)
executed_in_current_round, mockable_tests_queue = initialize_queue_and_executed_tests_set(mockable_tests)
while not mockable_tests_queue.empty():
t = mockable_tests_queue.get()
executed_in_current_round = update_round_set_and_sleep_if_round_completed(executed_in_current_round,
prints_manager,
t,
thread_index,
mockable_tests_queue)
run_test_scenario(mockable_tests_queue, tests_settings, t, proxy, default_test_timeout, skipped_tests_conf,
nightly_integrations, skipped_integrations_conf, skipped_integration, is_nightly,
run_all_tests, is_filter_configured, filtered_tests,
skipped_tests, secret_params, failed_playbooks, playbook_skipped_integration,
unmockable_integrations, succeed_playbooks, slack, circle_ci, build_number, server,
build_name, server_numeric_version, demisto_api_key, prints_manager,
thread_index=thread_index)
proxy.configure_proxy_in_demisto(demisto_api_key, server, '')
# reset containers after clearing the proxy server configuration
reset_containers(server, demisto_api_key, prints_manager, thread_index)
prints_manager.add_print_job("\nRunning mock-disabled tests", print, thread_index)
executed_in_current_round, unmockable_tests_queue = initialize_queue_and_executed_tests_set(unmockable_tests)
while not unmockable_tests_queue.empty():
t = unmockable_tests_queue.get()
executed_in_current_round = update_round_set_and_sleep_if_round_completed(executed_in_current_round,
prints_manager,
t,
thread_index,
unmockable_tests_queue)
run_test_scenario(unmockable_tests_queue, tests_settings, t, proxy, default_test_timeout,
skipped_tests_conf, nightly_integrations, skipped_integrations_conf, skipped_integration,
is_nightly, run_all_tests, is_filter_configured, filtered_tests, skipped_tests,
secret_params, failed_playbooks, playbook_skipped_integration, unmockable_integrations,
succeed_playbooks, slack, circle_ci, build_number, server, build_name,
server_numeric_version, demisto_api_key, prints_manager, thread_index, is_ami)
prints_manager.execute_thread_prints(thread_index)
except Exception as exc:
prints_manager.add_print_job(f'~~ Thread {thread_index + 1} failed ~~\n{str(exc)}\n{traceback.format_exc()}',
print_error, thread_index)
prints_manager.execute_thread_prints(thread_index)
failed_playbooks.append(f'~~ Thread {thread_index + 1} failed ~~')
raise
finally:
tests_data_keeper.add_tests_data(succeed_playbooks, failed_playbooks, skipped_tests,
skipped_integration, unmockable_integrations)
if is_ami:
tests_data_keeper.add_proxy_related_test_data(proxy)
if build_name == 'master':
updating_mocks_msg = "Pushing new/updated mock files to mock git repo."
prints_manager.add_print_job(updating_mocks_msg, print, thread_index)
ami.upload_mock_files(build_name, build_number)
if playbook_skipped_integration and build_name == 'master':
comment = 'The following integrations are skipped and critical for the test:\n {}'. \
format('\n- '.join(playbook_skipped_integration))
add_pr_comment(comment)
def update_round_set_and_sleep_if_round_completed(executed_in_current_round: set,
prints_manager: ParallelPrintsManager,
t: dict,
thread_index: int,
unmockable_tests_queue: Queue) -> set:
"""
Checks if the string representation of the current test configuration is already in
the executed_in_current_round set.
If it is, it means this test has already been executed, so a full round has completed and there are tests
that could not be locked by this execution.
In that case we start monitoring a new round by emptying the 'executed_in_current_round' set and sleeping
in order to let the locked tests be released.
Args:
executed_in_current_round: A set containing the string representation of all tests configuration as they appear
in conf.json file that were already executed in the current round
prints_manager: ParallelPrintsManager object
t: test configuration as it appears in conf.json file
thread_index: Currently executing thread
unmockable_tests_queue: The queue of remaining tests
Returns:
A new executed_in_current_round set which contains only the current tests configuration if a round was completed
else it just adds the new test to the set.
"""
if str(t) in executed_in_current_round:
prints_manager.add_print_job(
'all tests in the queue were executed, sleeping for 30 seconds to let locked tests get unlocked.',
print,
thread_index)
executed_in_current_round = set()
time.sleep(30)
executed_in_current_round.add(str(t))
return executed_in_current_round
def initialize_queue_and_executed_tests_set(tests):
tests_queue = Queue()
already_executed_test_playbooks = set()
for t in tests:
tests_queue.put(t)
return already_executed_test_playbooks, tests_queue
def get_unmockable_tests(tests_settings):
conf, _ = load_conf_files(tests_settings.conf_path, tests_settings.secret_conf_path)
unmockable_integrations = conf['unmockable_integrations']
tests = conf['tests']
unmockable_tests = []
for test_record in tests:
test_name = test_record.get("playbookID")
integrations_used_in_test = get_used_integrations(test_record)
unmockable_integrations_used = [integration_name for integration_name in integrations_used_in_test if
integration_name in unmockable_integrations]
if test_name and (not integrations_used_in_test or unmockable_integrations_used):
unmockable_tests.append(test_name)
return unmockable_tests
def get_all_tests(tests_settings):
conf, _ = load_conf_files(tests_settings.conf_path, tests_settings.secret_conf_path)
tests_records = conf['tests']
all_tests = []
for test_record in tests_records:
test_name = test_record.get("playbookID")
if test_name:
all_tests.append(test_name)
return all_tests
def manage_tests(tests_settings):
"""
This function manages the execution of Demisto's tests.
Args:
tests_settings (TestsSettings): An object containing all the relevant data regarding how the tests should be run
"""
tests_settings.serverNumericVersion = get_server_numeric_version(tests_settings.serverVersion,
tests_settings.is_local_run)
instances_ips = get_instances_ips_and_names(tests_settings)
is_nightly = tests_settings.nightly
number_of_instances = len(instances_ips)
prints_manager = ParallelPrintsManager(number_of_instances)
tests_data_keeper = TestsDataKeeper()
if tests_settings.server:
# If the user supplied a server - all tests will be done on that server.
server_ip = tests_settings.server
print_color("Starting tests for {}".format(server_ip), LOG_COLORS.GREEN)
print("Starts tests with server url - https://{}".format(server_ip))
all_tests = get_all_tests(tests_settings)
mockable_tests = []
print(tests_settings.specific_tests_to_run)
unmockable_tests = tests_settings.specific_tests_to_run if tests_settings.specific_tests_to_run else all_tests
execute_testing(tests_settings, server_ip, mockable_tests, unmockable_tests, tests_data_keeper, prints_manager,
thread_index=0, is_ami=False)
elif tests_settings.isAMI:
# Running tests in AMI configuration.
# This is the way we run most tests, including running Circle for PRs and nightly.
if is_nightly:
# If the build is a nightly build, run tests in parallel.
test_allocation = get_tests_allocation_for_threads(number_of_instances, tests_settings.conf_path)
current_thread_index = 0
all_unmockable_tests_list = get_unmockable_tests(tests_settings)
threads_array = []
for ami_instance_name, ami_instance_ip in instances_ips:
if ami_instance_name == tests_settings.serverVersion: # Only run tests for given AMI Role
current_instance = ami_instance_ip
tests_allocation_for_instance = test_allocation[current_thread_index]
unmockable_tests = [test for test in all_unmockable_tests_list
if test in tests_allocation_for_instance]
mockable_tests = [test for test in tests_allocation_for_instance if test not in unmockable_tests]
print_color("Starting tests for {}".format(ami_instance_name), LOG_COLORS.GREEN)
print("Starts tests with server url - https://{}".format(ami_instance_ip))
if number_of_instances == 1:
execute_testing(tests_settings, current_instance, mockable_tests, unmockable_tests,
tests_data_keeper, prints_manager, thread_index=0, is_ami=True)
else:
thread_kwargs = {
"tests_settings": tests_settings,
"server_ip": current_instance,
"mockable_tests_names": mockable_tests,
"unmockable_tests_names": unmockable_tests,
"thread_index": current_thread_index,
"prints_manager": prints_manager,
"tests_data_keeper": tests_data_keeper,
}
t = threading.Thread(target=execute_testing, kwargs=thread_kwargs)
threads_array.append(t)
t.start()
current_thread_index += 1
for t in threads_array:
t.join()
else:
for ami_instance_name, ami_instance_ip in instances_ips:
if ami_instance_name == tests_settings.serverVersion:
print_color("Starting tests for {}".format(ami_instance_name), LOG_COLORS.GREEN)
print("Starts tests with server url - https://{}".format(ami_instance_ip))
all_tests = get_all_tests(tests_settings)
unmockable_tests = get_unmockable_tests(tests_settings)
mockable_tests = [test for test in all_tests if test not in unmockable_tests]
execute_testing(tests_settings, ami_instance_ip, mockable_tests, unmockable_tests,
tests_data_keeper, prints_manager, thread_index=0, is_ami=True)
sleep(8)
else:
# TODO: understand better when this occurs and what will be the settings
# This case is rare, and usually occurs on two cases:
# 1. When someone from Server wants to trigger a content build on their branch.
# 2. When someone from content wants to run tests on a specific build.
server_numeric_version = '99.99.98' # assume latest
print("Using server version: {} (assuming latest for non-ami)".format(server_numeric_version))
instance_ip = instances_ips[0][1]
all_tests = get_all_tests(tests_settings)
execute_testing(tests_settings, instance_ip, [], all_tests,
tests_data_keeper, prints_manager, thread_index=0, is_ami=False)
print_test_summary(tests_data_keeper, tests_settings.isAMI)
create_result_files(tests_data_keeper)
if tests_data_keeper.failed_playbooks:
tests_failed_msg = "Some tests have failed. Not destroying instances."
print(tests_failed_msg)
sys.exit(1)
def add_pr_comment(comment):
token = os.environ['CONTENT_GITHUB_TOKEN']
branch_name = os.environ['CIRCLE_BRANCH']
sha1 = os.environ['CIRCLE_SHA1']
query = '?q={}+repo:demisto/content+org:demisto+is:pr+is:open+head:{}+is:open'.format(sha1, branch_name)
url = 'https://api.github.com/search/issues'
headers = {'Authorization': 'Bearer ' + token}
try:
res = requests.get(url + query, headers=headers, verify=False)
res = handle_github_response(res)
if res and res.get('total_count', 0) == 1:
issue_url = res['items'][0].get('comments_url') if res.get('items', []) else None
if issue_url:
res = requests.post(issue_url, json={'body': comment}, headers=headers, verify=False)
handle_github_response(res)
else:
print_warning('Add pull request comment failed: did not find exactly one open pull request for branch {}.'
.format(branch_name))
except Exception as e:
print_warning('Add pull request comment failed: {}'.format(e))
def handle_github_response(response):
res_dict = response.json()
if not response.ok:
print_warning('Add pull request comment failed: {}'.
format(res_dict.get('message')))
return res_dict
@contextmanager
def acquire_test_lock(integrations_details: list,
test_timeout: int,
prints_manager: ParallelPrintsManager,
thread_index: int,
conf_json_path: str) -> None:
"""
This is a context manager that handles all the locking and unlocking of integrations.
Execution is as following:
* Attempts to lock the test's integrations and yields the result of this attempt
* If lock attempt has failed - yields False, if it succeeds - yields True
* Once the test is done- will unlock all integrations
Args:
integrations_details: test integrations details
test_timeout: test timeout in seconds
prints_manager: ParallelPrintsManager object
thread_index: The index of the thread that executes the unlocking
conf_json_path: Path to conf.json file
Yields:
A boolean indicating the lock attempt result
"""
locked = safe_lock_integrations(test_timeout,
prints_manager,
integrations_details,
thread_index,
conf_json_path)
try:
yield locked
finally:
if not locked:
return
safe_unlock_integrations(prints_manager, integrations_details, thread_index)
prints_manager.execute_thread_prints(thread_index)
def safe_unlock_integrations(prints_manager: ParallelPrintsManager, integrations_details: list, thread_index: int):
"""
This function safely unlocks the test's integrations.
If an unexpected error occurs, its details are logged and other tests' execution continues.
Args:
prints_manager: ParallelPrintsManager object
integrations_details: Details of the currently executed test
thread_index: The index of the thread that executes the unlocking
"""
try:
# executing the test could take a while, re-instancing the storage client
storage_client = storage.Client()
unlock_integrations(integrations_details, prints_manager, storage_client, thread_index)
except Exception as e:
prints_manager.add_print_job(f'attempt to unlock integration failed for unknown reason.\nError: {e}',
print_warning,
thread_index,
include_timestamp=True)
def safe_lock_integrations(test_timeout: int,
prints_manager: ParallelPrintsManager,
integrations_details: list,
thread_index: int,
conf_json_path: str) -> bool:
"""
This function safely locks the test's integrations and returns the result.
If an unexpected error occurs, its details are logged and False is returned.
Args:
test_timeout: Test timeout in seconds
prints_manager: ParallelPrintsManager object
integrations_details: test integrations details
thread_index: The index of the thread that executes the unlocking
conf_json_path: Path to conf.json file
Returns:
A boolean indicating the lock attempt result
"""
conf, _ = load_conf_files(conf_json_path, None)
parallel_integrations_names = conf['parallel_integrations']
filtered_integrations_details = [integration for integration in integrations_details if
integration['name'] not in parallel_integrations_names]
integration_names = get_integrations_list(filtered_integrations_details)
if integration_names:
print_msg = f'Attempting to lock integrations {integration_names}, with timeout {test_timeout}'
else:
print_msg = 'No integrations to lock'
prints_manager.add_print_job(print_msg, print, thread_index, include_timestamp=True)
try:
storage_client = storage.Client()
locked = lock_integrations(filtered_integrations_details, test_timeout, storage_client, prints_manager, thread_index)
except Exception as e:
prints_manager.add_print_job(f'attempt to lock integration failed for unknown reason.\nError: {e}',
print_warning,
thread_index,
include_timestamp=True)
locked = False
return locked
def workflow_still_running(workflow_id: str) -> bool:
"""
This method takes a workflow ID and checks if the workflow is still running.
If the given workflow ID is the same as the current workflow, it simply returns True,
else it queries the CircleCI API for the workflow and returns the status.
Args:
workflow_id: The ID of the workflow
Returns:
True if the workflow is running, else False
"""
# If this is the current workflow_id
if workflow_id == WORKFLOW_ID:
return True
else:
try:
workflow_details_response = requests.get(f'https://circleci.com/api/v2/workflow/{workflow_id}',
headers={'Accept': 'application/json'},
auth=(CIRCLE_STATUS_TOKEN, ''))
workflow_details_response.raise_for_status()
except Exception as e:
print(f'Failed to get circleci response about workflow with id {workflow_id}, error is: {e}')
return True
return workflow_details_response.json().get('status') not in ('canceled', 'success', 'failed')
def lock_integrations(integrations_details: list,
test_timeout: int,
storage_client: storage.Client,
prints_manager: ParallelPrintsManager,
thread_index: int) -> bool:
"""
Locks all the test's integrations
Args:
integrations_details: List of current test's integrations
test_timeout: Test timeout in seconds
storage_client: The GCP storage client
prints_manager: ParallelPrintsManager object
thread_index: The index of the thread that executes the unlocking
Returns:
True if all the test's integrations were successfully locked, else False
"""
integrations = get_integrations_list(integrations_details)
if not integrations:
return True
existing_integrations_lock_files = get_locked_integrations(integrations, storage_client)
for integration, lock_file in existing_integrations_lock_files.items():
# Each file has content in the form of <workflow-id>:<circleci-build-number>:<timeout in seconds>
# If it has not expired - it means the integration is currently locked by another test.
workflow_id, build_number, lock_timeout = lock_file.download_as_string().decode().split(':')
if not lock_expired(lock_file, lock_timeout) and workflow_still_running(workflow_id):
# there is a locked integration for which the lock is not expired - test cannot be executed at the moment
prints_manager.add_print_job(
f'Could not lock integration {integration}, another lock file already exists with '
f'build number: {build_number}, timeout: {lock_timeout}, last update at {lock_file.updated}.\n'
f'Delaying test execution',
print,
thread_index,
include_timestamp=True)
return False
integrations_generation_number = {}
# Gathering generation number with which the new file will be created,
# See https://cloud.google.com/storage/docs/generations-preconditions for details.
for integration in integrations:
if integration in existing_integrations_lock_files:
integrations_generation_number[integration] = existing_integrations_lock_files[integration].generation
else:
integrations_generation_number[integration] = 0
return create_lock_files(integrations_generation_number, prints_manager,
storage_client, integrations_details, test_timeout, thread_index)
def get_integrations_list(test_integrations: list) -> list:
"""
Since test details can specify a single integration as a string or a list of integrations, this method
parses the test's integrations into a list of integration names.
Args:
test_integrations: List of current test's integrations
Returns:
the integration names in a list for all the integrations that take part in the test
specified in the test details.
"""
return [integration['name'] for integration in test_integrations]
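# For example, [{'name': 'Slack', 'params': {}}] yields ['Slack'].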
def create_lock_files(integrations_generation_number: dict,
prints_manager: ParallelPrintsManager,
storage_client: storage.Client,
integrations_details: list,
test_timeout: int,
thread_index: int) -> bool:
"""
This method tries to create a lock file for each integration specified in 'integrations_generation_number'.
Each file contains <workflow-id>:<circle-ci-build-number>:<test-timeout>,
where the <workflow-id> and <circle-ci-build-number> parts are for debugging and troubleshooting
and the <test-timeout> part is there so that stale lock files can be unlocked.
If lock file creation fails for any of the integrations, the already-created files are cleaned up.
Args:
integrations_generation_number: A dict in the form of {<integration-name>:<integration-generation>}
prints_manager: ParallelPrintsManager object
storage_client: The GCP storage client
integrations_details: List of current test's integrations
test_timeout: The test timeout in seconds
thread_index: The index of the thread that executes the locking
Returns:
True if all lock files were created successfully, else False
"""
locked_integrations = []
bucket = storage_client.bucket(BUCKET_NAME)
for integration, generation_number in integrations_generation_number.items():
blob = bucket.blob(f'{LOCKS_PATH}/{integration}')
try:
blob.upload_from_string(f'{WORKFLOW_ID}:{CIRCLE_BUILD_NUM}:{test_timeout + 30}',
if_generation_match=generation_number)
prints_manager.add_print_job(f'integration {integration} locked',
print,
thread_index,
include_timestamp=True)
locked_integrations.append(integration)
except PreconditionFailed:
# if this exception occurs it means that another build has locked this integration
# before this build managed to do it.
# we need to unlock all the integrations we have already locked and try again later
prints_manager.add_print_job(
f'Could not lock integration {integration}, lock file creation failed with a precondition error. '
f'Delaying test execution.',
print_warning,
thread_index,
include_timestamp=True)
unlock_integrations(integrations_details, prints_manager, storage_client, thread_index)
return False
return True
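# A created lock blob therefore holds '<workflow-id>:<build-number>:<timeout + 30>',
# e.g. 'abc-123:45678:630' (hypothetical values).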
def unlock_integrations(integrations_details: list,
prints_manager: ParallelPrintsManager,
storage_client: storage.Client,
thread_index: int) -> None:
"""
Delete all integration lock files for integrations specified in 'locked_integrations'
Args:
integrations_details: List of current test's integrations
prints_manager: ParallelPrintsManager object
storage_client: The GCP storage client
thread_index: The index of the thread that executes the unlocking
"""
locked_integrations = get_integrations_list(integrations_details)
locked_integration_blobs = get_locked_integrations(locked_integrations, storage_client)
for integration, lock_file in locked_integration_blobs.items():
try:
# Verifying build number is the same as current build number to avoid deleting other tests lock files
_, build_number, _ = lock_file.download_as_string().decode().split(':')
if build_number == CIRCLE_BUILD_NUM:
lock_file.delete(if_generation_match=lock_file.generation)
prints_manager.add_print_job(
f'Integration {integration} unlocked',
print,
thread_index,
include_timestamp=True)
except PreconditionFailed:
prints_manager.add_print_job(f'Could not unlock integration {integration} precondition failure',
print_warning,
thread_index,
include_timestamp=True)
def get_locked_integrations(integrations: list, storage_client: storage.Client) -> dict:
"""
Gets the lock files of all currently locked integrations
Args:
integrations: Integrations that we want to get lock files for
storage_client: The GCP storage client
Returns:
A dict of the form {<integration-name>:<integration-blob-object>} for all integrations that have a blob object.
"""
# Listing all files in lock folder
# Wrapping in 'list' operator because list_blobs returns a generator which can only be iterated once
lock_files_ls = list(storage_client.list_blobs(BUCKET_NAME, prefix=f'{LOCKS_PATH}'))
current_integrations_lock_files = {}
# Getting all existing files details for integrations that we want to lock
for integration in integrations:
current_integrations_lock_files.update({integration: [lock_file_blob for lock_file_blob in lock_files_ls if
lock_file_blob.name == f'{LOCKS_PATH}/{integration}']})
# Filtering 'current_integrations_lock_files' from integrations with no files
current_integrations_lock_files = {integration: blob_files[0] for integration, blob_files in
current_integrations_lock_files.items() if blob_files}
return current_integrations_lock_files
def lock_expired(lock_file: storage.Blob, lock_timeout: str) -> bool:
"""
Checks if the time that has passed since the last update of the 'lock_file' is more than 'lock_timeout'.
If not, it means that the integration represented by the lock file is currently locked and being tested in another build.
Args:
lock_file: The lock file blob object
lock_timeout: The expiration timeout of the lock in seconds
Returns:
True if the lock has exceeded its timeout, else False
"""
return datetime.datetime.now(tz=pytz.utc) - lock_file.updated >= datetime.timedelta(seconds=int(lock_timeout))
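# Illustrative example (hypothetical timestamps): a lock file last updated at 12:00:00 UTC
# with lock_timeout='630' is considered expired from 12:10:30 UTC onward.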
def main():
print("Time is: {}\n\n\n".format(datetime.datetime.now()))
tests_settings = options_handler()
# should be removed after solving: https://github.com/demisto/etc/issues/21383
# -------------
if 'master' in tests_settings.serverVersion.lower():
print('[{}] sleeping for 45 secs'.format(datetime.datetime.now()))
sleep(45)
# -------------
manage_tests(tests_settings)
if __name__ == '__main__':
main()
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
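# e.g. bytereverse(0x12345678) == 0x78563412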
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 25770
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
util.py
|
import asyncio
import io
import logging
import os
import random
import re
import socket
import subprocess
import tarfile
import threading
import time
from contextlib import contextmanager
from functools import partial as p
from io import BytesIO
from typing import Any, Dict, List, TypeVar, cast
import docker
import netifaces as ni
import requests
import yaml
from tests.helpers.assertions import regex_search_matches_output
from tests.paths import SELFDESCRIBE_JSON, TEST_SERVICES_DIR
DEFAULT_TIMEOUT = int(os.environ.get("DEFAULT_TIMEOUT", 30))
DOCKER_API_VERSION = "1.34"
STATSD_RE = re.compile(r"SignalFx StatsD monitor: Listening on host & port udp:\[::\]:([0-9]*)")
T = TypeVar("T")
def retry_on_ebadf(func: T) -> T:
max_tries = 10
def wrap(*args, **kwargs):
tries = 0
while True:
try:
return func(*args, **kwargs)
except requests.exceptions.ConnectionError as e:
msg = str(e).lower()
if "bad file descriptor" in msg or "operation on non-socket" in msg:
tries += 1
if tries >= max_tries:
raise
logging.error("Retrying ConnectionError EBADF")
continue
raise
except OSError as e:
if e.errno == 9:
tries += 1
if tries >= max_tries:
raise
logging.error("Retrying OSError EBADF")
continue
raise
return cast(T, wrap)
def get_docker_client():
return docker.from_env(version=DOCKER_API_VERSION)
def has_docker_image(client, name):
return name in [t for image in client.images.list() for t in image.tags]
def assert_wait_for(test, timeout_seconds=DEFAULT_TIMEOUT, interval_seconds=0.2, on_fail=None):
"""
Runs `wait_for` but raises an assertion if it fails, optionally calling
`on_fail` before raising an AssertionError
"""
if not wait_for(test, timeout_seconds, interval_seconds):
if on_fail:
on_fail()
raise AssertionError("test '%s' still failng after %d seconds" % (test, timeout_seconds))
def wait_for(test, timeout_seconds=DEFAULT_TIMEOUT, interval_seconds=0.2):
"""
Repeatedly calls the test function for timeout_seconds until either test
returns a truthy value, at which point the function returns True -- or the
timeout is exceeded, at which point it will return False.
"""
start = time.time()
while True:
if test():
return True
if time.time() - start > timeout_seconds:
return False
time.sleep(interval_seconds)
def wait_for_value(func, timeout_seconds=DEFAULT_TIMEOUT, interval_seconds=0.2):
"""
Waits for func to return a non-None value and returns that value. If the
func is still returning None after the timeout, returns None to the caller.
"""
start = time.time()
while True:
val = func()
if val is not None:
return val
if time.time() - start > timeout_seconds:
return None
time.sleep(interval_seconds)
def wait_for_assertion(test, timeout_seconds=DEFAULT_TIMEOUT, interval_seconds=0.2):
"""
Waits for the given `test` function to stop raising an
AssertionError. If it is still raising such an error after
timeout_seconds, that exception will be raised by this function itself.
"""
e = None
def wrap():
nonlocal e
try:
test()
except AssertionError as err:
e = err
return False
return True
if not wait_for(wrap, timeout_seconds, interval_seconds):
raise e # pylint: disable=raising-bad-type
def ensure_always(test, timeout_seconds=DEFAULT_TIMEOUT, interval_seconds=0.2):
"""
Repeatedly calls the given test. If it ever returns false before the timeout
given is completed, returns False, otherwise True.
"""
start = time.time()
while True:
if not test():
return False
if time.time() - start > timeout_seconds:
return True
time.sleep(interval_seconds)
def ensure_never(test, timeout_seconds=DEFAULT_TIMEOUT):
"""
Repeatedly calls the given test. If it ever returns true before the timeout
given is completed, returns False, otherwise True.
"""
start = time.time()
while True:
if test():
return False
if time.time() - start > timeout_seconds:
return True
time.sleep(0.2)
def print_lines(msg):
"""
Print each line separately to make it easier to read in pytest output
"""
for line in msg.splitlines():
print(line)
def container_ip(container):
container.reload()
return container.attrs["NetworkSettings"]["IPAddress"]
def container_hostname(container):
container.reload()
return container.attrs["Config"]["Hostname"]
LOCALHOST_COUNTER = 0
# Ensure a unique internal status server host address. This supports up to
# 255 concurrent agents on the same pytest worker process, and up to 255
# pytest workers, which should be plenty
def get_unique_localhost():
worker = int(re.sub(r"\D", "", os.environ.get("PYTEST_XDIST_WORKER", "0")))
global LOCALHOST_COUNTER # pylint:disable=global-statement
LOCALHOST_COUNTER += 1
return "127.%d.%d.0" % (worker, LOCALHOST_COUNTER % 255)
@contextmanager
def run_subprocess(command: List[str], env: Dict[Any, Any] = None, **kwargs):
# subprocess on Windows has a bug where it doesn't like Path.
proc = retry_on_ebadf(
lambda: subprocess.Popen(
[str(c) for c in command], env=env, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, **kwargs
)
)()
get_output = pull_from_reader_in_background(proc.stdout)
try:
yield [get_output, proc.pid]
finally:
proc.terminate()
proc.wait(15)
@contextmanager
def run_container(image_name, wait_for_ip=True, print_logs=True, **kwargs):
files = kwargs.pop("files", [])
client = get_docker_client()
if not image_name.startswith("sha256"):
try:
client.images.get(image_name)
except docker.errors.APIError:
client.images.pull(image_name)
container = retry_on_ebadf(
lambda: retry(lambda: client.containers.create(image_name, **kwargs), docker.errors.DockerException)
)()
for src, dst in files:
copy_file_into_container(src, container, dst)
try:
container.start()
def has_ip_addr():
container.reload()
return container.attrs["NetworkSettings"]["IPAddress"]
if wait_for_ip:
wait_for(has_ip_addr, timeout_seconds=5)
yield container
finally:
try:
if print_logs:
print_lines(
"Container %s/%s logs:\n%s" % (image_name, container.name, container.logs().decode("utf-8"))
)
container.remove(force=True, v=True)
except docker.errors.NotFound:
pass
@contextmanager
def run_service(service_name, buildargs=None, print_logs=True, path=None, dockerfile="./Dockerfile", **kwargs):
if buildargs is None:
buildargs = {}
if path is None:
path = os.path.join(TEST_SERVICES_DIR, service_name)
client = get_docker_client()
image, _ = retry_on_ebadf(
lambda: retry(
lambda: client.images.build(
path=str(path), dockerfile=dockerfile, rm=True, forcerm=True, buildargs=buildargs
),
docker.errors.BuildError,
)
)()
with run_container(image.id, print_logs=print_logs, **kwargs) as cont:
yield cont
def get_monitor_metrics_from_selfdescribe(monitor, json_path=SELFDESCRIBE_JSON):
metrics = {}
with open(json_path, "r", encoding="utf-8") as fd:
doc = yaml.safe_load(fd.read())
for mon in doc["Monitors"]:
if monitor == mon["monitorType"] and "metrics" in mon.keys() and mon["metrics"]:
metrics = mon["metrics"]
break
return metrics
def get_all_monitor_metrics_from_selfdescribe(monitor, json_path=SELFDESCRIBE_JSON):
return set(get_monitor_metrics_from_selfdescribe(monitor, json_path).keys())
def get_default_monitor_metrics_from_selfdescribe(monitor, json_path=SELFDESCRIBE_JSON):
default_metrics = set()
all_metrics = get_monitor_metrics_from_selfdescribe(monitor, json_path)
for metric in all_metrics:
if all_metrics[metric]["default"]:
default_metrics.add(metric)
return default_metrics
def get_custom_monitor_metrics_from_selfdescribe(monitor, json_path=SELFDESCRIBE_JSON):
custom_metrics = set()
all_metrics = get_monitor_metrics_from_selfdescribe(monitor, json_path)
for metric in all_metrics:
if not all_metrics[metric]["default"]:
custom_metrics.add(metric)
return custom_metrics
def get_monitor_dims_from_selfdescribe(monitor, json_path=SELFDESCRIBE_JSON):
dims = set()
with open(json_path, "r", encoding="utf-8") as fd:
doc = yaml.safe_load(fd.read())
for mon in doc["Monitors"]:
if monitor == mon["monitorType"] and "dimensions" in mon.keys() and mon["dimensions"]:
dims = set(mon["dimensions"].keys())
break
return dims
def get_observer_dims_from_selfdescribe(observer, json_path=SELFDESCRIBE_JSON):
dims = set()
with open(json_path, "r", encoding="utf-8") as fd:
doc = yaml.safe_load(fd.read())
for obs in doc["Observers"]:
if observer == obs["observerType"] and "dimensions" in obs.keys() and obs["dimensions"]:
dims = set(obs["dimensions"].keys())
break
return dims
def get_host_ip():
gws = ni.gateways()
interface = gws["default"][ni.AF_INET][1]
return ni.ifaddresses(interface)[ni.AF_INET][0]["addr"]
def send_udp_message(host, port, msg):
"""
Send a datagram to the given host/port
"""
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # Internet # UDP
sock.sendto(msg.encode("utf-8"), (host, port))
def retry(function, exception, max_attempts=5, interval_seconds=5):
"""
Retry function up to max_attempts if exception is caught
"""
for attempt in range(max_attempts):
try:
return function()
except exception as e:
assert attempt < (max_attempts - 1), "%s failed after %d attempts!\n%s" % (function, max_attempts, str(e))
time.sleep(interval_seconds)
return None
def get_statsd_port(agent):
"""
Discover an open port of running StatsD monitor
"""
assert wait_for(p(regex_search_matches_output, agent.get_output, STATSD_RE.search))
regex_results = STATSD_RE.search(agent.output)
return int(regex_results.groups()[0])
def pull_from_reader_in_background(reader):
output = io.BytesIO()
def pull_output():
while True:
# If any output is waiting, grab it.
try:
byt = reader.read(1)
except OSError:
return
if not byt:
return
if isinstance(byt, str):
byt = byt.encode("utf-8")
output.write(byt)
threading.Thread(target=pull_output, daemon=True).start()
def get_output():
return output.getvalue().decode("utf-8")
return get_output
def random_hex(bits=64):
"""Return random hex number as a string with the given number of bits (default 64)"""
return hex(random.getrandbits(bits))[2:]
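# e.g. random_hex(16) returns up to four hex characters, such as 'a3f9' (hypothetical value).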
def copy_file_content_into_container(content, container, target_path):
copy_file_object_into_container(
BytesIO(content.encode("utf-8")), container, target_path, size=len(content.encode("utf-8"))
)
# This is more convoluted than it should be but seems to be the simplest way in
# the face of docker-in-docker environments where volume bind mounting is hard.
@retry_on_ebadf
def copy_file_object_into_container(fd, container, target_path, size=None):
tario = BytesIO()
tar = tarfile.TarFile(fileobj=tario, mode="w") # pylint: disable=consider-using-with
info = tarfile.TarInfo(name=target_path)
if size is None:
size = os.fstat(fd.fileno()).st_size
info.size = size
tar.addfile(info, fd)
tar.close()
container.put_archive("/", tario.getvalue())
# Apparently when the above `put_archive` call returns, the file isn't
# necessarily fully written in the container, so wait a bit to ensure it
# is.
time.sleep(2)
def copy_file_into_container(path, container, target_path):
with open(path, "rb") as fd:
copy_file_object_into_container(fd, container, target_path)
def path_exists_in_container(container, path):
code, _ = container.exec_run("test -e %s" % path)
return code == 0
def get_container_file_content(container, path):
assert path_exists_in_container(container, path), "File %s does not exist!" % path
return container.exec_run("cat %s" % path)[1].decode("utf-8")
def get_stripped_container_id(container_id):
return container_id.replace("docker://", "").replace("cri-o://", "")
@contextmanager
def run_simple_sanic_app(app):
app_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
app_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
app_sock.bind(("127.0.0.1", 0))
port = app_sock.getsockname()[1]
loop = asyncio.new_event_loop()
async def start_server():
server = app.create_server(sock=app_sock, access_log=False, return_asyncio_server=True)
loop.create_task(server)
loop.create_task(start_server())
threading.Thread(target=loop.run_forever, daemon=True).start()
try:
yield f"http://127.0.0.1:{port}"
finally:
app_sock.close()
loop.stop()
|
vnbitfinex.py
|
# encoding: UTF-8
from __future__ import print_function
import json
import requests
import traceback
import ssl
from threading import Thread
from queue import Queue, Empty
import time
import hmac
import base64
import hashlib
import websocket
from six.moves import input
WEBSOCKET_V2_URL = 'wss://api.bitfinex.com/ws/2'
RESTFUL_V1_URL = 'https://api.bitfinex.com/v1'
RESTFUL_V1_DOMAIN = 'https://api.bitfinex.com'
########################################################################
class BitfinexApi(object):
""""""
# ----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
self.ws = None
self.thread = None
self.active = False  # initial running state: not started
self.restQueue = Queue()
self.restThread = None
self.apiKey = "AmvX7LQ6YT"
self.secretKey = "UspYAXbIBzoReEPFl"
# ----------------------------------------------------------------------
def start(self):
""""""
self.ws = websocket.create_connection(WEBSOCKET_V2_URL,
sslopt={'cert_reqs': ssl.CERT_NONE})
self.active = True
self.thread = Thread(target=self.run)  # start the websocket receive thread
self.thread.start()
self.restThread = Thread(target=self.runRest)  # start the REST request thread
self.restThread.start()
self.onConnect()
# ----------------------------------------------------------------------
def reconnect(self):
""""""
self.ws = websocket.create_connection(WEBSOCKET_V2_URL,
sslopt={'cert_reqs': ssl.CERT_NONE})
self.onConnect()
# ----------------------------------------------------------------------
def run(self):
""""""
while self.active:
try:
stream = self.ws.recv()
data = json.loads(stream)
self.onData(data)
except:
msg = traceback.format_exc()
self.onError(msg)
self.reconnect()
# ----------------------------------------------------------------------
def runRest(self):
""""""
while self.active:
try:
path, callback, post = self.restQueue.get(timeout=1)
if post:
self.httpPost(path, callback)
else:
self.httpGet(path, callback)
except Empty:
pass
except Exception as e:
print(traceback.format_exc())
# ----------------------------------------------------------------------
def close(self):
""""""
self.active = False
if self.thread:
self.thread.join()
if self.restThread:
            self.restThread.join()
# ----------------------------------------------------------------------
def onConnect(self):
""""""
print('connected')
# ----------------------------------------------------------------------
def onData(self, data):
""""""
print(data)
# ----------------------------------------------------------------------
def onError(self, msg):
""""""
print(msg)
# ----------------------------------------------------------------------
def sendReq(self, req):
""""""
self.ws.send(json.dumps(req))
# ----------------------------------------------------------------------
def sendRestReq(self, path, callback, post):
""""""
self.restQueue.put((path, callback, post))
# ----------------------------------------------------------------------
def httpGet(self, path, callback):
""""""
url = RESTFUL_V1_URL + path
resp = requests.get(url)
callback(resp.json())
def __signature(self, payload):
j = json.dumps(payload)
data = base64.standard_b64encode(j.encode('utf8'))
h = hmac.new(self.secretKey.encode('utf8'), data, hashlib.sha384)
signature = h.hexdigest()
return {
"X-BFX-APIKEY": self.apiKey,
"X-BFX-SIGNATURE": signature,
"X-BFX-PAYLOAD": data
}
def _post(self, path, params):
body = params
rawBody = json.dumps(body)
headers = self.__signature(body)
url = RESTFUL_V1_DOMAIN + path
resp = requests.post(url, headers=headers, data=rawBody, verify=True)
return resp
def httpPost(self, path, callback):
""""""
if path.startswith("/"):
v1_path = "/v1" + path
else:
v1_path = '/v1/' + path
payload = {
'request': v1_path,
'nonce': str(int(time.time() * 1000000)) # nonce
}
resp = self._post(v1_path, payload)
callback(resp.json())
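if __name__ == '__main__':
    # Minimal usage sketch, not part of the original module: subscribe to the
    # public BTC/USD ticker channel for a few seconds and print whatever the
    # default onData callback receives. The subscribe message follows the
    # Bitfinex websocket v2 documentation; no API key is needed for it.
    api = BitfinexApi()
    api.start()
    api.sendReq({
        'event': 'subscribe',
        'channel': 'ticker',
        'symbol': 'tBTCUSD',
    })
    try:
        time.sleep(10)
    finally:
        api.close()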
|
infeed_outfeed_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from threading import Thread
import numpy as np
from tensorflow.compiler.plugin.poplar.tests import test_utils as tu
from tensorflow.python import ipu
from tensorflow.python.client import session as session_lib
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras import layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.training import gradient_descent
def next_feed_id():
result = 'feed' + str(next_feed_id.feed_count)
next_feed_id.feed_count += 1
return result
next_feed_id.feed_count = 0
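# Most tests below follow one pattern: build a tf.data Dataset, wrap it in an
# IPUInfeedQueue and/or create an IPUOutfeedQueue, compile a loop body with
# ipu.ipu_compiler.compile inside an IPU device scope, and run the compiled op
# in a session after initializing any infeed queue.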
class InfeedOutfeedTest(test_util.TensorFlowTestCase):
@test_util.deprecated_graph_mode_only
def testSingleInfeedRepeatNonTuple(self):
dataset = tu.create_single_increasing_dataset(10, shape=[4, 4])
infeed_queue = ipu.ipu_infeed_queue.IPUInfeedQueue(dataset, next_feed_id())
def body(v, x):
v = v + x
return v
def my_net(v):
r = ipu.loops.repeat(20, body, (v), infeed_queue)
return r
with ops.device('cpu'):
v = array_ops.placeholder(np.float32, [4, 4])
with ipu.scopes.ipu_scope("/device:IPU:0"):
res = ipu.ipu_compiler.compile(my_net, inputs=[v])
with session_lib.Session() as sess:
tu.ReportJSON(self, sess)
sess.run(infeed_queue.initializer)
result = sess.run(res, {v: np.ones([4, 4], np.float32)})
self.assertAllClose(result[0], np.broadcast_to(91, [4, 4]))
@test_util.deprecated_graph_mode_only
def testSingleInfeedRepeatNonTupleFiniteDataset(self):
dataset = tu.create_single_increasing_dataset(10,
shape=[4, 4],
repeat=False)
infeed_queue = ipu.ipu_infeed_queue.IPUInfeedQueue(dataset, next_feed_id())
def body(v, x):
v = v + x
return v
def my_net(v):
r = ipu.loops.repeat(10, body, (v), infeed_queue)
return r
with ops.device('cpu'):
v = array_ops.placeholder(np.float32, [4, 4])
with ipu.scopes.ipu_scope("/device:IPU:0"):
res = ipu.ipu_compiler.compile(my_net, inputs=[v])
with session_lib.Session() as sess:
tu.ReportJSON(self, sess)
sess.run(infeed_queue.initializer)
result = sess.run(res, {v: np.ones([4, 4], np.float32)})
self.assertAllClose(result[0], np.broadcast_to(46, [4, 4]))
@test_util.deprecated_graph_mode_only
def testSingleInfeedRepeatTuple(self):
dataset = tu.create_single_increasing_dataset(3, shape=[4, 4])
def dataset_parser(value):
image_1 = value
image_2 = (value + 10.) / 2.0
return (image_1, image_2)
dataset = dataset.map(dataset_parser)
infeed_queue = ipu.ipu_infeed_queue.IPUInfeedQueue(dataset, next_feed_id())
def body(v, im1, im2):
v = v + im1 + im2
return v
def my_net():
v = constant_op.constant(0.0, shape=[4, 4], dtype=np.float32)
r = ipu.loops.repeat(5, body, [v], infeed_queue)
return r
with ipu.scopes.ipu_scope("/device:IPU:0"):
res = ipu.ipu_compiler.compile(my_net, inputs=[])
with session_lib.Session() as sess:
tu.ReportJSON(self, sess)
sess.run(infeed_queue.initializer)
result = sess.run(res)
self.assertAllClose(result[0], np.broadcast_to(31, [4, 4]))
@test_util.deprecated_graph_mode_only
def testSingleInfeedRepeatTupleMerge(self):
dataset = tu.create_single_increasing_dataset(3, shape=[4, 4])
def dataset_parser(value):
image_1 = value
image_2 = (value + 10.) / 2.0
return (image_1, image_2)
dataset = dataset.map(dataset_parser)
infeed_queue = ipu.ipu_infeed_queue.IPUInfeedQueue(dataset, next_feed_id())
def body(v, im1, im2):
v = v + im1 + im2
return v
def my_net():
v = constant_op.constant(0.0, shape=[4, 4], dtype=np.float32)
r = ipu.loops.repeat(5, body, [v], infeed_queue)
return r
with ipu.scopes.ipu_scope("/device:IPU:0"):
res = ipu.ipu_compiler.compile(my_net, inputs=[])
with session_lib.Session() as sess:
tu.ReportJSON(self, sess, merge_infeed_io_copies=True)
sess.run(infeed_queue.initializer)
result = sess.run(res)
self.assertAllClose(result[0], np.broadcast_to(31, [4, 4]))
@test_util.deprecated_graph_mode_only
def testSingleInfeedRepeatNamed(self):
dataset = tu.create_single_increasing_dataset(3, shape=[4, 4])
def dataset_parser(value):
image_1 = value
image_2 = (value + 10.) / 2.0
return {"a": image_1, "b": image_2}
dataset = dataset.map(dataset_parser)
infeed_queue = ipu.ipu_infeed_queue.IPUInfeedQueue(dataset, next_feed_id())
# Note how the parameters are swapped around.
def body(v1, v2, b, a):
v1 = v1 + a
v2 = v2 + b
return (v1, v2)
def my_net():
v1 = constant_op.constant(0.0, shape=[4, 4], dtype=np.float32)
v2 = constant_op.constant(0.0, shape=[4, 4], dtype=np.float32)
r = ipu.loops.repeat(5, body, [v1, v2], infeed_queue)
return r
with ipu.scopes.ipu_scope("/device:IPU:0"):
res = ipu.ipu_compiler.compile(my_net, inputs=[])
with session_lib.Session() as sess:
tu.ReportJSON(self, sess)
sess.run(infeed_queue.initializer)
result = sess.run(res)
self.assertAllClose(result[0], np.broadcast_to(4, [4, 4]))
self.assertAllClose(result[1], np.broadcast_to(27, [4, 4]))
@test_util.deprecated_graph_mode_only
def testSingleInfeedMultipleRepeats(self):
dataset = tu.create_single_increasing_dataset(2, shape=[4, 4])
infeed_queue = ipu.ipu_infeed_queue.IPUInfeedQueue(dataset, next_feed_id())
def body(v, x):
v = v + x
return v
def my_net():
v = constant_op.constant(0.0, shape=[4, 4], dtype=np.float32)
r = ipu.loops.repeat(5, body, [v], infeed_queue)
r = ipu.loops.repeat(5, body, [r], infeed_queue)
return r
with ipu.scopes.ipu_scope("/device:IPU:0"):
res = ipu.ipu_compiler.compile(my_net, inputs=[])
with session_lib.Session() as sess:
tu.ReportJSON(self, sess)
sess.run(infeed_queue.initializer)
result = sess.run(res)
self.assertAllClose(result[0], np.broadcast_to(5, [4, 4]))
@test_util.deprecated_graph_mode_only
def testSingleInfeedWhileLoopNonTuple(self):
dataset = tu.create_single_increasing_dataset(10, shape=[4, 4])
infeed_queue = ipu.ipu_infeed_queue.IPUInfeedQueue(dataset, next_feed_id())
def cond(i, v):
return i < 20
def body(i, v, x):
v = v + x
return (i + 1, v)
def my_net(v):
i = 0
r = ipu.loops.while_loop(cond, body, (i, v), infeed_queue)
return r[1]
with ops.device('cpu'):
v = array_ops.placeholder(np.float32, [4, 4])
with ipu.scopes.ipu_scope("/device:IPU:0"):
res = ipu.ipu_compiler.compile(my_net, inputs=[v])
with session_lib.Session() as sess:
tu.ReportJSON(self, sess)
sess.run(infeed_queue.initializer)
result = sess.run(res, {v: np.ones([4, 4], np.float32)})
self.assertAllClose(result[0], np.broadcast_to(91, [4, 4]))
@test_util.deprecated_graph_mode_only
def testSingleInfeedWhileLoopTuple(self):
dataset = tu.create_single_increasing_dataset(3, shape=[4, 4])
def dataset_parser(value):
image_1 = value
image_2 = (value + 10.) / 2.0
return (image_1, image_2)
dataset = dataset.map(dataset_parser)
infeed_queue = ipu.ipu_infeed_queue.IPUInfeedQueue(dataset, next_feed_id())
def cond(i, v):
return i < 20
def body(i, v, im1, im2):
v = v + im1 + im2
return (i + 1, v)
def my_net(v):
i = 0
r = ipu.loops.while_loop(cond, body, (i, v), infeed_queue)
return r[1]
with ops.device('cpu'):
v = array_ops.placeholder(np.float32, [4, 4])
with ipu.scopes.ipu_scope("/device:IPU:0"):
res = ipu.ipu_compiler.compile(my_net, inputs=[v])
with session_lib.Session() as sess:
tu.ReportJSON(self, sess)
sess.run(infeed_queue.initializer)
result = sess.run(res, {v: np.ones([4, 4], np.float32)})
self.assertAllClose(result[0], np.broadcast_to(129.5, [4, 4]))
@test_util.deprecated_graph_mode_only
def testSingleInfeedMultipleRuns(self):
dataset = tu.create_single_increasing_dataset(10, shape=[4, 4])
infeed_queue = ipu.ipu_infeed_queue.IPUInfeedQueue(dataset, next_feed_id())
def program(iters):
def body(v, x):
v = v + x
return v
def my_net():
v = constant_op.constant(0.0, shape=[4, 4], dtype=np.float32)
r = ipu.loops.repeat(iters, body, (v), infeed_queue)
return r
with ipu.scopes.ipu_scope("/device:IPU:0"):
return ipu.ipu_compiler.compile(my_net)
with session_lib.Session() as sess:
tu.ReportJSON(self, sess)
sess.run(infeed_queue.initializer)
result = sess.run(program(0))
self.assertAllClose(result[0], np.broadcast_to(0, [4, 4]))
# The iterator has not moved - next element should be all 1s.
result = sess.run(program(2))
self.assertAllClose(result[0], np.broadcast_to(1, [4, 4]))
# The iterator has moved - in the next two iterations it should pull 2 and 3.
result = sess.run(program(2))
self.assertAllClose(result[0], np.broadcast_to(5, [4, 4]))
# The iterator has moved - in the next two iterations it should pull 4 and 5.
result = sess.run(program(2))
self.assertAllClose(result[0], np.broadcast_to(9, [4, 4]))
@test_util.deprecated_graph_mode_only
def testTwoInfeedsDifferentPrograms(self):
dataset1 = tu.create_single_increasing_dataset(20, shape=[4, 4])
dataset2 = tu.create_single_increasing_dataset(3, shape=[4, 4])
infeed_queue1 = ipu.ipu_infeed_queue.IPUInfeedQueue(
dataset1, feed_name=next_feed_id())
infeed_queue2 = ipu.ipu_infeed_queue.IPUInfeedQueue(
dataset2, feed_name=next_feed_id())
def program(iters, infeed_queue):
def body(v, x):
v = v + x
return v
def my_net():
v = constant_op.constant(0.0, shape=[4, 4], dtype=np.float32)
r = ipu.loops.repeat(iters, body, (v), infeed_queue)
return r
with ipu.scopes.ipu_scope("/device:IPU:0"):
return ipu.ipu_compiler.compile(my_net)
with session_lib.Session() as sess:
tu.ReportJSON(self, sess)
sess.run(infeed_queue1.initializer)
sess.run(infeed_queue2.initializer)
result = sess.run(program(5, infeed_queue1))
self.assertAllClose(result[0], np.broadcast_to(10, [4, 4]))
result = sess.run(program(5, infeed_queue2))
self.assertAllClose(result[0], np.broadcast_to(4, [4, 4]))
result = sess.run(program(5, infeed_queue1))
self.assertAllClose(result[0], np.broadcast_to(35, [4, 4]))
result = sess.run(program(5, infeed_queue2))
self.assertAllClose(result[0], np.broadcast_to(5, [4, 4]))
@test_util.deprecated_graph_mode_only
def testUndefinedShape(self):
dataset = tu.create_single_increasing_dataset(10, shape=[4, 4])
dataset = dataset.batch(10, drop_remainder=False)
with self.assertRaisesRegex(ValueError, r'Output shape \((\?|None),'):
ipu.ipu_infeed_queue.IPUInfeedQueue(dataset, next_feed_id())
@test_util.deprecated_graph_mode_only
def testMultipleInitializations(self):
dataset = tu.create_single_increasing_dataset(10, shape=[4, 4])
infeed_queue = ipu.ipu_infeed_queue.IPUInfeedQueue(dataset, next_feed_id())
_ = infeed_queue.initializer
with self.assertRaisesRegex(
ValueError,
'The IPUInfeedQueue `initializer` function can only be accessed once.'
):
_ = infeed_queue.initializer
@test_util.deprecated_graph_mode_only
def testTrainingLoopWithInfeed(self):
dataset = tu.create_single_increasing_dataset(10, shape=[4, 4, 2])
dataset = dataset.batch(batch_size=2, drop_remainder=True)
infeed_queue = ipu.ipu_infeed_queue.IPUInfeedQueue(dataset, next_feed_id())
def my_net(iters):
def body(loss, x):
with variable_scope.variable_scope("vs", use_resource=True):
y = layers.Conv2D(2,
1,
use_bias=True,
kernel_initializer=init_ops.ones_initializer(),
name='conv1')(x)
loss = math_ops.reduce_sum(y)
optimizer = gradient_descent.GradientDescentOptimizer(0.1)
train = optimizer.minimize(loss)
with ops.control_dependencies([train]):
return array_ops.identity(loss)
loss = 0.0
return ipu.loops.repeat(iters, body, (loss), infeed_queue)
with ops.device('cpu'):
iters = array_ops.placeholder(np.int32, shape=[])
with ipu.scopes.ipu_scope("/device:IPU:0"):
r = ipu.ipu_compiler.compile(my_net, inputs=[iters])
with session_lib.Session() as sess:
sess.run(infeed_queue.initializer)
sess.run(variables.global_variables_initializer())
initial_loss = sess.run(r, {iters: 1})
final_loss = sess.run(r, {iters: 1000})
self.assertTrue(initial_loss > final_loss)
@test_util.deprecated_graph_mode_only
  def testMultipleOutfeedEnqueue(self):
outfeed_queue = ipu.ipu_outfeed_queue.IPUOutfeedQueue(next_feed_id())
def body(v):
outfeed = outfeed_queue.enqueue(v)
outfeed = outfeed_queue.enqueue(v)
v = v + 1
return (v, outfeed)
def my_net(v):
r = ipu.loops.repeat(20, body, (v))
return r
with ops.device('cpu'):
v = array_ops.placeholder(np.float32, [4, 4])
with ipu.scopes.ipu_scope("/device:IPU:0"):
with self.assertRaises(ValueError):
ipu.ipu_compiler.compile(my_net, inputs=[v])
@test_util.deprecated_graph_mode_only
  def testMultipleOutfeedEnqueueDifferentGraphs(self):
outfeed_queue = ipu.ipu_outfeed_queue.IPUOutfeedQueue(next_feed_id())
def body(v):
outfeed = outfeed_queue.enqueue(v)
v = v + 1
return (v, outfeed)
def my_net(v):
r = ipu.loops.repeat(20, body, (v))
return r
with ops.Graph().as_default():
with ops.device('cpu'):
v = array_ops.placeholder(np.float32, [4, 4])
self.assertFalse(outfeed_queue.enqueued)
with ipu.scopes.ipu_scope("/device:IPU:0"):
ipu.ipu_compiler.compile(my_net, inputs=[v])
self.assertTrue(outfeed_queue.enqueued)
with ops.Graph().as_default():
with ops.device('cpu'):
v = array_ops.placeholder(np.float32, [4, 4])
# Not enqueued in the current graph.
self.assertFalse(outfeed_queue.enqueued)
with ipu.scopes.ipu_scope("/device:IPU:0"):
ipu.ipu_compiler.compile(my_net, inputs=[v])
self.assertTrue(outfeed_queue.enqueued)
@test_util.deprecated_graph_mode_only
def testSingleOutfeedRepeatNonTuple(self):
outfeed_queue = ipu.ipu_outfeed_queue.IPUOutfeedQueue(next_feed_id())
def body(v):
outfeed = outfeed_queue.enqueue(v)
v = v + 1
return (v, outfeed)
def my_net(v):
r = ipu.loops.repeat(20, body, (v))
return r
with ops.device('cpu'):
v = array_ops.placeholder(np.float32, [4, 4])
with ipu.scopes.ipu_scope("/device:IPU:0"):
res = ipu.ipu_compiler.compile(my_net, inputs=[v])
outfeed = outfeed_queue.dequeue()
with session_lib.Session() as sess:
tu.ReportJSON(self, sess)
result = sess.run(res, {v: np.ones([4, 4], np.float32)})
self.assertAllClose(result[0], np.broadcast_to(21, [4, 4]))
outfed = sess.run(outfeed)
for i in range(20):
self.assertAllClose(outfed[i], np.broadcast_to(i + 1, [4, 4]))
@test_util.deprecated_graph_mode_only
def testMultipleOutfeedsInSameGraph(self):
outfeed_queue1 = ipu.ipu_outfeed_queue.IPUOutfeedQueue(next_feed_id())
outfeed_queue2 = ipu.ipu_outfeed_queue.IPUOutfeedQueue(next_feed_id())
def inner_body(v):
outfeed = outfeed_queue2.enqueue(v)
v = v + 1
return v, outfeed
def body(v):
outfeed = outfeed_queue1.enqueue(v)
v = ipu.loops.repeat(10, inner_body, v)
return v, outfeed
def my_net(v):
r = ipu.loops.repeat(10, body, v)
return r
with ops.device('cpu'):
v = array_ops.placeholder(np.float32, [])
with ipu.scopes.ipu_scope("/device:IPU:0"):
res = ipu.ipu_compiler.compile(my_net, inputs=[v])
dequeued1 = outfeed_queue1.dequeue()
dequeued2 = outfeed_queue2.dequeue()
with session_lib.Session() as sess:
tu.ReportJSON(self, sess)
sess.run(res, {v: 0.0})
out1, out2 = sess.run([dequeued1, dequeued2])
self.assertAllEqual(np.arange(0, 100, step=10), out1)
self.assertAllEqual(np.arange(0, 100, step=1), out2)
@test_util.deprecated_graph_mode_only
def testSingleInfeedOutfeedRepeatNonTuple(self):
dataset = tu.create_single_increasing_dataset(10, shape=[4, 4])
infeed_queue = ipu.ipu_infeed_queue.IPUInfeedQueue(dataset, next_feed_id())
outfeed_queue = ipu.ipu_outfeed_queue.IPUOutfeedQueue(next_feed_id())
def body(v, x):
v = v + x
outfeed = outfeed_queue.enqueue(v)
return (v, outfeed)
def my_net(v):
r = ipu.loops.repeat(20, body, (v), infeed_queue)
return r
with ops.device('cpu'):
v = array_ops.placeholder(np.float32, [4, 4])
with ipu.scopes.ipu_scope("/device:IPU:0"):
res = ipu.ipu_compiler.compile(my_net, inputs=[v])
with session_lib.Session() as sess:
tu.ReportJSON(self, sess)
sess.run(infeed_queue.initializer)
result = sess.run(res, {v: np.ones([4, 4], np.float32)})
self.assertAllClose(result[0], np.broadcast_to(91, [4, 4]))
outfed = sess.run(outfeed_queue.dequeue())
self.assertEqual(outfed.shape, (20, 4, 4))
self.assertAllClose(outfed[-1], result[0])
self.assertAllClose(outfed[5], np.broadcast_to(16, [4, 4]))
@test_util.deprecated_graph_mode_only
def testSingleInfeedOutfeedRepeatTuple(self):
dataset = tu.create_single_increasing_dataset(3, shape=[4, 4])
shape = [4, 4]
def dataset_parser(value):
image_1 = value
image_2 = (value + 10.) / 2.0
return (image_1, image_2)
dataset = dataset.map(dataset_parser)
infeed_queue = ipu.ipu_infeed_queue.IPUInfeedQueue(dataset, next_feed_id())
outfeed_queue = ipu.ipu_outfeed_queue.IPUOutfeedQueue(next_feed_id())
def body(v, im1, im2):
v = v + im1 + im2
outfeed = outfeed_queue.enqueue((v, im1, im2))
return (v, outfeed)
def my_net():
v = constant_op.constant(0.0, shape=shape, dtype=np.float32)
r = ipu.loops.repeat(5, body, [v], infeed_queue)
return r
with ipu.scopes.ipu_scope("/device:IPU:0"):
res = ipu.ipu_compiler.compile(my_net, inputs=[])
outfed = outfeed_queue.dequeue()
with session_lib.Session() as sess:
tu.ReportJSON(self, sess)
sess.run(infeed_queue.initializer)
result = sess.run(res)
self.assertAllClose(result[0], np.broadcast_to(31, shape))
outfed_result = sess.run(outfed)
self.assertTrue(len(outfed_result) == 3)
self.assertAllClose(outfed_result[0][0], np.broadcast_to(5, shape))
self.assertAllClose(outfed_result[0][1], np.broadcast_to(11.5, shape))
self.assertAllClose(outfed_result[0][2], np.broadcast_to(19.5, shape))
self.assertAllClose(outfed_result[0][3], np.broadcast_to(24.5, shape))
self.assertAllClose(outfed_result[0][4], np.broadcast_to(31, shape))
self.assertAllClose(outfed_result[1][0], np.broadcast_to(0, shape))
self.assertAllClose(outfed_result[1][1], np.broadcast_to(1, shape))
self.assertAllClose(outfed_result[1][2], np.broadcast_to(2, shape))
self.assertAllClose(outfed_result[1][3], np.broadcast_to(0, shape))
self.assertAllClose(outfed_result[1][4], np.broadcast_to(1, shape))
self.assertAllClose(outfed_result[2][0], np.broadcast_to(5, shape))
self.assertAllClose(outfed_result[2][1], np.broadcast_to(5.5, shape))
self.assertAllClose(outfed_result[2][2], np.broadcast_to(6, shape))
self.assertAllClose(outfed_result[2][3], np.broadcast_to(5, shape))
self.assertAllClose(outfed_result[2][4], np.broadcast_to(5.5, shape))
@test_util.deprecated_graph_mode_only
def testSingleInfeedOutfeedRepeatTupleLast(self):
dataset = tu.create_single_increasing_dataset(3, shape=[4, 4])
shape = [4, 4]
def dataset_parser(value):
image_1 = value
image_2 = (value + 10.) / 2.0
return (image_1, image_2)
dataset = dataset.map(dataset_parser)
infeed_queue = ipu.ipu_infeed_queue.IPUInfeedQueue(dataset, next_feed_id())
outfeed_queue = ipu.ipu_outfeed_queue.IPUOutfeedQueue(
next_feed_id(), outfeed_mode=ipu.ipu_outfeed_queue.IPUOutfeedMode.LAST)
def body(v, im1, im2):
v = v + im1 + im2
outfeed = outfeed_queue.enqueue((v, im1, im2))
return (v, outfeed)
def my_net():
v = constant_op.constant(0.0, shape=shape, dtype=np.float32)
r = ipu.loops.repeat(5, body, [v], infeed_queue)
return r
with ipu.scopes.ipu_scope("/device:IPU:0"):
res = ipu.ipu_compiler.compile(my_net, inputs=[])
outfed = outfeed_queue.dequeue()
with session_lib.Session() as sess:
tu.ReportJSON(self, sess)
sess.run(infeed_queue.initializer)
result = sess.run(res)
self.assertAllClose(result[0], np.broadcast_to(31, shape))
outfed_result = sess.run(outfed)
self.assertTrue(len(outfed_result) == 3)
self.assertAllClose(outfed_result[0], np.broadcast_to(31, shape))
self.assertAllClose(outfed_result[1], np.broadcast_to(1, shape))
self.assertAllClose(outfed_result[2], np.broadcast_to(5.5, shape))
@test_util.deprecated_graph_mode_only
def testSingleInfeedOutfeedRepeatNamed(self):
dataset = tu.create_single_increasing_dataset(3, shape=[4, 4])
shape = [4, 4]
def dataset_parser(value):
image_1 = value
image_2 = (value + 10.) / 2.0
return (image_1, image_2)
dataset = dataset.map(dataset_parser)
infeed_queue = ipu.ipu_infeed_queue.IPUInfeedQueue(dataset, next_feed_id())
outfeed_queue = ipu.ipu_outfeed_queue.IPUOutfeedQueue(next_feed_id())
def body(v, im1, im2):
v = v + im1 + im2
outfeed = outfeed_queue.enqueue({"v": v, "image1": im1, "image2": im2})
return (v, outfeed)
def my_net():
v = constant_op.constant(0.0, shape=shape, dtype=np.float32)
r = ipu.loops.repeat(5, body, [v], infeed_queue)
return r
with ipu.scopes.ipu_scope("/device:IPU:0"):
res = ipu.ipu_compiler.compile(my_net, inputs=[])
outfed = outfeed_queue.dequeue()
with session_lib.Session() as sess:
tu.ReportJSON(self, sess)
sess.run(infeed_queue.initializer)
result = sess.run(res)
self.assertAllClose(result[0], np.broadcast_to(31, shape))
outfed_result = sess.run(outfed)
self.assertTrue(len(outfed_result) == 3)
self.assertAllClose(outfed_result["v"][0], np.broadcast_to(5, shape))
self.assertAllClose(outfed_result["v"][1], np.broadcast_to(11.5, shape))
self.assertAllClose(outfed_result["v"][2], np.broadcast_to(19.5, shape))
self.assertAllClose(outfed_result["v"][3], np.broadcast_to(24.5, shape))
self.assertAllClose(outfed_result["v"][4], np.broadcast_to(31, shape))
self.assertAllClose(outfed_result["image1"][0],
np.broadcast_to(0, shape))
self.assertAllClose(outfed_result["image1"][1],
np.broadcast_to(1, shape))
self.assertAllClose(outfed_result["image1"][2],
np.broadcast_to(2, shape))
self.assertAllClose(outfed_result["image1"][3],
np.broadcast_to(0, shape))
self.assertAllClose(outfed_result["image1"][4],
np.broadcast_to(1, shape))
self.assertAllClose(outfed_result["image2"][0],
np.broadcast_to(5, shape))
self.assertAllClose(outfed_result["image2"][1],
np.broadcast_to(5.5, shape))
self.assertAllClose(outfed_result["image2"][2],
np.broadcast_to(6, shape))
self.assertAllClose(outfed_result["image2"][3],
np.broadcast_to(5, shape))
self.assertAllClose(outfed_result["image2"][4],
np.broadcast_to(5.5, shape))
@test_util.deprecated_graph_mode_only
def testSingleInfeedOutfeedRepeatNamedLast(self):
dataset = tu.create_single_increasing_dataset(3, shape=[4, 4])
shape = [4, 4]
def dataset_parser(value):
image_1 = value
image_2 = (value + 10.) / 2.0
return (image_1, image_2)
dataset = dataset.map(dataset_parser)
infeed_queue = ipu.ipu_infeed_queue.IPUInfeedQueue(dataset, next_feed_id())
outfeed_queue = ipu.ipu_outfeed_queue.IPUOutfeedQueue(
next_feed_id(), outfeed_mode=ipu.ipu_outfeed_queue.IPUOutfeedMode.LAST)
def body(v, im1, im2):
v = v + im1 + im2
outfeed = outfeed_queue.enqueue({"v": v, "image1": im1, "image2": im2})
return (v, outfeed)
def my_net():
v = constant_op.constant(0.0, shape=shape, dtype=np.float32)
r = ipu.loops.repeat(5, body, [v], infeed_queue)
return r
with ipu.scopes.ipu_scope("/device:IPU:0"):
res = ipu.ipu_compiler.compile(my_net, inputs=[])
outfed = outfeed_queue.dequeue()
with session_lib.Session() as sess:
tu.ReportJSON(self, sess)
sess.run(infeed_queue.initializer)
result = sess.run(res)
self.assertAllClose(result[0], np.broadcast_to(31, shape))
outfed_result = sess.run(outfed)
self.assertTrue(len(outfed_result) == 3)
self.assertAllClose(outfed_result["v"], np.broadcast_to(31, shape))
self.assertAllClose(outfed_result["image1"], np.broadcast_to(1, shape))
self.assertAllClose(outfed_result["image2"], np.broadcast_to(5.5, shape))
@test_util.deprecated_graph_mode_only
def testTrainingLoopWithInfeedAndOutfeedGetAll(self):
dataset = tu.create_single_increasing_dataset(10, shape=[4, 4, 2])
dataset = dataset.batch(batch_size=2, drop_remainder=True)
infeed_queue = ipu.ipu_infeed_queue.IPUInfeedQueue(dataset, next_feed_id())
outfeed_queue = ipu.ipu_outfeed_queue.IPUOutfeedQueue(next_feed_id())
def my_net(iters):
def body(loss, x):
with variable_scope.variable_scope("vs", use_resource=True):
y = layers.Conv2D(2,
1,
use_bias=True,
kernel_initializer=init_ops.ones_initializer(),
name='conv1')(x)
loss = math_ops.reduce_sum(y)
optimizer = gradient_descent.GradientDescentOptimizer(0.1)
train = optimizer.minimize(loss)
outfeed = outfeed_queue.enqueue(loss)
with ops.control_dependencies([train]):
return (array_ops.identity(loss), outfeed)
loss = 0.0
return ipu.loops.repeat(iters, body, (loss), infeed_queue)
with ops.device('cpu'):
iters = array_ops.placeholder(np.int32, shape=[])
with ipu.scopes.ipu_scope("/device:IPU:0"):
r = ipu.ipu_compiler.compile(my_net, inputs=[iters])
outfeeds = outfeed_queue.dequeue()
with session_lib.Session() as sess:
sess.run(infeed_queue.initializer)
sess.run(variables.global_variables_initializer())
initial_loss = sess.run(r, {iters: 1})
final_loss = sess.run(r, {iters: 1000})
outfed = sess.run(outfeeds)
self.assertTrue(initial_loss > final_loss)
      self.assertEqual(outfed.shape[0], 1001)
self.assertTrue(isinstance(outfed, np.ndarray))
@test_util.deprecated_graph_mode_only
def testTrainingLoopWithInfeedAndOutfeedGetLast(self):
dataset = tu.create_single_increasing_dataset(10, shape=[4, 4, 2])
dataset = dataset.batch(batch_size=2, drop_remainder=True)
infeed_queue = ipu.ipu_infeed_queue.IPUInfeedQueue(dataset, next_feed_id())
outfeed_queue = ipu.ipu_outfeed_queue.IPUOutfeedQueue(
next_feed_id(), outfeed_mode=ipu.ipu_outfeed_queue.IPUOutfeedMode.LAST)
def my_net(iters):
def body(loss, x):
with variable_scope.variable_scope("vs", use_resource=True):
y = layers.Conv2D(2,
1,
use_bias=True,
kernel_initializer=init_ops.ones_initializer(),
name='conv1')(x)
loss = math_ops.reduce_sum(y)
optimizer = gradient_descent.GradientDescentOptimizer(0.1)
train = optimizer.minimize(loss)
outfeed = outfeed_queue.enqueue(loss)
with ops.control_dependencies([train]):
return (array_ops.identity(loss), outfeed)
loss = 0.0
return ipu.loops.repeat(iters, body, (loss), infeed_queue)
with ops.device('cpu'):
iters = array_ops.placeholder(np.int32, shape=[])
with ipu.scopes.ipu_scope("/device:IPU:0"):
r = ipu.ipu_compiler.compile(my_net, inputs=[iters])
outfeeds = outfeed_queue.dequeue()
with session_lib.Session() as sess:
sess.run(infeed_queue.initializer)
sess.run(variables.global_variables_initializer())
initial_loss = sess.run(r, {iters: 1})
final_loss = sess.run(r, {iters: 1000})
outfed = sess.run(outfeeds)
self.assertTrue(initial_loss > final_loss)
self.assertTrue(outfed == final_loss)
# Check that a scalar is returned instead of a numpy array
self.assertTrue(isinstance(outfed, np.float32))
@test_util.deprecated_graph_mode_only
def testTwoOutfeedsDifferentPrograms(self):
outfeed_queue1 = ipu.ipu_outfeed_queue.IPUOutfeedQueue(
feed_name=next_feed_id())
outfeed_queue2 = ipu.ipu_outfeed_queue.IPUOutfeedQueue(
feed_name=next_feed_id())
def body1(v):
outfeed = outfeed_queue1.enqueue(v)
v = v + 1
return (v, outfeed)
def my_net1(v):
r = ipu.loops.repeat(5, body1, (v))
return r
def body2(v):
outfeed = outfeed_queue2.enqueue(v)
v = v + 1
return (v, outfeed)
def my_net2(v):
r = ipu.loops.repeat(7, body2, (v))
return r
with ops.device('cpu'):
v1 = array_ops.placeholder(np.float32, [4, 4])
v2 = array_ops.placeholder(np.float32, [5, 5])
with ipu.scopes.ipu_scope("/device:IPU:0"):
res1 = ipu.ipu_compiler.compile(my_net1, inputs=[v1])
res2 = ipu.ipu_compiler.compile(my_net2, inputs=[v2])
outfeed1 = outfeed_queue1.dequeue()
outfeed2 = outfeed_queue2.dequeue()
with session_lib.Session() as sess:
tu.ReportJSON(self, sess)
result1 = sess.run(res1, {v1: np.ones([4, 4], np.float32)})
self.assertAllClose(result1[0], np.broadcast_to(6, [4, 4]))
outfed1 = sess.run(outfeed1)
for i in range(5):
self.assertAllClose(outfed1[i], np.broadcast_to(i + 1, [4, 4]))
result2 = sess.run(res2, {v2: np.full([5, 5], 4, np.float32)})
self.assertAllClose(result2[0], np.broadcast_to(11, [5, 5]))
outfed2 = sess.run(outfeed2)
for i in range(7):
self.assertAllClose(outfed2[i], np.broadcast_to(i + 4, [5, 5]))
@test_util.deprecated_graph_mode_only
def testTwoOutfeedsDifferentProgramsDelayedOutfeedRead(self):
outfeed_queue1 = ipu.ipu_outfeed_queue.IPUOutfeedQueue(
feed_name=next_feed_id())
outfeed_queue2 = ipu.ipu_outfeed_queue.IPUOutfeedQueue(
feed_name=next_feed_id())
def body1(v):
outfeed = outfeed_queue1.enqueue(v)
v = v + 1
return (v, outfeed)
def my_net1(v):
r = ipu.loops.repeat(5, body1, (v))
return r
def body2(v):
outfeed = outfeed_queue2.enqueue(v)
v = v + 1
return (v, outfeed)
def my_net2(v):
r = ipu.loops.repeat(7, body2, (v))
return r
with ops.device('cpu'):
v1 = array_ops.placeholder(np.float32, [4, 4])
v2 = array_ops.placeholder(np.float32, [5, 5])
with ipu.scopes.ipu_scope("/device:IPU:0"):
res1 = ipu.ipu_compiler.compile(my_net1, inputs=[v1])
res2 = ipu.ipu_compiler.compile(my_net2, inputs=[v2])
outfeed1 = outfeed_queue1.dequeue()
outfeed2 = outfeed_queue2.dequeue()
with session_lib.Session() as sess:
tu.ReportJSON(self, sess)
result1 = sess.run(res1, {v1: np.ones([4, 4], np.float32)})
self.assertAllClose(result1[0], np.broadcast_to(6, [4, 4]))
result2 = sess.run(res2, {v2: np.full([5, 5], 4, np.float32)})
self.assertAllClose(result2[0], np.broadcast_to(11, [5, 5]))
outfed1 = sess.run(outfeed1)
for i in range(5):
self.assertAllClose(outfed1[i], np.broadcast_to(i + 1, [4, 4]))
outfed2 = sess.run(outfeed2)
for i in range(7):
self.assertAllClose(outfed2[i], np.broadcast_to(i + 4, [5, 5]))
@test_util.deprecated_graph_mode_only
def testTwoOutfeedsDifferentProgramsSameFeedName(self):
outfeed_queue1 = ipu.ipu_outfeed_queue.IPUOutfeedQueue(feed_name="a")
outfeed_queue2 = ipu.ipu_outfeed_queue.IPUOutfeedQueue(feed_name="a")
def body1(v):
outfeed = outfeed_queue1.enqueue(v)
v = v + 1
return (v, outfeed)
def my_net1(v):
r = ipu.loops.repeat(5, body1, (v))
return r
def body2(v):
outfeed = outfeed_queue2.enqueue(v)
v = v + 1
return (v, outfeed)
def my_net2(v):
r = ipu.loops.repeat(7, body2, (v))
return r
with ops.device('cpu'):
v1 = array_ops.placeholder(np.float32, [4, 4])
v2 = array_ops.placeholder(np.float32, [5, 5])
with ipu.scopes.ipu_scope("/device:IPU:0"):
res1 = ipu.ipu_compiler.compile(my_net1, inputs=[v1])
res2 = ipu.ipu_compiler.compile(my_net2, inputs=[v2])
outfeed_queue1.dequeue()
outfeed_queue2.dequeue()
with session_lib.Session() as sess:
tu.ReportJSON(self, sess)
sess.run(res1, {v1: np.ones([4, 4], np.float32)})
with self.assertRaisesRegex(errors.FailedPreconditionError,
'Outfeed with id=\'a\' already exists'):
sess.run(res2, {v2: np.full([5, 5], 4, np.float32)})
@test_util.deprecated_graph_mode_only
def testInfeedUsingDatasetWithNestedDictNotUnpacked(self):
x = {
"x0": np.ones(shape=[2], dtype=np.float32),
"x1": np.ones(shape=[2], dtype=np.float32)
}
y = np.ones(shape=[2], dtype=np.float32)
ds = dataset_ops.Dataset.from_tensor_slices((x, y))
infeed_queue = ipu.ipu_infeed_queue.IPUInfeedQueue(
ds, feed_name=next_feed_id())
def body(total, x, y):
total += x["x0"] + x["x1"] + y
return total
def my_net():
r = ipu.loops.repeat(2, body, [0.0], infeed_queue)
return r
with ipu.scopes.ipu_scope("/device:IPU:0"):
res = ipu.ipu_compiler.compile(my_net)
with session_lib.Session() as sess:
sess.run(infeed_queue.initializer)
result = sess.run(res)
self.assertEqual(result, [6.0])
@test_util.deprecated_graph_mode_only
def testInfeedUsingDatasetWithOnlyDictIsUnpacked(self):
x = {
"x0": np.ones(shape=[2], dtype=np.float32),
"x1": np.ones(shape=[2], dtype=np.float32)
}
ds = dataset_ops.Dataset.from_tensor_slices((x,))
infeed_queue = ipu.ipu_infeed_queue.IPUInfeedQueue(
ds, feed_name=next_feed_id())
def body(total, x0, x1):
total += x0 + x1
return total
def my_net():
r = ipu.loops.repeat(2, body, [0.0], infeed_queue)
return r
with ipu.scopes.ipu_scope("/device:IPU:0"):
res = ipu.ipu_compiler.compile(my_net)
with session_lib.Session() as sess:
sess.run(infeed_queue.initializer)
result = sess.run(res)
self.assertEqual(result, [4.0])
@test_util.deprecated_graph_mode_only
def testSingleOutfeedWithBatchingNonTuple(self):
b_count = 4
outfeed_queue = ipu.ipu_outfeed_queue.IPUOutfeedQueue(
next_feed_id(), io_batch_size=b_count)
def body(a, b):
c = a + b
outfeed = outfeed_queue.enqueue(c)
return (c, a, outfeed)
def my_net(a, b):
r = ipu.loops.repeat(8, body, (a, b))
return r
with ops.device('cpu'):
a = array_ops.placeholder(np.float32, [4])
b = array_ops.placeholder(np.float32, [4])
with ipu.scopes.ipu_scope("/device:IPU:0"):
res = ipu.ipu_compiler.compile(my_net, inputs=[a, b])
outfeed = outfeed_queue.dequeue()
with session_lib.Session() as sess:
report = tu.ReportJSON(self, sess)
report.reset()
fd = {a: [1., 1., 1., 1.], b: [0., 0., 0., 0.]}
result = sess.run(res, fd)
self.assertAllClose(result[0], [34., 34., 34., 34.])
self.assertAllClose(result[1], [21., 21., 21., 21.])
outfed = sess.run(outfeed)
# A list of 8 fibonacci numbers
expected = [[1., 1., 1., 1.], [2., 2., 2., 2.], [3., 3., 3., 3.],
[5., 5., 5., 5.], [8., 8., 8., 8.], [13., 13., 13., 13.],
[21., 21., 21., 21.], [34., 34., 34., 34.]]
self.assertAllClose(outfed, expected)
report.parse_log()
report.assert_each_tile_memory_is_less_than(4234, tolerance=0.1)
report.assert_total_tile_memory(319467, tolerance=0.1)
total_outfeeds = 0
for s in report.get_execution_reports()[0]['simulation']['steps']:
if s['type'] == 'StreamCopy':
# batch x shape=[4] floats
if s['totalData'] == b_count * 4 * 4:
total_outfeeds = total_outfeeds + 1
self.assertEqual(total_outfeeds, 8 // b_count)
@test_util.deprecated_graph_mode_only
def testSingleOutfeedWithBatchingFinalNonTuple(self):
b_count = 4
outfeed_queue = ipu.ipu_outfeed_queue.IPUOutfeedQueue(
next_feed_id(),
io_batch_size=b_count,
outfeed_mode=ipu.ipu_outfeed_queue.IPUOutfeedMode.LAST)
def body(a, b):
c = a + b
outfeed = outfeed_queue.enqueue(c)
return (c, a, outfeed)
def my_net(a, b):
r = ipu.loops.repeat(8, body, (a, b))
return r
with ops.device('cpu'):
a = array_ops.placeholder(np.float32, [4])
b = array_ops.placeholder(np.float32, [4])
with ipu.scopes.ipu_scope("/device:IPU:0"):
res = ipu.ipu_compiler.compile(my_net, inputs=[a, b])
outfeed = outfeed_queue.dequeue()
with session_lib.Session() as sess:
report = tu.ReportJSON(self, sess)
report.reset()
fd = {a: [1., 1., 1., 1.], b: [0., 0., 0., 0.]}
result = sess.run(res, fd)
self.assertAllClose(result[0], [34., 34., 34., 34.])
self.assertAllClose(result[1], [21., 21., 21., 21.])
outfed = sess.run(outfeed)
      # Only the last of the 8 Fibonacci values (LAST outfeed mode)
expected = [34., 34., 34., 34.]
self.assertAllClose(outfed, expected)
report.parse_log()
report.assert_each_tile_memory_is_less_than(4234, tolerance=0.1)
report.assert_total_tile_memory(319467, tolerance=0.1)
total_outfeeds = 0
for s in report.get_execution_reports()[0]['simulation']['steps']:
if s['type'] == 'StreamCopy':
          # batch x shape=[4] floats
if s['totalData'] == b_count * 4 * 4:
total_outfeeds = total_outfeeds + 1
self.assertEqual(total_outfeeds, 8 // b_count)
@test_util.deprecated_graph_mode_only
def testSingleOutfeedWithBatchingFinalNonTupleRearrangeDevice(self):
b_count = 4
outfeed_queue = ipu.ipu_outfeed_queue.IPUOutfeedQueue(
next_feed_id(),
io_batch_size=b_count,
outfeed_mode=ipu.ipu_outfeed_queue.IPUOutfeedMode.LAST)
def body(a, b):
c = math_ops.matmul(a, b)
outfeed = outfeed_queue.enqueue(c)
return (a, b, outfeed)
def my_net(a, b):
r = ipu.loops.repeat(8, body, (a, b))
return r
with ops.device('cpu'):
a = array_ops.placeholder(np.float32, [1024, 256])
b = array_ops.placeholder(np.float32, [256, 512])
with ipu.scopes.ipu_scope("/device:IPU:0"):
res = ipu.ipu_compiler.compile(my_net, inputs=[a, b])
outfeed = outfeed_queue.dequeue()
with session_lib.Session() as sess:
report = tu.ReportJSON(self, sess)
report.reset()
fd = {a: np.ones(a.shape), b: np.zeros(b.shape)}
sess.run(res, fd)
outfed = sess.run(outfeed)
      # The matmul output
expected = np.zeros([1024, 512])
self.assertAllClose(outfed, expected)
report.parse_log()
report.assert_max_tile_memory(50775)
report.assert_total_tile_memory(57042815)
total_outfeeds = 0
for s in report.get_execution_reports()[0]['simulation']['steps']:
if s['type'] == 'StreamCopy':
          # batch x shape=[1024, 512] floats
if s['totalData'] == b_count * 1024 * 512 * 4:
total_outfeeds = total_outfeeds + 1
self.assertEqual(total_outfeeds, 8 // b_count)
@test_util.deprecated_graph_mode_only
def testSingleOutfeedWithBatchingFinalNonTupleRearrangeHost(self):
b_count = 4
outfeed_queue = ipu.ipu_outfeed_queue.IPUOutfeedQueue(
next_feed_id(),
io_batch_size=b_count,
outfeed_mode=ipu.ipu_outfeed_queue.IPUOutfeedMode.LAST)
def body(a, b):
c = math_ops.matmul(a, b)
outfeed = outfeed_queue.enqueue(c)
return (a, b, outfeed)
def my_net(a, b):
r = ipu.loops.repeat(8, body, (a, b))
return r
with ops.device('cpu'):
a = array_ops.placeholder(np.float32, [1024, 256])
b = array_ops.placeholder(np.float32, [256, 512])
with ipu.scopes.ipu_scope("/device:IPU:0"):
res = ipu.ipu_compiler.compile(my_net, inputs=[a, b])
outfeed = outfeed_queue.dequeue()
with session_lib.Session() as sess:
report = tu.ReportJSON(self,
sess,
always_rearrange_copies_on_the_host=True)
report.reset()
fd = {a: np.ones(a.shape), b: np.zeros(b.shape)}
sess.run(res, fd)
outfed = sess.run(outfeed)
      # The matmul output
expected = np.zeros([1024, 512])
self.assertAllClose(outfed, expected)
report.parse_log()
report.assert_max_tile_memory(50645)
report.assert_total_tile_memory(56489822)
total_outfeeds = 0
for s in report.get_execution_reports()[0]['simulation']['steps']:
if s['type'] == 'StreamCopy':
          # batch x shape=[1024, 512] floats
if s['totalData'] == b_count * 1024 * 512 * 4:
total_outfeeds = total_outfeeds + 1
self.assertEqual(total_outfeeds, 8 // b_count)
@test_util.deprecated_graph_mode_only
def testInfeedDeleteBeforeInitializeShouldRaiseException(self):
dataset = tu.create_single_increasing_dataset(10)
infeed_queue = ipu.ipu_infeed_queue.IPUInfeedQueue(dataset, "delete_name")
delete_op = infeed_queue.deleter
with session_lib.Session() as sess:
with self.assertRaisesRegex(errors_impl.NotFoundError,
"Infeed with id='delete_name'"):
sess.run(delete_op)
@test_util.deprecated_graph_mode_only
def testInfeedNameCanBeReusedAfterDeletion(self):
for _ in range(2):
dataset = tu.create_single_increasing_dataset(10)
infeed_queue = ipu.ipu_infeed_queue.IPUInfeedQueue(dataset, "reuse_name")
with session_lib.Session() as sess:
sess.run(infeed_queue.initializer)
sess.run(infeed_queue.deleter)
@test_util.deprecated_graph_mode_only
def testInfeedRestart(self):
# Note: This is not something that we encourage or need to support,
# but it is the current behaviour that we document in this test:
# The infeed can be restarted by calling the `deleter` and then the
# `initializer` again.
def data_gen():
for i in range(5):
yield i
dataset = dataset_ops.Dataset.from_generator(data_gen, np.float32, ())
infeed_queue = ipu.ipu_infeed_queue.IPUInfeedQueue(dataset, "reuse_name")
init_op = infeed_queue.initializer
delete_op = infeed_queue.deleter
def body(v, x):
v = v + x
return v
def my_net(v):
r = ipu.loops.repeat(5, body, (v), infeed_queue)
return r
with ipu.scopes.ipu_scope("/device:IPU:0"):
[res] = ipu.ipu_compiler.compile(my_net, inputs=[0.0])
with session_lib.Session() as sess:
for _ in range(2):
sess.run(init_op)
self.assertEqual(sum(range(5)), sess.run(res))
sess.run(delete_op)
@test_util.deprecated_graph_mode_only
def testInfeedOutfeedContinuousDequeuing(self):
num_iterations = 1000
dataset = tu.create_single_increasing_dataset(num_iterations, shape=[1])
infeed_queue = ipu.ipu_infeed_queue.IPUInfeedQueue(dataset, next_feed_id())
outfeed_queue = ipu.ipu_outfeed_queue.IPUOutfeedQueue(next_feed_id())
def body(x):
return outfeed_queue.enqueue(x)
def my_net():
return ipu.loops.repeat(num_iterations, body, [], infeed_queue)
with ipu.scopes.ipu_scope("/device:IPU:0"):
res = ipu.ipu_compiler.compile(my_net, inputs=[])
outfed = outfeed_queue.dequeue()
with session_lib.Session() as sess:
def dequeue(result):
while len(result) != 1000:
r = sess.run(outfed)
if r.size:
result.extend(list(r.flatten()))
sess.run(infeed_queue.initializer)
r = []
dequeue_thread = Thread(target=dequeue, args=[r])
dequeue_thread.start()
sess.run(res)
dequeue_thread.join()
self.assertAllClose(r, range(0, 1000))
@test_util.deprecated_graph_mode_only
def testInfeedOutfeedContinuousDequeuingGetLastBeforeEnqueued(self):
num_iterations = 1000
dataset = tu.create_single_increasing_dataset(num_iterations, shape=[1])
infeed_queue = ipu.ipu_infeed_queue.IPUInfeedQueue(dataset, next_feed_id())
outfeed_queue = ipu.ipu_outfeed_queue.IPUOutfeedQueue(
next_feed_id(), outfeed_mode=ipu.ipu_outfeed_queue.IPUOutfeedMode.LAST)
def body(x):
return outfeed_queue.enqueue(x)
def my_net():
return ipu.loops.repeat(num_iterations, body, [], infeed_queue)
with ipu.scopes.ipu_scope("/device:IPU:0"):
res = ipu.ipu_compiler.compile(my_net, inputs=[])
outfed = outfeed_queue.dequeue()
with session_lib.Session() as sess:
sess.run(infeed_queue.initializer)
with self.assertRaisesRegex(errors.FailedPreconditionError,
r'Trying to get the last value from an'):
sess.run(outfed)
sess.run(res)
@test_util.deprecated_graph_mode_only
def testOutfeedDeleteBeforeExecuteShouldRaiseException(self):
outfeed_queue = ipu.ipu_outfeed_queue.IPUOutfeedQueue(
"delete_name", outfeed_mode=ipu.ipu_outfeed_queue.IPUOutfeedMode.LAST)
delete_op = outfeed_queue.deleter
with session_lib.Session() as sess:
with self.assertRaisesRegex(errors_impl.NotFoundError,
"Outfeed with id='delete_name'"):
sess.run(delete_op)
@test_util.deprecated_graph_mode_only
def testOutfeedNameCanBeReusedAfterDeletion(self):
for _ in range(2):
outfeed_queue = ipu.ipu_outfeed_queue.IPUOutfeedQueue(
"reuse_name", outfeed_mode=ipu.ipu_outfeed_queue.IPUOutfeedMode.LAST)
with ipu.scopes.ipu_scope("/device:IPU:0"):
enqueue = ipu.ipu_compiler.compile(outfeed_queue.enqueue, inputs=[1.0])
dequeue = outfeed_queue.dequeue()
with session_lib.Session() as sess:
sess.run(enqueue)
self.assertEqual(1.0, sess.run(dequeue))
sess.run(outfeed_queue.deleter)
@test_util.deprecated_graph_mode_only
def testOutfeedNameCanBeReusedWithSameShape(self):
with session_lib.Session() as sess:
outfeed_queue1 = ipu.ipu_outfeed_queue.IPUOutfeedQueue(
"reuse_name", outfeed_mode=ipu.ipu_outfeed_queue.IPUOutfeedMode.LAST)
with ipu.scopes.ipu_scope("/device:IPU:0"):
enqueue1 = ipu.ipu_compiler.compile(outfeed_queue1.enqueue,
inputs=[1.0])
dequeue1 = outfeed_queue1.dequeue()
outfeed_queue2 = ipu.ipu_outfeed_queue.IPUOutfeedQueue(
"reuse_name", outfeed_mode=ipu.ipu_outfeed_queue.IPUOutfeedMode.LAST)
with ipu.scopes.ipu_scope("/device:IPU:0"):
enqueue2 = ipu.ipu_compiler.compile(outfeed_queue2.enqueue,
inputs=[2.0])
dequeue2 = outfeed_queue2.dequeue()
sess.run(enqueue1)
self.assertEqual(1.0, sess.run(dequeue1))
sess.run(enqueue2)
self.assertEqual(2.0, sess.run(dequeue2))
sess.run(outfeed_queue1.deleter)
# Can only deregister it once
with self.assertRaisesRegex(errors.NotFoundError,
"Outfeed with id='reuse_name'"):
sess.run(outfeed_queue2.deleter)
@test_util.deprecated_graph_mode_only
def testOutfeedNameCannotBeReusedWithDifferentShape(self):
with session_lib.Session() as sess:
outfeed_queue1 = ipu.ipu_outfeed_queue.IPUOutfeedQueue(
"reuse_name", outfeed_mode=ipu.ipu_outfeed_queue.IPUOutfeedMode.LAST)
with ipu.scopes.ipu_scope("/device:IPU:0"):
enqueue1 = ipu.ipu_compiler.compile(outfeed_queue1.enqueue,
inputs=[1.0])
dequeue1 = outfeed_queue1.dequeue()
outfeed_queue2 = ipu.ipu_outfeed_queue.IPUOutfeedQueue(
"reuse_name", outfeed_mode=ipu.ipu_outfeed_queue.IPUOutfeedMode.LAST)
with ipu.scopes.ipu_scope("/device:IPU:0"):
enqueue2 = ipu.ipu_compiler.compile(outfeed_queue2.enqueue,
inputs=[[1.0, 1.0]])
sess.run(enqueue1)
self.assertEqual(1.0, sess.run(dequeue1))
with self.assertRaisesRegex(
errors.FailedPreconditionError,
"Outfeed with id='reuse_name' already exists but with a different"):
sess.run(enqueue2)
sess.run(outfeed_queue1.deleter)
@test_util.deprecated_graph_mode_only
def testOutfeedNameCannotBeReusedWithDifferentType(self):
with session_lib.Session() as sess:
outfeed_queue1 = ipu.ipu_outfeed_queue.IPUOutfeedQueue(
"reuse_name", outfeed_mode=ipu.ipu_outfeed_queue.IPUOutfeedMode.LAST)
with ipu.scopes.ipu_scope("/device:IPU:0"):
enqueue1 = ipu.ipu_compiler.compile(outfeed_queue1.enqueue,
inputs=[1.0])
dequeue1 = outfeed_queue1.dequeue()
outfeed_queue2 = ipu.ipu_outfeed_queue.IPUOutfeedQueue(
"reuse_name", outfeed_mode=ipu.ipu_outfeed_queue.IPUOutfeedMode.LAST)
with ipu.scopes.ipu_scope("/device:IPU:0"):
enqueue2 = ipu.ipu_compiler.compile(outfeed_queue2.enqueue,
inputs=[[1]])
sess.run(enqueue1)
self.assertEqual(1.0, sess.run(dequeue1))
with self.assertRaisesRegex(
errors.FailedPreconditionError,
"Outfeed with id='reuse_name' already exists but with a different"):
sess.run(enqueue2)
sess.run(outfeed_queue1.deleter)
@test_util.deprecated_graph_mode_only
def testInfeedOutfeedScalarPrefetchAndBuffer(self):
number_of_batches = 4
num_iterations = 100
dataset = tu.create_single_increasing_dataset(num_iterations, shape=[])
infeed_queue = ipu.ipu_infeed_queue.IPUInfeedQueue(
dataset, next_feed_id(), data_to_prefetch=number_of_batches)
outfeed_queue = ipu.ipu_outfeed_queue.IPUOutfeedQueue(
next_feed_id(), io_batch_size=number_of_batches)
def body(a):
outfeed = outfeed_queue.enqueue(a)
return outfeed
def my_net():
r = ipu.loops.repeat(num_iterations, body, infeed_queue=infeed_queue)
return r
with ipu.scopes.ipu_scope("/device:IPU:0"):
res = ipu.ipu_compiler.compile(my_net)
outfeed = outfeed_queue.dequeue()
with session_lib.Session() as sess:
sess.run(infeed_queue.initializer)
sess.run(res)
outfed = sess.run(outfeed)
self.assertAllClose(outfed, range(num_iterations))
@test_util.deprecated_graph_mode_only
def testCannotFeedInt64(self):
dataset = dataset_ops.Dataset.range(5)
infeed_queue = ipu.ipu_infeed_queue.IPUInfeedQueue(dataset, next_feed_id())
def body(v, x):
v = v + math_ops.cast(x, np.int32)
return v
def my_net():
r = ipu.loops.repeat(5, body, (0,), infeed_queue)
return r
with ipu.scopes.ipu_scope("/device:IPU:0"):
ipu.ipu_compiler.compile(my_net, inputs=[])
with session_lib.Session() as sess:
with self.assertRaisesRegex(
errors.FailedPreconditionError,
"Unsupprted datatype int64 on index 0 of feed operation"):
sess.run(infeed_queue.initializer)
@test_util.deprecated_graph_mode_only
def testFeedBools(self):
left = [False, False, True, True]
right = [False, True, False, True]
dataset = dataset_ops.Dataset.from_tensor_slices((left, right))
dataset = dataset.batch(2, drop_remainder=True)
infeed_queue = ipu.ipu_infeed_queue.IPUInfeedQueue(dataset, next_feed_id())
outfeed_queue = ipu.ipu_outfeed_queue.IPUOutfeedQueue(next_feed_id())
def body(l, r):
return outfeed_queue.enqueue(math_ops.logical_and(l, r))
def my_net():
return ipu.loops.repeat(2, body, infeed_queue=infeed_queue)
with ipu.scopes.ipu_scope("/device:IPU:0"):
res = ipu.ipu_compiler.compile(my_net, inputs=[])
dequeued = outfeed_queue.dequeue()
with session_lib.Session() as sess:
sess.run(infeed_queue.initializer)
sess.run(res)
out = sess.run(dequeued)
self.assertAllEqual(np.logical_and(left, right), np.concatenate(out))
@test_util.deprecated_graph_mode_only
def testHashTableInDataPipeline(self):
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], np.int32)
table = lookup_ops.StaticHashTableV1(
initializer=lookup_ops.KeyValueTensorInitializer(keys, values),
default_value=-1)
dataset = dataset_ops.Dataset.from_tensor_slices(
["brain brain tank salad surgery".split()])
dataset = dataset.map(table.lookup)
infeed_queue = ipu.ipu_infeed_queue.IPUInfeedQueue(dataset, next_feed_id())
def my_net():
return infeed_queue._dequeue() # pylint: disable=protected-access
with ipu.scopes.ipu_scope("/device:IPU:0"):
[res] = ipu.ipu_compiler.compile(my_net)
with session_lib.Session() as sess:
sess.run(infeed_queue.initializer)
sess.run(table.initializer)
self.assertAllEqual([0, 0, -1, 1, 2], sess.run(res))
if __name__ == "__main__":
googletest.main()
|
TDMqttClient.py
|
#Copyright (C) 2021,2022 Andrew Palardy
#See LICENSE file for complete license terms
#MqttClient class
#Implements management code for Paho MQTT client
from paho.mqtt import client as mqtt_client
import json
import threading
class TDMqttClient():
    #Create MQTT client wrapper with a config dictionary
def __init__(self, MqttConfig):
#Address of broker
self.Broker = MqttConfig.get('broker')
#Port of broker
self.Port = MqttConfig.get('port',1883)
#Prefix
self.Prefix = MqttConfig.get('prefix','storagetags')
#client ID
self.ClientID = MqttConfig.get('client_id','StorageTags')
#Uname and password
self.Uname = MqttConfig.get('username')
self.Pword = MqttConfig.get('password')
#Make sure that none of the configuration is invalid
if(self.Broker is None):
print("MQTT: Error: MQTT broker address is invalid")
exit()
        #Create the Paho MQTT client
self.Client = mqtt_client.Client(self.ClientID)
#If Username is none, skip auth
if(self.Uname is not None):
if(self.Pword is None):
print("MQTT: Error: Username is valid, but Password is None")
print("MQTT: Not using authentication")
else:
                self.Client.username_pw_set(self.Uname,self.Pword)
#On connect function
self.Client.on_connect = self.on_connect
#Publish LWT
self.LWTopic = self.Prefix+"/status"
self.Client.will_set(self.LWTopic,"OFFLINE",0,True)
        #Connect to the broker
self.Client.connect(self.Broker,self.Port)
#Start a new task to deal with MQTT continuous processing
self.Thread = threading.Thread(target=self.task,name='MQTT',args=())
self.Thread.start()
#Task function
def task(self):
print("MQTT: Started Task")
#Call client loop forever
self.Client.loop_forever()
#On Connect function
def on_connect(self,client,userdata,flags,rc):
print("MQTT: Connected")
#Publish status online
self.Client.publish(self.LWTopic,"ONLINE",0,True)
#Update subscriptions here
#Function to terminate
def stop(self):
print("MQTT: Terminating")
#Publish the disconnection message
self.Client.publish(self.LWTopic,"OFFLINE",0,True)
self.Client.disconnect()
#Publish function
def publish(self,topic,payload,qos=0,retain=True):
#Append topic to prefix and call Paho
self.Client.publish(self.Prefix+"/"+topic,payload,qos,retain)
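#Illustrative usage (a sketch, not part of the original module; the broker
#address and topic below are hypothetical examples)
if __name__ == '__main__':
    Config = {'broker': 'localhost', 'prefix': 'storagetags'}
    Client = TDMqttClient(Config)
    Client.publish('tags/example', json.dumps({'id': 1, 'state': 'stored'}))
    Client.stop()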
|
sender.py
|
# This class is responsible for handling all asynchronous communication
# with Logz.io
import sys
import json
from time import sleep
from datetime import datetime
from threading import Thread, enumerate
import requests
from .logger import get_logger
if sys.version[0] == '2':
import Queue as queue
else:
import queue as queue
MAX_BULK_SIZE_IN_BYTES = 1 * 1024 * 1024 # 1 MB
def backup_logs(logs, logger):
timestamp = datetime.now().strftime('%d%m%Y-%H%M%S')
logger.info(
'Backing up your logs to logzio-failures-%s.txt', timestamp)
with open('logzio-failures-{}.txt'.format(timestamp), 'a') as f:
f.writelines('\n'.join(logs))
class LogzioSender:
def __init__(self,
token, url='https://listener.logz.io:8071',
logs_drain_timeout=5,
debug=False,
backup_logs=True):
self.token = token
self.url = '{}/?token={}'.format(url, token)
self.logs_drain_timeout = logs_drain_timeout
self.logger = get_logger(debug)
self.backup_logs = backup_logs
# Function to see if the main thread is alive
self.is_main_thread_active = lambda: any(
(i.name == 'MainThread') and i.is_alive() for i in enumerate())
# Create a queue to hold logs
self.queue = queue.Queue()
self._initialize_sending_thread()
def _initialize_sending_thread(self):
self.sending_thread = Thread(target=self._drain_queue)
self.sending_thread.daemon = False
self.sending_thread.name = 'logzio-sending-thread'
self.sending_thread.start()
def append(self, logs_message):
if not self.sending_thread.is_alive():
self._initialize_sending_thread()
# Queue lib is thread safe, no issue here
self.queue.put(json.dumps(logs_message))
def flush(self):
self._flush_queue()
def _drain_queue(self):
last_try = False
while not last_try:
# If main is exited, we should run one last time and try to remove
# all logs
if not self.is_main_thread_active():
self.logger.debug(
'Identified quit of main thread, sending logs one '
'last time')
last_try = True
try:
self._flush_queue()
except Exception as e:
self.logger.debug(
'Unexpected exception while draining queue to Logz.io, '
'swallowing. Exception: %s', e)
if not last_try:
sleep(self.logs_drain_timeout)
def _flush_queue(self):
# Sending logs until queue is empty
while not self.queue.empty():
logs_list = self._get_messages_up_to_max_allowed_size()
self.logger.debug(
'Starting to drain %s logs to Logz.io', len(logs_list))
# Not configurable from the outside
sleep_between_retries = 2
number_of_retries = 4
should_backup_to_disk = True
headers = {"Content-type": "text/plain"}
for current_try in range(number_of_retries):
should_retry = False
try:
response = requests.post(
self.url, headers=headers, data='\n'.join(logs_list))
if response.status_code != 200:
if response.status_code == 400:
self.logger.info(
'Got 400 code from Logz.io. This means that '
'some of your logs are too big, or badly '
'formatted. response: %s', response.text)
should_backup_to_disk = False
break
if response.status_code == 401:
self.logger.info(
'You are not authorized with Logz.io! Token '
'OK? dropping logs...')
should_backup_to_disk = False
break
else:
self.logger.info(
'Got %s while sending logs to Logz.io, '
'Try (%s/%s). Response: %s',
response.status_code,
current_try + 1,
number_of_retries,
response.text)
should_retry = True
else:
self.logger.debug(
'Successfully sent bulk of %s logs to '
'Logz.io!', len(logs_list))
should_backup_to_disk = False
break
except Exception as e:
self.logger.error(
'Got exception while sending logs to Logz.io, '
'Try (%s/%s). Message: %s',
current_try + 1, number_of_retries, e)
should_retry = True
if should_retry:
sleep(sleep_between_retries)
sleep_between_retries *= 2
if should_backup_to_disk and self.backup_logs:
# Write to file
self.logger.info(
'Could not send logs to Logz.io after %s tries, '
'backing up to local file system', number_of_retries)
backup_logs(logs_list, self.logger)
def _get_messages_up_to_max_allowed_size(self):
logs_list = []
current_size = 0
while not self.queue.empty():
current_log = self.queue.get()
current_size += sys.getsizeof(current_log)
logs_list.append(current_log)
if current_size >= MAX_BULK_SIZE_IN_BYTES:
break
return logs_list
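# Illustrative usage (a sketch, not part of the original module; the token is
# a placeholder for a real Logz.io shipping token):
#
#   sender = LogzioSender(token='<LOGZIO-SHIPPING-TOKEN>', debug=True)
#   sender.append({'message': 'hello from sender.py'})
#   sender.flush()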
|
__init__.py
|
#!/usr/bin/env python3
# vim: sw=4:ts=4:et:cc=120
__version__ = '1.0.15'
__doc__ = """
Yara Scanner
============
A wrapper around the yara library for Python. ::
scanner = YaraScanner()
# start tracking this yara file
scanner.track_yara_file('/path/to/yara_file.yar')
scanner.load_rules()
scanner.scan('/path/to/file/to/scan')
# check to see if your yara file changed
if scanner.check_rules():
scanner.load_rules()
# track an entire directory of yara files
scanner.track_yara_dir('/path/to/directory')
scanner.load_rules()
# did any of the yara files in this directory change?
if scanner.check_rules():
scanner.load_rules()
# track a git repository of yara rules
scanner.track_yara_repo('/path/to/git_repo')
scanner.load_rules()
# this only returns True if a new commit was added since the last check
if scanner.check_rules():
scanner.load_rules()
"""
import datetime
import functools
import json
import logging
import multiprocessing
import os
import os.path
import pickle
import random
import re
import shutil
import signal
import socket
import struct
import threading
import time
import traceback
from subprocess import PIPE, Popen
import plyara
import yara
# keys to the JSON dicts you get back from YaraScanner.scan_results
RESULT_KEY_TARGET = 'target'
RESULT_KEY_META = 'meta'
RESULT_KEY_NAMESPACE = 'namespace'
RESULT_KEY_RULE = 'rule'
RESULT_KEY_STRINGS = 'strings'
RESULT_KEY_TAGS = 'tags'
ALL_RESULT_KEYS = [
RESULT_KEY_TARGET,
RESULT_KEY_META,
RESULT_KEY_NAMESPACE,
RESULT_KEY_RULE,
RESULT_KEY_STRINGS,
RESULT_KEY_TAGS,
]
yara.set_config(max_strings_per_rule=30720)
log = logging.getLogger('yara-scanner')
def get_current_repo_commit(repo_dir):
"""Utility function to return the current commit hash for a given repo directory. Returns None on failure."""
p = Popen(['git', '-C', repo_dir, 'log', '-n', '1', '--format=oneline'], stdout=PIPE, stderr=PIPE, universal_newlines=True)
commit, stderr = p.communicate()
p.wait()
if len(stderr.strip()) > 0:
log.error("git reported an error: {}".format(stderr.strip()))
if len(commit) < 40:
log.error("got {} for stdout with git log".format(commit.strip()))
return None
return commit[0:40]
class YaraJSONEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, bytes):
return o.decode('utf-8', errors='backslashreplace')
return json.JSONEncoder.default(self, o)
class YaraScanner(object):
"""
The primary object used for scanning files and data with yara rules."""
def __init__(self, signature_dir=None, thread_count=None, test_mode=False):
"""
Creates a new YaraScanner object.
:param signature_dir: A directory that contains one directory per set of yara rules. Each subdirectory will get loaded into its own namespace (named after the path to the directory.) This is for convenience. Also see :func:`YaraScanner.track_yara_file`, :func:`YaraScanner.track_yara_dir`, and :func:`YaraScanner.track_yara_repo`.
:type signature_dir: str or None
:param thread_count: Number of threads to use. *This parameter is ignored and no longer used.*
:param test_mode: When set to True, each yara rule is tested for performance issues when it is loaded.
:type test_mode: bool
"""
self.rules = None
self._scan_results = []
# we can pass in a list of "blacklisted" rules
# this is a list of rule NAMES that are essentially ignored in the scan results (not output)
self._blacklisted_rules = set()
# we keep track of when the rules change and (optionally) automatically re-load the rules
self.tracked_files = {} # key = file_path, value = last modification time
self.tracked_dirs = {} # key = dir_path, value = {} (key = file_path, value = last mtime)
self.tracked_repos = {} # key = dir_path, value = current git commit
# both parameters to this function are for backwards compatibility
if thread_count is not None:
log.warning("thread_count is no longer used in YaraScanner.__init__")
# if we are in test mode, we test each yara file as it is loaded for performance issues
self.test_mode = test_mode
if signature_dir is not None:
for dir_path in os.listdir(signature_dir):
dir_path = os.path.join(signature_dir, dir_path)
if not os.path.isdir(dir_path):
continue
if os.path.exists(os.path.join(dir_path, '.git')):
self.track_yara_repository(dir_path)
else:
self.track_yara_dir(dir_path)
@property
def scan_results(self):
"""Returns the scan results of the most recent scan.
This function returns a list of dict with the following format ::
{
'target': str,
'meta': dict,
'namespace': str,
'rule': str,
'strings': list,
'tags': list,
}
**target** is the target of the scan. In the case of file scans, target will be the path to the file that was scanned. In the case of data (raw binary) scans, this will be an empty string.
**meta** is the dict of meta directives of the matching rule.
**namespace** is the namespace the rule is in. In the case of repo and directory tracking, this will be the path of the directory. Otherwise it has a hard coded value of DEFAULT. *Setting the namespace to the path of the directory allows yara rules with duplicate names in different directories to be added to the same yara context.*
**rule** is the name of the matching yara rule.
**strings** is a list of tuples representing the individual string matches in the following format. ::
(position, string_name, content)
where **position** is the byte position of the match, **string_name** is the name of the yara string that matched, and **content** is the binary content it matched.
**tags** is a list of tags contained in the matching rule.
"""
return self._scan_results
@scan_results.setter
def scan_results(self, value):
self._scan_results = value
@property
def blacklist(self):
"""The list of yara rules configured as blacklisted. Rules that are blacklisted are not compiled and used."""
return list(self._blacklisted_rules)
@blacklist.setter
def blacklist(self, value):
assert isinstance(value, list)
self._blacklisted_rules = set(value)
def blacklist_rule(self, rule_name):
"""Adds the given rule to the list of rules that are blacklisted. See :func:`YaraScanner.blacklist`."""
self._blacklisted_rules.add(rule_name)
@property
def json(self):
"""Returns the current scan results as a JSON formatted string."""
return json.dumps(self.scan_results, indent=4, sort_keys=True, cls=YaraJSONEncoder)
@functools.lru_cache()
def git_available(self):
"""Returns True if git is available on the system, False otherwise."""
return shutil.which('git') is not None
def track_yara_file(self, file_path):
"""Adds a single yara file. The file is then monitored for changes to mtime, removal or adding."""
if not os.path.exists(file_path):
self.tracked_files[file_path] = None # file did not exist when we started tracking
# we keep track of the path by keeping the key in the dictionary
# so that if the file comes back we'll reload it
else:
self.tracked_files[file_path] = os.path.getmtime(file_path)
log.debug("yara file {} tracked @ {}".format(file_path, self.tracked_files[file_path]))
def track_yara_dir(self, dir_path):
"""Adds all files in a given directory that end with .yar when converted to lowercase. All files are monitored for changes to mtime, as well as new and removed files."""
if not os.path.isdir(dir_path):
log.error("{} is not a directory".format(dir_path))
return
self.tracked_dirs[dir_path] = {}
for file_path in os.listdir(dir_path):
file_path = os.path.join(dir_path, file_path)
if file_path.lower().endswith('.yar') or file_path.lower().endswith('.yara'):
self.tracked_dirs[dir_path][file_path] = os.path.getmtime(file_path)
log.debug("tracking file {} @ {}".format(file_path, self.tracked_dirs[dir_path][file_path]))
log.debug("tracking directory {} with {} yara files".format(dir_path, len(self.tracked_dirs[dir_path])))
def track_yara_repository(self, dir_path):
"""Adds all files in a given directory **that is a git repository** that end with .yar when converted to lowercase. Only commits to the repository trigger rule reload."""
if not self.git_available():
log.warning("git cannot be found: defaulting to track_yara_dir")
return self.track_yara_dir(dir_path)
if not os.path.isdir(dir_path):
log.error("{} is not a directory".format(dir_path))
return False
if not os.path.exists(os.path.join(dir_path, '.git')):
log.error("{} is not a git repository (missing .git)".format(dir_path))
return False
# get the initial commit of this directory
self.tracked_repos[dir_path] = get_current_repo_commit(dir_path)
log.debug("tracking git repo {} @ {}".format(dir_path, self.tracked_repos[dir_path]))
def check_rules(self):
"""
Returns True if the rules need to be recompiled, False otherwise. The criteria that determines if the rules are recompiled depends on how they are tracked.
:rtype: bool"""
reload_rules = False # final result to return
for file_path in self.tracked_files.keys():
# did the file come back?
if self.tracked_files[file_path] is None and os.path.exists(file_path):
log.info(f"detected recreated yara file {file_path}")
self.track_yara_file(file_path)
# was the file deleted?
elif self.tracked_files[file_path] is not None and not os.path.exists(file_path):
log.info(f"detected deleted yara file {file_path}")
self.track_yara_file(file_path)
reload_rules = True
# was the file modified?
elif self.tracked_files[file_path] is not None and os.path.getmtime(file_path) != self.tracked_files[file_path]:
log.info(f"detected change in yara file {file_path}")
self.track_yara_file(file_path)
reload_rules = True
for dir_path in self.tracked_dirs.keys():
reload_dir = False # set to True if we need to reload this directory
existing_files = set() # keep track of the ones we see
for file_path in os.listdir(dir_path):
file_path = os.path.join(dir_path, file_path)
if not ( file_path.lower().endswith('.yar') or file_path.lower().endswith('.yara') ):
continue
existing_files.add(file_path)
if file_path not in self.tracked_dirs[dir_path]:
log.info("detected new yara file {} in {}".format(file_path, dir_path))
reload_dir = True
reload_rules = True
elif os.path.getmtime(file_path) != self.tracked_dirs[dir_path][file_path]:
log.info("detected change in yara file {} dir {}".format(file_path, dir_path))
reload_dir = True
reload_rules = True
# did a file get deleted?
for file_path in self.tracked_dirs[dir_path].keys():
if file_path not in existing_files:
log.info("detected deleted yara file {} in {}".format(file_path, dir_path))
reload_dir = True
reload_rules = True
if reload_dir:
self.track_yara_dir(dir_path)
for repo_path in self.tracked_repos.keys():
current_repo_commit = get_current_repo_commit(repo_path)
#log.debug("repo {} current commit {} tracked commit {}".format(
#repo_path, self.tracked_repos[repo_path], current_repo_commit))
if current_repo_commit != self.tracked_repos[repo_path]:
log.info("detected change in git repo {}".format(repo_path))
self.track_yara_repository(repo_path)
reload_rules = True
# if we don't have a yara context yet then we def need to compile the rules
if self.rules is None:
return True
return reload_rules
def load_rules(self):
"""
Loads and compiles all tracked yara rules. Returns True if the rules were loaded correctly, False otherwise.
Scans can be performed only after the rules are loaded.
:rtype: bool"""
# load and compile the rules
# we load all the rules into memory as a string to be compiled
sources = {}
rule_count = 0
file_count = 0
# get the list of all the files to compile
all_files = {} # key = "namespace", value = [] of file_paths
# XXX there's a bug in yara where using an empty string as the namespace causes a segfault
all_files['DEFAULT'] = [ _ for _ in self.tracked_files.keys() if self.tracked_files[_] is not None]
file_count += len(all_files['DEFAULT'])
for dir_path in self.tracked_dirs.keys():
all_files[dir_path] = self.tracked_dirs[dir_path]
file_count += len(self.tracked_dirs[dir_path])
for repo_path in self.tracked_repos.keys():
all_files[repo_path] = []
for file_path in os.listdir(repo_path):
file_path = os.path.join(repo_path, file_path)
if file_path.lower().endswith('.yar') or file_path.lower().endswith('.yara'):
all_files[repo_path].append(file_path)
file_count += 1
# if we have no files to compile then we have nothing to do
if file_count == 0:
log.debug("no files to compile")
self.rules = None
return False
if self.test_mode:
execution_times = [] # of (total_seconds, buffer_type, file_name, rule_name)
execution_errors = [] # of (error_message, buffer_type, file_name, rule_name)
random_buffer = os.urandom(1024 * 1024) # random data to scan
for namespace in all_files.keys():
for file_path in all_files[namespace]:
with open(file_path, 'r') as fp:
log.debug("loading namespace {} rule file {}".format(namespace, file_path))
data = fp.read()
try:
# compile the file as a whole first, make sure that works
rule_context = yara.compile(source=data)
rule_count += 1
except Exception as e:
log.error("unable to compile {}: {}".format(file_path, str(e)))
continue
if self.test_mode:
parser = plyara.Plyara()
parsed_rules = { } # key = rule_name, value = parsed_yara_rule
for parsed_rule in parser.parse_string(data):
parsed_rules[parsed_rule['rule_name']] = parsed_rule
for rule_name in parsed_rules.keys():
# some rules depend on other rules, so we deal with that here
dependencies = [] # list of rule_names that this rule needs
rule_context = None
while True:
# compile all the rules we've collected so far as one
dep_source = '\n'.join([parser.rebuild_yara_rule(parsed_rules[r])
for r in dependencies])
try:
rule_context = yara.compile(source='{}\n{}'.format(dep_source,
parser.rebuild_yara_rule(parsed_rules[rule_name])))
break
except Exception as e:
# some rules depend on other rules
m = re.search(r'undefined identifier "([^"]+)"', str(e))
if m:
dependency = m.group(1)
if dependency in parsed_rules:
# add this rule to the compilation and try again
dependencies.insert(0, dependency)
continue
log.warning("rule {} in file {} does not compile by itself: {}".format(
rule_name, file_path, e))
rule_context = None
break
if not rule_context:
continue
if dependencies:
log.info("testing {}:{},{}".format(file_path, rule_name, ','.join(dependencies)))
else:
log.info("testing {}:{}".format(file_path, rule_name))
start_time = time.time()
try:
rule_context.match(data=random_buffer, timeout=5)
end_time = time.time()
total_seconds = end_time - start_time
execution_times.append((total_seconds, 'random', file_path, rule_name))
except Exception as e:
execution_errors.append((str(e), 'random', file_path, rule_name))
for x in range(255):
byte_buffer = bytes([x]) * (1024 * 1024)
start_time = time.time()
try:
rule_context.match(data=byte_buffer, timeout=5)
end_time = time.time()
total_seconds = end_time - start_time
execution_times.append((total_seconds, 'byte({})'.format(x), file_path, rule_name))
except Exception as e:
execution_errors.append((str(e), 'byte({})'.format(x), file_path, rule_name))
# if we fail once we break out
break
# then we just store the source to be loaded all at once in the compilation that gets used
if namespace not in sources:
sources[namespace] = []
sources[namespace].append(data)
if self.test_mode:
execution_times = sorted(execution_times, key=lambda x: x[0])
for execution_time, buffer_type, file_path, yara_rule in execution_times:
print("{}:{} <{}> {}".format(file_path, yara_rule, buffer_type, execution_time))
for error_message, buffer_type, file_path, yara_rule in execution_errors:
print("{}:{} <{}> {}".format(file_path, yara_rule, buffer_type, error_message))
return False
for namespace in sources.keys():
sources[namespace] = '\r\n'.join(sources[namespace])
try:
log.info("loading {} rules".format(rule_count))
self.rules = yara.compile(sources=sources)
return True
except Exception as e:
log.error("unable to compile all yara rules combined: {}".format(str(e)))
self.rules = None
return False
# we're keeping things backwards compatible here...
def scan(self,
file_path,
yara_stdout_file=None,
yara_stderr_file=None,
external_vars={}):
"""
Scans the given file with the loaded yara rules. Returns True if at least one yara rule matches, False otherwise.
The ``scan_results`` property will contain the results of the scan.
:param file_path: The path to the file to scan.
:type file_path: str
:param yara_stdout_file: Ignored.
:param yara_stderr_file: Ignored.
:external_vars: dict of variables to pass to the scanner as external yara variables (typically used in the condition of the rule.)
:type external_vars: dict
:rtype: bool
"""
assert self.rules is not None
# scan the file
# external variables come from the profile points added to the file
yara_matches = self.rules.match(file_path, externals=external_vars, timeout=5)
return self._scan(file_path, None, yara_matches, yara_stdout_file, yara_stderr_file, external_vars)
def scan_data(self,
data,
yara_stdout_file=None,
yara_stderr_file=None,
external_vars={}):
"""
Scans the given data with the loaded yara rules. ``data`` can be either a str or bytes object. Returns True if at least one yara rule matches, False otherwise.
The ``scan_results`` property will contain the results of the scan.
:param data: The data to scan.
:type data: str or bytes
:param yara_stdout_file: Ignored.
:param yara_stderr_file: Ignored.
:external_vars: dict of variables to pass to the scanner as external yara variables (typically used in the condition of the rule.)
:type external_vars: dict
:rtype: bool
"""
assert self.rules is not None
# scan the data stream
# external variables come from the profile points added to the file
yara_matches = self.rules.match(data=data, externals=external_vars, timeout=5)
return self._scan(None, data, yara_matches, yara_stdout_file, yara_stderr_file, external_vars)
def _scan(self,
file_path,
data,
yara_matches,
yara_stdout_file=None,
yara_stderr_file=None,
external_vars={}):
# if we didn't specify a file_path then we default to an empty string
# that will be the case when we are scanning a data chunk
if file_path is None:
file_path = ''
# the mime type of the file
# we'll figure it out if we need to
mime_type = None
# the list of matches after we filter
self.scan_results = []
for match_result in yara_matches:
skip = False # state flag
# is this a rule we've blacklisted?
if match_result.rule in self.blacklist:
log.debug("rule {} is blacklisted".format(match_result.rule))
continue
for directive in match_result.meta:
value = match_result.meta[directive]
# everything we're looking for is a string
if not isinstance(value, str):
continue
# you can invert the logic by starting the value with !
inverted = False
if value.startswith('!'):
value = value[1:]
inverted = True
# you can use regex by starting string with re: (after optional negation)
use_regex = False
if value.startswith('re:'):
value = value[3:]
use_regex = True
# or you can use substring matching with sub:
use_substring = False
if value.startswith('sub:'):
value = value[4:]
use_substring = True
# figure out what we're going to compare against
compare_target = None
if directive.lower() == 'file_ext':
if '.' not in file_path:
compare_target = ''
else:
compare_target = file_path.rsplit('.', maxsplit=1)[1]
elif directive.lower() == 'mime_type':
# have we determined the mime type for this file yet?
if mime_type is None:
if not file_path:
mime_type = ''
else:
p = Popen(['file', '-b', '--mime-type', file_path], stdout=PIPE)
mime_type = p.stdout.read().decode().strip()
log.debug("got mime type {} for {}".format(mime_type, file_path))
compare_target = mime_type
elif directive.lower() == 'file_name':
compare_target = os.path.basename(file_path)
elif directive.lower() == 'full_path':
compare_target = file_path
else:
# not a meta tag we're using
#log.debug("not a valid meta directive {}".format(directive))
continue
log.debug("compare target is {} for directive {}".format(compare_target, directive))
# figure out how to compare what is supplied by the user to the search target
if use_regex:
compare_function = lambda user_supplied, target: re.search(user_supplied, target)
elif use_substring:
compare_function = lambda user_supplied, target: user_supplied in target
else:
compare_function = lambda user_supplied, target: user_supplied.lower() == target.lower()
matches = False
for search_item in [x.strip() for x in value.lower().split(',')]:
matches = matches or compare_function(search_item, compare_target)
#log.debug("search item {} vs compare target {} matches {}".format(search_item, compare_target, matches))
if ( inverted and matches ) or ( not inverted and not matches ):
log.debug("skipping yara rule {} for file {} directive {} list {} negated {} regex {} subsearch {}".format(
match_result.rule, file_path, directive, value, inverted, use_regex, use_substring))
skip = True
break # we are skipping so we don't need to check anything else
if not skip:
self.scan_results.append(match_result)
# get rid of the yara object and just return dict
# also includes a "target" (reference to what was scanned)
self.scan_results = [{
'target': file_path,
'meta': o.meta,
'namespace': o.namespace,
'rule': o.rule,
'strings': o.strings,
'tags': o.tags } for o in self.scan_results]
# this is for backwards compatible support
if yara_stdout_file is not None:
try:
with open(yara_stdout_file, 'w') as fp:
json.dump(self.scan_results, fp, indent=4, sort_keys=True)
except Exception as e:
log.error("unable to write to {}: {}".format(yara_stdout_file, str(e)))
return self.has_matches
@property
def has_matches(self):
return len(self.scan_results) != 0
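# Example of consuming scan results (illustrative; the rule, file and directory
# names below are made up):
#
#   scanner = YaraScanner()
#   scanner.track_yara_dir('/opt/signatures/crimeware')
#   scanner.load_rules()
#   if scanner.scan('/tmp/sample.bin'):
#       for match in scanner.scan_results:
#           print(match['rule'], match['namespace'], match['tags'])
#
# Rules can also restrict where they apply through string meta directives handled
# in _scan() above (file_ext, file_name, full_path, mime_type), optionally negated
# with ! or matched with re:/sub:, e.g. ``file_ext = "exe,dll"`` or
# ``mime_type = "!re:^image/"``.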
# typically you might want to start a process, load the rules, then fork() for each client to scan
# the idea being that each child process will be reusing the same yara rules loaded in memory
# in practice, the yara rules compile into some kind of huge blob inside libyara
# and the amount of time it takes the kernel to do the clone() seems to gradually increase as a result of that
# so the rules are loaded into each process and are reused until re-loaded
#
# each scanner listens on a local unix socket for new things to scan
# once connected the following protocol is observed
# client sends one byte with the following possible values
# 1) what follows is a file path
# 2) what follows is a data stream
# in either case the client sends an unsigned integer in network byte order
# that is the size of the following data (either data stream or file name)
# finally the client sends another unsigned integer in network byte order
# followed by a JSON hash of all the external variables to define for the scan
# a size of 0 would indicate an empty JSON file
#
# once received the scanner will scan the data (or the file) and submit a result back to the client
# the result will be a data block with one of the following values
# * an empty block meaning no matches
# * a pickled exception for yara scanning failures
# * a pickled result dictionary
# then the server will close the connection
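# A minimal sketch of the framing described above, shown for illustration only.
# The helper name below is hypothetical and not part of this module's API; the
# real client-side implementation is _scan() further down, which also connects
# to a scanner socket and reads the reply.
def _example_frame_request(file_path, ext_vars=None):
    """Illustration only: build the raw bytes for a file path scan request."""
    payload = file_path.encode()
    ext_json = json.dumps(ext_vars).encode() if ext_vars else b''
    return b''.join([
        b'1',                                        # COMMAND_FILE_PATH (defined below)
        struct.pack('!I', len(payload)), payload,    # network-order size + file path
        struct.pack('!I', len(ext_json)), ext_json,  # size of 0 means empty external vars
    ])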
COMMAND_FILE_PATH = b'1'
COMMAND_DATA_STREAM = b'2'
DEFAULT_BASE_DIR = '/opt/yara_scanner'
DEFAULT_SIGNATURE_DIR = '/opt/signatures'
DEFAULT_SOCKET_DIR = 'socket'
class YaraScannerServer(object):
def __init__(self, base_dir=DEFAULT_BASE_DIR, signature_dir=DEFAULT_SIGNATURE_DIR, socket_dir=DEFAULT_SOCKET_DIR,
update_frequency=60, backlog=50):
# set to True to gracefully shutdown
self.shutdown = multiprocessing.Event()
# set to True to gracefully shutdown the current scanner (used for reloading)
self.current_scanner_shutdown = None # threading.Event
# primary scanner controller
self.process_manager = None
# list of YaraScannerServer Process objects
# there will be one per cpu available as returned by multiprocessing.cpu_count()
self.servers = [None for _ in range(multiprocessing.cpu_count())]
# base directory of yara scanner
self.base_dir = base_dir
# the directory that contains the signatures to load
self.signature_dir = signature_dir
# the directory that contains the unix sockets
self.socket_dir = socket_dir
# how often do we check to see if the yara rules changed? (in seconds)
self.update_frequency = update_frequency
# parameter to the socket.listen() function (how many connections to backlog)
self.backlog = backlog
#
# the following variables are specific to the child proceses
#
# the "cpu index" of this process (used to determine the name of the unix socket)
self.cpu_index = None
# the path to the unix socket this process is using
self.socket_path = None
# the socket we are listening on for scan requests
self.server_socket = None
# the scanner we're using for this process
self.scanner = None
# set to True when we receive a SIGUSR1
self.sigusr1 = False
#
# scanning processes die when they need to reload rules
# this is due to what seems like a minor memory leak in the yara python library
# so this process just watches for dead scanners and restarts them if the system isn't stopping
#
def run_process_manager(self):
def _handler(signum, frame):
self.shutdown.set()
signal.signal(signal.SIGTERM, _handler)
signal.signal(signal.SIGINT, _handler)
try:
while not self.shutdown.is_set():
try:
self.execute_process_manager()
time.sleep(0.1)
except Exception as e:
log.error("uncaught exception: {}".format(e))
time.sleep(1)
except KeyboardInterrupt:
pass
# wait for all the scanners to die...
for server in self.servers:
if server:
log.info("waiting for scanner {} to exit...".format(server.pid))
server.join()
log.info("exiting")
def execute_process_manager(self):
for i, p in enumerate(self.servers):
if self.servers[i] is not None:
if not self.servers[i].is_alive():
log.info("detected dead scanner {}".format(self.servers[i].pid))
self.servers[i].join()
self.servers[i] = None
for i, scanner in enumerate(self.servers):
if scanner is None:
log.info("starting scanner on cpu {}".format(i))
self.servers[i] = multiprocessing.Process(target=self.run, name="Yara Scanner Server ({})".format(i), args=(i,))
self.servers[i].start()
log.info("started scanner on cpu {} with pid {}".format(i, self.servers[i].pid))
def initialize_server_socket(self):
self.server_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.server_socket.settimeout(1)
# the path of the unix socket will be socket_dir/cpu_index where cpu_index >= 0
self.socket_path = os.path.join(self.base_dir, self.socket_dir, str(self.cpu_index))
log.info("initializing server socket on {}".format(self.socket_path))
if os.path.exists(self.socket_path):
try:
os.remove(self.socket_path)
except Exception as e:
log.error("unable to remove {}: {}".format(self.socket_path, e))
self.server_socket.bind(self.socket_path)
self.server_socket.listen(self.backlog)
def kill_server_socket(self):
if self.server_socket is None:
return
try:
log.info("closing server socket")
self.server_socket.close()
except Exception as e:
log.error("unable to close server socket: {}".format(e))
self.server_socket = None
if os.path.exists(self.socket_path):
try:
os.remove(self.socket_path)
except Exception as e:
log.error("unable to remove {}: {}".format(self.socket_path, e))
def initialize_scanner(self):
log.info("initializing scanner")
new_scanner = YaraScanner(signature_dir=self.signature_dir)
new_scanner.load_rules()
self.scanner = new_scanner
def start(self):
self.process_manager = multiprocessing.Process(target=self.run_process_manager)
self.process_manager.start()
log.info("started process manager on pid {}".format(self.process_manager.pid))
def stop(self):
if not self.shutdown.is_set():
self.shutdown.set()
self.wait()
# process manager waits for the child processes to exit so we're done at this point
def wait(self, timeout=None):
log.debug("waiting for process manager to exit...")
if self.process_manager:
self.process_manager.join()
self.process_manager = None
def run(self, cpu_index):
self.cpu_index = cpu_index # starting at 0
def _handler(signum, frame):
self.current_scanner_shutdown.set()
signal.signal(signal.SIGHUP, _handler)
signal.signal(signal.SIGTERM, _handler)
signal.signal(signal.SIGINT, _handler)
self.current_scanner_shutdown = threading.Event()
try:
# load up the yara scanner
self.initialize_scanner()
# watch for the rules to change in another thread
self.start_rules_monitor()
while not self.shutdown.is_set():
try:
self.execute()
if self.current_scanner_shutdown.is_set():
log.info("got signal to reload rules: exiting...")
break
except InterruptedError:
pass
except Exception as e:
log.error("uncaught exception: {} ({})".format(e, type(e)))
except KeyboardInterrupt:
log.info("caught keyboard interrupt - exiting")
self.stop_rules_monitor()
self.kill_server_socket()
def execute(self):
# are we listening on the socket yet?
if not self.server_socket:
try:
self.initialize_server_socket()
except Exception as e:
self.kill_server_socket()
# don't spin the cpu on failing to allocate the socket
self.shutdown.wait(timeout=1)
return
# get the next client connection
try:
log.debug("waiting for client")
client_socket, _ = self.server_socket.accept()
except socket.timeout as e:
# nothing came in while we were waiting (check for shutdown and try again)
return
try:
self.process_client(client_socket)
except Exception as e:
log.error("unable to process client request: {}".format(e))
traceback.print_exc()
finally:
try:
client_socket.close()
except Exception as e:
log.error("unable to close client connection: {}".format(e))
def process_client(self, client_socket):
# read the command byte
command = client_socket.recv(1)
data_or_file = read_data_block(client_socket).decode()
ext_vars = read_data_block(client_socket)
if not ext_vars:
ext_vars = {}
else:
# parse the ext vars json
ext_vars = json.loads(ext_vars.decode())
try:
matches = False
if command == COMMAND_FILE_PATH:
log.info("scanning file {}".format(data_or_file))
matches = self.scanner.scan(data_or_file, external_vars=ext_vars)
elif command == COMMAND_DATA_STREAM:
log.info("scanning {} byte data stream".format(len(data_or_file)))
matches = self.scanner.scan_data(data_or_file, external_vars=ext_vars)
else:
log.error("invalid command {}".format(command))
return
except Exception as e:
log.info("scanning failed: {}".format(e))
send_data_block(client_socket, pickle.dumps(e))
return
if not matches:
# a data length of 0 means we didn't match anything
send_data_block(client_socket, b'')
else:
# encode and submit the JSON result of the client
#print(self.scanner.scan_results)
send_data_block(client_socket, pickle.dumps(self.scanner.scan_results))
def start_rules_monitor(self):
"""Starts a thread the monitor the yara rules. When it detects the yara rules
have changed it creates a new YaraScanner and swaps it in (for self.scanner)."""
self.rule_monitor_thread = threading.Thread(target=self.rule_monitor_loop,
name='Scanner {} Rules Monitor'.format(self.cpu_index),
daemon=False)
self.rule_monitor_thread.start()
def rule_monitor_loop(self):
log.debug("starting rules monitoring")
counter = 0
while True:
if self.shutdown.is_set():
break
if self.current_scanner_shutdown.is_set():
break
if counter >= self.update_frequency:
log.debug('checking for new rules...')
# do we need to reload the yara rules?
if self.scanner.check_rules():
self.current_scanner_shutdown.set()
break
counter = 0
counter += 1
self.shutdown.wait(1)
log.debug("stopped rules monitoring")
def stop_rules_monitor(self):
self.current_scanner_shutdown.set()
self.rule_monitor_thread.join(5)
if self.rule_monitor_thread.is_alive():
log.error("unable to stop rule monitor thread")
def _scan(command, data_or_file, ext_vars={}, base_dir=DEFAULT_BASE_DIR, socket_dir=DEFAULT_SOCKET_DIR):
# pick a random scanner
# it doesn't matter which one, as long as the load is evenly distributed
starting_index = scanner_index = random.randrange(multiprocessing.cpu_count())
while True:
socket_path = os.path.join(base_dir, socket_dir, str(scanner_index))
ext_vars_json = b''
if ext_vars:
ext_vars_json = json.dumps(ext_vars).encode()
client_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
client_socket.connect(socket_path)
client_socket.sendall(command)
send_data_block(client_socket, data_or_file.encode())
send_data_block(client_socket, ext_vars_json)
result = read_data_block(client_socket)
if result == b'':
return {}
result = pickle.loads(result)
if isinstance(result, BaseException):
raise result
return result
except socket.error as e:
log.debug("possible restarting scanner: {}".format(e))
# in the case where a scanner is restarting (when loading rules)
# we will receive a socket error when we try to connect
# just move on to the next socket and try again
scanner_index += 1
if scanner_index >= multiprocessing.cpu_count():
scanner_index = 0
# if we've swung back around wait for a few seconds and try again
if scanner_index == starting_index:
log.error("no scanners available")
raise
continue
def scan_file(path, base_dir=DEFAULT_BASE_DIR, socket_dir=DEFAULT_SOCKET_DIR, ext_vars={}):
    return _scan(COMMAND_FILE_PATH, path, ext_vars=ext_vars, base_dir=base_dir, socket_dir=socket_dir)
def scan_data(data, base_dir=DEFAULT_BASE_DIR, socket_dir=DEFAULT_SOCKET_DIR, ext_vars={}):
    return _scan(COMMAND_DATA_STREAM, data, ext_vars=ext_vars, base_dir=base_dir, socket_dir=socket_dir)
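# Usage sketch for the client helpers above (assumes a YaraScannerServer is
# already running and listening on the unix sockets under base_dir/socket_dir):
#
#   matches = scan_file('/tmp/sample.bin')
#   for match in matches:
#       print(match['rule'])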
#
# protocol routines
#
def read_n_bytes(s, n):
"""Reads n bytes from socket s. Returns the bytearray of the data read."""
bytes_read = 0
_buffer = []
while bytes_read < n:
data = s.recv(n - bytes_read)
if data == b'':
break
bytes_read += len(data)
_buffer.append(data)
result = b''.join(_buffer)
if len(result) != n:
log.warning("expected {} bytes but read {}".format(n, len(result)))
return result
def read_data_block_size(s):
"""Reads the size of the next data block from the given socket."""
size = struct.unpack('!I', read_n_bytes(s, 4))
size = size[0]
log.debug("read command block size {}".format(size))
return size
def read_data_block(s):
"""Reads the next data block from socket s. Returns the bytearray of the data portion of the block."""
# read the size of the data block (4 byte network order integer)
size = struct.unpack('!I', read_n_bytes(s, 4))
size = size[0]
#log.debug("read command block size {}".format(size))
# read the data portion of the data block
return read_n_bytes(s, size)
def iterate_data_blocks(s):
"""Reads the next data block until a block0 is read."""
while True:
block = read_data_block(s)
if len(block) == 0:
return
yield block
def send_data_block(s, data):
"""Writes the given data to the given socket as a data block."""
message = b''.join([struct.pack("!I", len(data)), data])
#log.debug("sending data block length {} ({})".format(len(message), message[:64]))
s.sendall(message)
def send_block0(s):
"""Writes an empty data block to the given socket."""
send_data_block(s, b'')
def main():
import argparse
import pprint
import sys
#from yara_scanner import YaraScanner, YaraJSONEncoder
parser = argparse.ArgumentParser(description="Scan the given file with yara using all available rulesets.")
parser.add_argument('paths', metavar='PATHS', nargs="*",
help="One or more files or directories to scan with yara.")
parser.add_argument('-r', '--recursive', required=False, default=False, action='store_true', dest='recursive',
help="Recursively scan directories.")
parser.add_argument('--from-stdin', required=False, default=False, action='store_true', dest='from_stdin',
help="Read the list of files to scan from stdin.")
parser.add_argument('--debug', dest='log_debug', default=False, action='store_true',
help="Log debug level messages.")
parser.add_argument('-j', '--dump-json', required=False, default=False, action='store_true', dest='dump_json',
help="Dump JSON details of matches. Otherwise just list the rules that hit.")
parser.add_argument('-t', '--test', required=False, default=False, action='store_true', dest='test',
help="Test each yara file separately against different types of buffers for performance issues.")
parser.add_argument('-y', '--yara-rules', required=False, default=[], action='append', dest='yara_rules',
help="One yara rule to load. You can specify more than one of these.")
parser.add_argument('-Y', '--yara-dirs', required=False, default=[], action='append', dest='yara_dirs',
help="One directory containing yara rules to load. You can specify more than one of these.")
parser.add_argument('-G', '--yara-repos', required=False, default=[], action='append', dest='yara_repos',
help="One directory that is a git repository that contains yara rules to load. You can specify more than one of these.")
parser.add_argument('-c', '--compile-only', required=False, default=False, action='store_true', dest='compile_only',
help="Compile the rules and exit.")
parser.add_argument('-b', '--blacklist', required=False, default=[], action='append', dest='blacklisted_rules',
help="A rule to blacklist (remove from the results.) You can specify more than one of these options.")
parser.add_argument('-B', '--blacklist-path', required=False, default=None, dest='blacklisted_rules_path',
help="Path to a file that contains a list of rules to blacklist, one per line.")
parser.add_argument('-d', '--signature-dir', dest='signature_dir', default=None,
help="DEPRECATED: Use a different signature directory than the default.")
args = parser.parse_args()
if len(args.yara_rules) == 0 and len(args.yara_dirs) == 0 and len(args.yara_repos) == 0 and args.signature_dir is None:
args.signature_dir = DEFAULT_SIGNATURE_DIR
logging.basicConfig(level=logging.DEBUG if args.log_debug else logging.INFO)
# load any blacklisting
if args.blacklisted_rules_path is not None:
with open(args.blacklisted_rules_path, 'r') as fp:
args.blacklisted_rules.extend([x.strip() for x in fp.read().split('\n')])
scanner = YaraScanner(signature_dir=args.signature_dir, test_mode=args.test)
scanner.blacklist = args.blacklisted_rules
for file_path in args.yara_rules:
scanner.track_yara_file(file_path)
for dir_path in args.yara_dirs:
scanner.track_yara_dir(dir_path)
for repo_path in args.yara_repos:
scanner.track_yara_repository(repo_path)
scanner.load_rules()
if scanner.check_rules():
scanner.load_rules()
if args.compile_only or args.test:
sys.exit(0)
exit_result = 0
def scan_file(file_path):
global exit_result
try:
if scanner.scan(file_path):
if args.dump_json:
json.dump(scanner.scan_results, sys.stdout, sort_keys=True, indent=4, cls=YaraJSONEncoder)
else:
print(file_path)
for match in scanner.scan_results:
print('\t{}'.format(match['rule']))
except Exception as e:
log.error("scan failed for {}: {}".format(file_path, e))
exit_result = 1
def scan_dir(dir_path):
for file_path in os.listdir(dir_path):
file_path = os.path.join(dir_path, file_path)
if os.path.isdir(file_path):
if args.recursive:
scan_dir(file_path)
else:
scan_file(file_path)
if args.from_stdin:
for line in sys.stdin:
line = line.strip()
scan_file(line)
else:
for path in args.paths:
if os.path.isdir(path):
scan_dir(path)
else:
scan_file(path)
sys.exit(exit_result)
if __name__ == '__main__':
main()
|
util.py
|
#
# Module providing various facilities to other parts of the package
#
# multiprocessing/util.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
# Modifications Copyright (c) 2020 Cloudlab URV
#
import traceback
import weakref
import redis
import uuid
import logging
import sys
import threading
import io
import os
import json
import socket
from lithops.config import load_config
from . import config as mp_config
logger = logging.getLogger(__name__)
#
# Picklable redis client
#
class PicklableRedis(redis.StrictRedis):
def __init__(self, *args, **kwargs):
self._args = args
self._kwargs = kwargs
logger.debug('Creating picklable Redis client')
super().__init__(*self._args, **self._kwargs)
def __getstate__(self):
return self._args, self._kwargs
def __setstate__(self, state):
self.__init__(*state[0], **state[1])
def get_redis_client(**overwrites):
try:
conn_params = load_config()['redis']
except KeyError:
raise Exception('Redis section not found in your config')
conn_params.update(overwrites)
return PicklableRedis(**conn_params)
#
# Helper functions
#
def get_uuid(length=12):
return uuid.uuid1().hex[:length]
def make_stateless_script(script):
# Make stateless redis Lua script (redis.client.Script)
# Just to ensure no redis client is cached and to avoid
# creating another connection when unpickling this object.
script.registered_client = None
return script
def export_execution_details(futures, lithops_executor):
if mp_config.get_parameter(mp_config.EXPORT_EXECUTION_DETAILS):
try:
path = os.path.realpath(mp_config.get_parameter(mp_config.EXPORT_EXECUTION_DETAILS))
job_id = futures[0].job_id
plots_file_name = '{}_{}'.format(lithops_executor.executor_id, job_id)
lithops_executor.plot(fs=futures, dst=os.path.join(path, plots_file_name))
stats = {fut.call_id: fut.stats for fut in futures}
stats_file_name = '{}_{}_stats.json'.format(lithops_executor.executor_id, job_id)
with open(os.path.join(path, stats_file_name), 'w') as stats_file:
stats_json = json.dumps(stats, indent=4)
stats_file.write(stats_json)
except Exception as e:
logger.error('Error while exporting execution results: {}\n{}'.format(e, traceback.format_exc()))
def get_network_ip():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
s.connect(('<broadcast>', 0))
return s.getsockname()[0]
#
# object for counting remote references (redis keys)
# and garbage collect them automatically when nothing
# is pointing at them
#
class RemoteReference:
def __init__(self, referenced, managed=False, client=None):
if isinstance(referenced, str):
referenced = [referenced]
if not isinstance(referenced, list):
raise TypeError("referenced must be a key (str) or a list of keys")
self._referenced = referenced
# reference counter key
self._rck = '{}-{}'.format('ref', self._referenced[0])
self._referenced.append(self._rck)
self._client = client or get_redis_client()
self._callback = None
self.managed = managed
@property
def managed(self):
return self._callback is None
@managed.setter
def managed(self, value):
managed = value
if self._callback is not None:
self._callback.atexit = False
self._callback.detach()
if managed:
self._callback = None
else:
self._callback = weakref.finalize(self, type(self)._finalize,
self._client, self._rck, self._referenced)
def __getstate__(self):
return (self._rck, self._referenced,
self._client, self.managed)
def __setstate__(self, state):
(self._rck, self._referenced,
self._client) = state[:-1]
self._callback = None
self.managed = state[-1]
self.incref()
def incref(self):
if not self.managed:
pipeline = self._client.pipeline()
pipeline.incr(self._rck, 1)
pipeline.expire(self._rck, mp_config.get_parameter(mp_config.REDIS_EXPIRY_TIME))
counter, _ = pipeline.execute()
return int(counter)
def decref(self):
if not self.managed:
pipeline = self._client.pipeline()
pipeline.decr(self._rck, 1)
pipeline.expire(self._rck, mp_config.get_parameter(mp_config.REDIS_EXPIRY_TIME))
counter, _ = pipeline.execute()
return int(counter)
def refcount(self):
count = self._client.get(self._rck)
return 1 if count is None else int(count) + 1
def collect(self):
if len(self._referenced) > 0:
self._client.delete(*self._referenced)
self._referenced = []
@staticmethod
def _finalize(client, rck, referenced):
count = int(client.decr(rck, 1))
if count < 0 and len(referenced) > 0:
client.delete(*referenced)
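# Usage sketch (the key name is made up for illustration; a Redis instance
# reachable through the lithops config is assumed):
#
#   ref = RemoteReference('mp-queue-abc123')   # unmanaged by default: refcounted in redis
#   ref.incref()     # also done automatically when the object is unpickled (__setstate__)
#   ref.decref()     # decrement; _finalize() deletes the keys when nothing references them
#   ref.collect()    # or delete the referenced keys explicitly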
#
# Remote logging
#
def setup_log_streaming(executor):
if mp_config.get_parameter(mp_config.STREAM_STDOUT):
stream = executor.executor_id
logger.debug('Log streaming enabled, stream name: {}'.format(stream))
remote_logger = RemoteLoggingFeed(stream)
remote_logger.start()
return remote_logger, stream
else:
return None, None
class RemoteLogIOBuffer:
def __init__(self, stream):
self._feeder_thread = None  # no background feeder thread; logs are pushed out on flush()
self._buff = io.StringIO()
self._redis = get_redis_client()
self._stream = stream
def write(self, log):
self._buff.write(log)
# self.flush()
self._old_stdout.write(log)
def flush(self):
log = self._buff.getvalue()
self._redis.publish(self._stream, log)
self._buff = io.StringIO()
def start(self):
import sys
self._old_stdout = sys.stdout
sys.stdout = self
logger.debug('Starting remote logging feed to stream %s', self._stream)
def stop(self):
import sys
sys.stdout = self._old_stdout
logger.debug('Stopping remote logging feed to stream %s', self._stream)
class RemoteLoggingFeed:
def __init__(self, stream):
self._logger_thread = threading.Thread(target=self._logger_monitor, args=(stream,))
self._stream = stream
self._enabled = False
def _logger_monitor(self, stream):
logger.debug('Starting logger feeder thread for stream %s', stream)
redis_pubsub = get_redis_client().pubsub()
redis_pubsub.subscribe(stream)
while self._enabled:
msg = redis_pubsub.get_message(ignore_subscribe_messages=True, timeout=1)
if msg is None:
continue
if 'data' in msg:
sys.stdout.write(msg['data'].decode('utf-8'))
logger.debug('Logger monitor thread for stream %s finished', stream)
def start(self):
# self._logger_thread.daemon = True
self._enabled = True
self._logger_thread.start()
def stop(self):
self._enabled = False
self._logger_thread.join(5)
|
test_threading_local.py
|
import unittest
from doctest import DocTestSuite
from test import test_support
import weakref
import gc
import sys
# Modules under test
_thread = test_support.import_module('thread')
threading = test_support.import_module('threading')
import _threading_local
class Weak(object):
pass
def target(local, weaklist):
weak = Weak()
local.weak = weak
weaklist.append(weakref.ref(weak))
class BaseLocalTest:
def test_local_refs(self):
self._local_refs(20)
self._local_refs(50)
self._local_refs(100)
def _local_refs(self, n):
local = self._local()
weaklist = []
for i in range(n):
t = threading.Thread(target=target, args=(local, weaklist))
t.start()
t.join()
del t
gc.collect()
self.assertEqual(len(weaklist), n)
# XXX _threading_local keeps the local of the last stopped thread alive.
deadlist = [weak for weak in weaklist if weak() is None]
self.assertIn(len(deadlist), (n-1, n))
# Assignment to the same thread local frees it sometimes (!)
local.someothervar = None
gc.collect()
deadlist = [weak for weak in weaklist if weak() is None]
self.assertIn(len(deadlist), (n-1, n), (n, len(deadlist)))
def test_derived(self):
# Issue 3088: if there is a threads switch inside the __init__
# of a threading.local derived class, the per-thread dictionary
# is created but not correctly set on the object.
# The first member set may be bogus.
import time
class Local(self._local):
def __init__(self):
time.sleep(0.01)
local = Local()
def f(i):
local.x = i
# Simply check that the variable is correctly set
self.assertEqual(local.x, i)
threads= []
for i in range(10):
t = threading.Thread(target=f, args=(i,))
t.start()
threads.append(t)
for t in threads:
t.join()
def test_derived_cycle_dealloc(self):
# http://bugs.python.org/issue6990
class Local(self._local):
pass
locals = None
passed = [False]
e1 = threading.Event()
e2 = threading.Event()
def f():
# 1) Involve Local in a cycle
cycle = [Local()]
cycle.append(cycle)
cycle[0].foo = 'bar'
# 2) GC the cycle (triggers threadmodule.c::local_clear
# before local_dealloc)
del cycle
gc.collect()
e1.set()
e2.wait()
# 4) New Locals should be empty
passed[0] = all(not hasattr(local, 'foo') for local in locals)
t = threading.Thread(target=f)
t.start()
e1.wait()
# 3) New Locals should recycle the original's address. Creating
# them in the thread overwrites the thread state and avoids the
# bug
locals = [Local() for i in range(10)]
e2.set()
t.join()
self.assertTrue(passed[0])
def test_arguments(self):
# Issue 1522237
from thread import _local as local
from _threading_local import local as py_local
for cls in (local, py_local):
class MyLocal(cls):
def __init__(self, *args, **kwargs):
pass
MyLocal(a=1)
MyLocal(1)
self.assertRaises(TypeError, cls, a=1)
self.assertRaises(TypeError, cls, 1)
def _test_one_class(self, c):
self._failed = "No error message set or cleared."
obj = c()
e1 = threading.Event()
e2 = threading.Event()
def f1():
obj.x = 'foo'
obj.y = 'bar'
del obj.y
e1.set()
e2.wait()
def f2():
try:
foo = obj.x
except AttributeError:
# This is expected -- we haven't set obj.x in this thread yet!
self._failed = "" # passed
else:
self._failed = ('Incorrectly got value %r from class %r\n' %
(foo, c))
sys.stderr.write(self._failed)
t1 = threading.Thread(target=f1)
t1.start()
e1.wait()
t2 = threading.Thread(target=f2)
t2.start()
t2.join()
# The test is done; just let t1 know it can exit, and wait for it.
e2.set()
t1.join()
self.assertFalse(self._failed, self._failed)
def test_threading_local(self):
self._test_one_class(self._local)
def test_threading_local_subclass(self):
class LocalSubclass(self._local):
"""To test that subclasses behave properly."""
self._test_one_class(LocalSubclass)
def _test_dict_attribute(self, cls):
obj = cls()
obj.x = 5
self.assertEqual(obj.__dict__, {'x': 5})
with self.assertRaises(AttributeError):
obj.__dict__ = {}
with self.assertRaises(AttributeError):
del obj.__dict__
def test_dict_attribute(self):
self._test_dict_attribute(self._local)
def test_dict_attribute_subclass(self):
class LocalSubclass(self._local):
"""To test that subclasses behave properly."""
self._test_dict_attribute(LocalSubclass)
class ThreadLocalTest(unittest.TestCase, BaseLocalTest):
_local = _thread._local
# Fails for the pure Python implementation
def test_cycle_collection(self):
class X:
pass
x = X()
x.local = self._local()
x.local.x = x
wr = weakref.ref(x)
del x
gc.collect()
self.assertIs(wr(), None)
class PyThreadingLocalTest(unittest.TestCase, BaseLocalTest):
_local = _threading_local.local
def test_main():
suite = unittest.TestSuite()
suite.addTest(DocTestSuite('_threading_local'))
suite.addTest(unittest.makeSuite(ThreadLocalTest))
suite.addTest(unittest.makeSuite(PyThreadingLocalTest))
try:
from thread import _local
except ImportError:
pass
else:
import _threading_local
local_orig = _threading_local.local
def setUp(test):
_threading_local.local = _local
def tearDown(test):
_threading_local.local = local_orig
suite.addTest(DocTestSuite('_threading_local',
setUp=setUp, tearDown=tearDown)
)
test_support.run_unittest(suite)
if __name__ == '__main__':
test_main()
|
DesktopUtils.pyw
|
import tkinter as tk
from dotenv import load_dotenv
import os
import importlib
from infi.systray import SysTrayIcon
from win32_adapter import *
import threading
import sys
import time
load_dotenv()
class DesktopUtils:
VERSION = "1.0"
def __init__(self):
self.widgets = {}
self.running_gui = False
self.refresh_thread = threading.Thread(target=self.refresh_widgets)
menu = (
("Open", "./img/icon.ico", self._open_gui),
("Exit", "./img/x.ico", self.__shutdown),
)
self.stray = SysTrayIcon("./img/icon.ico", "DesktopUtils", menu_options=menu)
def __shutdown(self, *_):
def stop():
DestroyWindow(self.stray._hwnd)
for widget in self.widgets:
instance = self.widgets[widget]["instance"]
try:
instance.root.destroy()
except RuntimeError:
pass
t = threading.Thread(target=stop)
t.start()
self.refresh_thread.join()
sys.exit(0)
def _run_stray(self):
self.stray.start()
def _open_gui(self, _):
if self.running_gui:
return
self.running_gui = True
root = tk.Tk()
root.geometry("500x500")
root.title("DesktopUtils V" + self.VERSION)
def close_win():
root.destroy()
self.running_gui = False
self._run_stray()
root.protocol("WM_DELETE_WINDOW", close_win)
root.mainloop()
def create_root(self, widget) -> tk.Tk:
root = tk.Tk()
root.overrideredirect(True)
root.wm_resizable(*widget.RESIZE)
root.geometry(f"{widget.SIZE[0]}x{widget.SIZE[1]}+{widget.START_POS[0]}+{widget.START_POS[1]}")
return root
def add_widget(self, info, widget):
generalInfo = info.copy()
generalInfo.pop("NAME")
self.widgets[info["NAME"]] = {
"information": generalInfo,
"plugin": widget
}
def refresh_widgets(self):
time.sleep(10)
while True:
for widget in self.widgets:
if self.widgets[widget]["instance"].REFRESH:
self.widgets[widget]["instance"].refresh()
time.sleep(20)
def _create_bar(self, root, w_):
top_bar = tk.Frame(root, bg=w_.BAR_COLOR, height=25, width=200)
xImage = tk.PhotoImage(file="./img/x.png")
x = tk.Button(top_bar, image=xImage, borderwidth=0, highlightthickness=0, command=w_.quit)
x.photo = xImage
x.place(x=180, y=2)
top_bar.bind("<ButtonRelease-1>", getattr(w_, "_Widget__button_release"))
top_bar.bind("<ButtonPress-1>", getattr(w_, "_Widget__button_press"))
top_bar.bind("<B1-Motion>", getattr(w_, "_Widget__move_window"))
top_bar.pack(side=tk.TOP, anchor=tk.E)
return top_bar
def run(self):
pluginPaths = next(os.walk("./plugins/"))[1]
for folderName in pluginPaths:
path = "./plugins/" + folderName
try:
with open(path + "/info.meda", "r") as fh:
infoMeDa = fh.readlines()
except FileNotFoundError:
print("Could not load " + folderName + ". info.meda is missing!")
continue
pluginInfo = {
"NAME": None,
"AUTHOR": None,
"DESCRIPTION": None,
"VERSION": None,
"MAIN": None,
"CLASS": None
}
for line in infoMeDa:
key, value = line.split("=")
if key in pluginInfo:
pluginInfo[key] = value.replace("\n", "").replace(" ", "_")
assert pluginInfo["MAIN"] is not None
path = path[2:].replace("/", ".")
main_filename = pluginInfo["MAIN"].replace(".py", "")
main_ = importlib.import_module(".." + main_filename, path)
class_ = getattr(main_, pluginInfo["CLASS"])
self.add_widget(pluginInfo, class_)
self._run_stray()
self.refresh_thread.start()
for widget in self.widgets:
root = self.create_root(self.widgets[widget]["plugin"])
w = self.widgets[widget]["plugin"](root)
self.widgets[widget]["instance"] = w
self._create_bar(root, w)
w.run()
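# Example plugin layout (hypothetical names, shown only to illustrate the
# info.meda format parsed in run() above; values cannot contain spaces):
#
#   plugins/clock/info.meda:
#       NAME=Clock
#       AUTHOR=SomeAuthor
#       DESCRIPTION=A_simple_clock_widget
#       VERSION=1.0
#       MAIN=clock.py
#       CLASS=Clock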
if __name__ == '__main__':
desktopUtils = DesktopUtils()
desktopUtils.run()
|
_coreg_gui.py
|
# -*- coding: utf-8 -*-
u"""Traits-based GUI for head-MRI coregistration.
Hierarchy
---------
This is the hierarchy of classes for control. Brackets like [1] denote
properties that are set to be equivalent.
::
CoregFrame: GUI for head-MRI coregistration.
|-- CoregModel (model): Traits object for estimating the head mri transform.
| |-- MRIHeadWithFiducialsModel (mri) [1]: Represent an MRI head shape (high and low res) with fiducials.
| | |-- SurfaceSource (bem_high_res): High-res MRI head
| | |-- SurfaceSource (bem_low_res): Low-res MRI head
| | +-- MRISubjectSource (subject_source) [2]: Find subjects in SUBJECTS_DIR and select one.
| |-- FiducialsSource (fid): Expose points of a given fiducials fif file.
| +-- DigSource (hsp): Expose measurement information from an inst file.
|-- MlabSceneModel (scene) [3]: mayavi.core.ui.mayavi_scene
|-- DataPanel (data_panel)
| |-- HeadViewController (headview) [4]: Set head views for the given coordinate system.
| | +-- MlabSceneModel (scene) [3*]: ``HeadViewController(scene=CoregFrame.scene)``
| |-- SubjectSelectorPanel (subject_panel): Subject selector panel
| | +-- MRISubjectSource (model) [2*]: ``SubjectSelectorPanel(model=self.model.mri.subject_source)``
| +-- FiducialsPanel (fid_panel): Set fiducials on an MRI surface.
| |-- MRIHeadWithFiducialsModel (model) [1*]: ``FiducialsPanel(model=CoregFrame.model.mri, headview=CoregFrame.headview)``
| |-- HeadViewController (headview) [4*]: ``FiducialsPanel(model=CoregFrame.model.mri, headview=CoregFrame.headview)``
| +-- SurfaceObject (hsp_obj) [5*]: ``CoregFrame.fid_panel.hsp_obj = CoregFrame.mri_obj``
|-- CoregPanel (coreg_panel): Coregistration panel for Head<->MRI with scaling.
| +-- FittingOptionsPanel (fitting_options_panel): panel for fitting options.
|-- SurfaceObject (mri_obj) [5]: Represent a solid object in a mayavi scene.
+-- PointObject ({hsp, eeg, lpa, nasion, rpa, hsp_lpa, hsp_nasion, hsp_rpa} + _obj): Represent a group of individual points in a mayavi scene.
In the MRI viewing frame, MRI points are transformed via scaling, then by
mri_head_t to the Neuromag head coordinate frame. Digitized points (in head
coordinate frame) are never transformed.
Units
-----
User-facing GUI values are in readable units:
- ``scale_*`` are in %
- ``trans_*`` are in mm
- ``rot_*`` are in °
Internal computation quantities ``parameters`` are in units of (for X/Y/Z):
- ``parameters[:3]`` are in radians
- ``parameters[3:6]`` are in m
- ``parameters[6:9]`` are in scale proportion
Conversions are handled via `np.deg2rad`, `np.rad2deg`, and appropriate
multiplications / divisions.
""" # noqa: E501
# Authors: Christian Brodbeck <christianbrodbeck@nyu.edu>
#
# License: BSD (3-clause)
import os
from ..externals.six.moves import queue
import re
from threading import Thread
import traceback
import warnings
import numpy as np
from mayavi.core.ui.mayavi_scene import MayaviScene
from mayavi.tools.mlab_scene_model import MlabSceneModel
from pyface.api import (error, confirm, OK, YES, NO, CANCEL, information,
FileDialog, GUI)
from traits.api import (Bool, Button, cached_property, DelegatesTo, Directory,
Enum, Float, HasTraits, HasPrivateTraits, Instance,
Int, on_trait_change, Property, Str, List, RGBColor)
from traitsui.api import (View, Item, Group, HGroup, VGroup, VGrid, EnumEditor,
Handler, Label, Spring, InstanceEditor, StatusItem,
UIInfo)
from traitsui.menu import Action, UndoButton, CancelButton, NoButtons
from tvtk.pyface.scene_editor import SceneEditor
from ..bem import make_bem_solution, write_bem_solution
from ..coreg import bem_fname, trans_fname
from ..defaults import DEFAULTS
from ..surface import _DistanceQuery
from ..transforms import (write_trans, read_trans, apply_trans, rotation,
rotation_angles, Transform, _ensure_trans,
rot_to_quat, _angle_between_quats)
from ..coreg import fit_matched_points, scale_mri, _find_fiducials_files
from ..viz._3d import _toggle_mlab_render
from ..utils import logger, set_config, _pl
from ._fiducials_gui import MRIHeadWithFiducialsModel, FiducialsPanel
from ._file_traits import trans_wildcard, DigSource, SubjectSelectorPanel
from ._viewer import (HeadViewController, PointObject, SurfaceObject,
_DEG_WIDTH, _MM_WIDTH, _BUTTON_WIDTH,
_SHOW_BORDER, _COREG_WIDTH, _SCALE_STEP_WIDTH,
_INC_BUTTON_WIDTH, _SCALE_WIDTH, _WEIGHT_WIDTH,
_MM_STEP_WIDTH, _DEG_STEP_WIDTH, _REDUCED_TEXT_WIDTH,
_RESET_LABEL, _RESET_WIDTH,
laggy_float_editor_scale, laggy_float_editor_deg,
laggy_float_editor_mm, laggy_float_editor_weight)
defaults = DEFAULTS['coreg']
class busy(object):
"""Set the GUI state to busy."""
def __enter__(self): # noqa: D105
GUI.set_busy(True)
def __exit__(self, type, value, traceback): # noqa: D105
GUI.set_busy(False)
def _pass(x):
"""Format text without changing it."""
return x
class CoregModel(HasPrivateTraits):
"""Traits object for estimating the head mri transform.
Notes
-----
Transform from head to mri space is modelled with the following steps:
* move the head shape to its nasion position
* rotate the head shape with user defined rotation around its nasion
* move the head shape by user defined translation
* move the head shape origin to the mri nasion
If MRI scaling is enabled,
* the MRI is scaled relative to its origin center (prior to any
transformation of the digitizer head)
Don't sync transforms to anything to prevent them from being recomputed
upon every parameter change.
"""
# data sources
mri = Instance(MRIHeadWithFiducialsModel, ())
hsp = Instance(DigSource, ())
# parameters
guess_mri_subject = Bool(True) # change MRI subject when dig file changes
grow_hair = Float(label=u"ΔHair", desc="Move the back of the MRI "
"head outwards to compensate for hair on the digitizer "
"head shape (mm)")
n_scale_params = Enum(0, 1, 3, desc="Scale the MRI to better fit the "
"subject's head shape (a new MRI subject will be "
"created with a name specified upon saving)")
scale_x = Float(100, label="X")
scale_y = Float(100, label="Y")
scale_z = Float(100, label="Z")
trans_x = Float(0, label=u"ΔX")
trans_y = Float(0, label=u"ΔY")
trans_z = Float(0, label=u"ΔZ")
rot_x = Float(0, label=u"∠X")
rot_y = Float(0, label=u"∠Y")
rot_z = Float(0, label=u"∠Z")
parameters = List()
last_parameters = List()
lpa_weight = Float(1.)
nasion_weight = Float(10.)
rpa_weight = Float(1.)
hsp_weight = Float(1.)
eeg_weight = Float(1.)
hpi_weight = Float(1.)
iteration = Int(-1)
icp_iterations = Int(20)
icp_angle = Float(0.2)
icp_distance = Float(0.2)
icp_scale = Float(0.2)
icp_fid_match = Enum('nearest', 'matched')
fit_icp_running = Bool(False)
fits_icp_running = Bool(False)
coord_frame = Enum('mri', 'head', desc='Display coordinate frame')
status_text = Str()
# options during scaling
scale_labels = Bool(True, desc="whether to scale *.label files")
copy_annot = Bool(True, desc="whether to copy *.annot files for scaled "
"subject")
prepare_bem_model = Bool(True, desc="whether to run mne_prepare_bem_model "
"after scaling the MRI")
# secondary to parameters
has_nasion_data = Property(
Bool, depends_on=['mri:nasion', 'hsp:nasion'])
has_lpa_data = Property(
Bool, depends_on=['mri:lpa', 'hsp:lpa'])
has_rpa_data = Property(
Bool, depends_on=['mri:rpa', 'hsp:rpa'])
has_fid_data = Property( # conjunction
Bool, depends_on=['has_nasion_data', 'has_lpa_data', 'has_rpa_data'])
has_mri_data = Property(
Bool, depends_on=['transformed_high_res_mri_points'])
has_hsp_data = Property(
Bool, depends_on=['has_mri_data', 'hsp:points'])
has_eeg_data = Property(
Bool, depends_on=['has_mri_data', 'hsp:eeg_points'])
has_hpi_data = Property(
Bool, depends_on=['has_mri_data', 'hsp:hpi_points'])
n_icp_points = Property(
Int, depends_on=['has_nasion_data', 'nasion_weight',
'has_lpa_data', 'lpa_weight',
'has_rpa_data', 'rpa_weight',
'hsp:points', 'hsp_weight',
'hsp:eeg_points', 'eeg_weight',
'hsp:hpi_points', 'hpi_weight'])
changes = Property(depends_on=['parameters[]', 'last_parameters[]'])
# target transforms
mri_head_t = Property(
desc="Transformation of the scaled MRI to the head coordinate frame.",
depends_on=['parameters[]'])
head_mri_t = Property(depends_on=['mri_head_t'])
mri_trans = Property(depends_on=['mri_head_t', 'parameters[]',
'coord_frame'])
hsp_trans = Property(depends_on=['head_mri_t', 'coord_frame'])
# info
subject_has_bem = DelegatesTo('mri')
lock_fiducials = DelegatesTo('mri')
can_prepare_bem_model = Property(
Bool,
depends_on=['n_scale_params', 'subject_has_bem'])
can_save = Property(Bool, depends_on=['mri_head_t'])
raw_subject = Property(
desc="Subject guess based on the raw file name.",
depends_on=['hsp:inst_fname'])
# MRI geometry transformed to viewing coordinate system
processed_high_res_mri_points = Property(
depends_on=['mri:bem_high_res:surf', 'grow_hair'])
processed_low_res_mri_points = Property(
depends_on=['mri:bem_low_res:surf', 'grow_hair'])
transformed_high_res_mri_points = Property(
depends_on=['processed_high_res_mri_points', 'mri_trans'])
transformed_low_res_mri_points = Property(
depends_on=['processed_low_res_mri_points', 'mri_trans'])
nearest_calc = Property(
Instance(_DistanceQuery),
depends_on=['transformed_high_res_mri_points'])
nearest_transformed_high_res_mri_idx_lpa = Property(
depends_on=['nearest_calc', 'transformed_hsp_lpa'])
nearest_transformed_high_res_mri_idx_nasion = Property(
depends_on=['nearest_calc', 'transformed_hsp_nasion'])
nearest_transformed_high_res_mri_idx_rpa = Property(
depends_on=['nearest_calc', 'transformed_hsp_rpa'])
nearest_transformed_high_res_mri_idx_hsp = Property(
depends_on=['nearest_calc', 'transformed_hsp_points'])
nearest_transformed_high_res_mri_idx_orig_hsp = Property(
depends_on=['nearest_calc', 'transformed_orig_hsp_points'])
nearest_transformed_high_res_mri_idx_eeg = Property(
depends_on=['nearest_calc', 'transformed_hsp_eeg_points'])
nearest_transformed_high_res_mri_idx_hpi = Property(
depends_on=['nearest_calc', 'transformed_hsp_hpi'])
transformed_mri_lpa = Property(
depends_on=['mri:lpa', 'mri_trans'])
transformed_mri_nasion = Property(
depends_on=['mri:nasion', 'mri_trans'])
transformed_mri_rpa = Property(
depends_on=['mri:rpa', 'mri_trans'])
# HSP geometry transformed to viewing coordinate system
transformed_hsp_points = Property(
depends_on=['hsp:points', 'hsp_trans'])
transformed_orig_hsp_points = Property(
depends_on=['hsp:_hsp_points', 'hsp_trans'])
transformed_hsp_lpa = Property(
depends_on=['hsp:lpa', 'hsp_trans'])
transformed_hsp_nasion = Property(
depends_on=['hsp:nasion', 'hsp_trans'])
transformed_hsp_rpa = Property(
depends_on=['hsp:rpa', 'hsp_trans'])
transformed_hsp_eeg_points = Property(
depends_on=['hsp:eeg_points', 'hsp_trans'])
transformed_hsp_hpi = Property(
depends_on=['hsp:hpi', 'hsp_trans'])
# fit properties
lpa_distance = Property(
depends_on=['transformed_mri_lpa', 'transformed_hsp_lpa'])
nasion_distance = Property(
depends_on=['transformed_mri_nasion', 'transformed_hsp_nasion'])
rpa_distance = Property(
depends_on=['transformed_mri_rpa', 'transformed_hsp_rpa'])
point_distance = Property( # use low res points
depends_on=['nearest_transformed_high_res_mri_idx_hsp',
'nearest_transformed_high_res_mri_idx_eeg',
'nearest_transformed_high_res_mri_idx_hpi',
'hsp_weight',
'eeg_weight',
'hpi_weight'])
orig_hsp_point_distance = Property( # use low res points
depends_on=['nearest_transformed_high_res_mri_idx_orig_hsp',
'hpi_weight'])
# fit property info strings
fid_eval_str = Property(
depends_on=['lpa_distance', 'nasion_distance', 'rpa_distance'])
points_eval_str = Property(
depends_on=['point_distance'])
def _parameters_default(self):
return list(_DEFAULT_PARAMETERS)
def _last_parameters_default(self):
return list(_DEFAULT_PARAMETERS)
@cached_property
def _get_can_prepare_bem_model(self):
return self.subject_has_bem and self.n_scale_params > 0
@cached_property
def _get_can_save(self):
return np.any(self.mri_head_t != np.eye(4))
@cached_property
def _get_has_lpa_data(self):
return (np.any(self.mri.lpa) and np.any(self.hsp.lpa))
@cached_property
def _get_has_nasion_data(self):
return (np.any(self.mri.nasion) and np.any(self.hsp.nasion))
@cached_property
def _get_has_rpa_data(self):
return (np.any(self.mri.rpa) and np.any(self.hsp.rpa))
@cached_property
def _get_has_fid_data(self):
return self.has_nasion_data and self.has_lpa_data and self.has_rpa_data
@cached_property
def _get_has_mri_data(self):
return len(self.transformed_high_res_mri_points) > 0
@cached_property
def _get_has_hsp_data(self):
return (self.has_mri_data and
len(self.nearest_transformed_high_res_mri_idx_hsp) > 0)
@cached_property
def _get_has_eeg_data(self):
return (self.has_mri_data and
len(self.nearest_transformed_high_res_mri_idx_eeg) > 0)
@cached_property
def _get_has_hpi_data(self):
return (self.has_mri_data and
len(self.nearest_transformed_high_res_mri_idx_hpi) > 0)
@cached_property
def _get_n_icp_points(self):
"""Get parameters for an ICP iteration."""
n = (self.hsp_weight > 0) * len(self.hsp.points)
for key in ('lpa', 'nasion', 'rpa'):
if getattr(self, 'has_%s_data' % key):
n += 1
n += (self.eeg_weight > 0) * len(self.hsp.eeg_points)
n += (self.hpi_weight > 0) * len(self.hsp.hpi_points)
return n
@cached_property
def _get_changes(self):
new = np.array(self.parameters, float)
old = np.array(self.last_parameters, float)
move = np.linalg.norm(old[3:6] - new[3:6]) * 1e3
angle = np.rad2deg(_angle_between_quats(
rot_to_quat(rotation(*new[:3])[:3, :3]),
rot_to_quat(rotation(*old[:3])[:3, :3])))
percs = 100 * (new[6:] - old[6:]) / old[6:]
return move, angle, percs
@cached_property
def _get_mri_head_t(self):
# rotate and translate hsp
trans = rotation(*self.parameters[:3])
trans[:3, 3] = np.array(self.parameters[3:6])
return trans
@cached_property
def _get_head_mri_t(self):
trans = rotation(*self.parameters[:3]).T
trans[:3, 3] = -np.dot(trans[:3, :3], self.parameters[3:6])
# should be the same as np.linalg.inv(self.mri_head_t)
return trans
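# Sanity-check sketch (assumes a populated ``CoregModel`` instance named
# ``model``; not executed here): composing the two transforms should give
# the identity, i.e. the transpose / negated-translation construction above
# is just a cheap way to invert the rigid transform:
#
#     np.testing.assert_allclose(
#         np.dot(model.head_mri_t, model.mri_head_t), np.eye(4), atol=1e-12)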
@cached_property
def _get_processed_high_res_mri_points(self):
if self.grow_hair:
if len(self.mri.bem_high_res.surf.nn):
scaled_hair_dist = (1e-3 * self.grow_hair /
np.array(self.parameters[6:9]))
points = self.mri.bem_high_res.surf.rr.copy()
hair = points[:, 2] > points[:, 1]
points[hair] += (self.mri.bem_high_res.surf.nn[hair] *
scaled_hair_dist)
return points
else:
error(None, "Norms missing from bem, can't grow hair")
self.grow_hair = 0
else:
return self.mri.bem_high_res.surf.rr
@cached_property
def _get_processed_low_res_mri_points(self):
if self.grow_hair:
if len(self.mri.bem_low_res.surf.nn):
scaled_hair_dist = (1e-3 * self.grow_hair /
np.array(self.parameters[6:9]))
points = self.mri.bem_low_res.surf.rr.copy()
hair = points[:, 2] > points[:, 1]
points[hair] += (self.mri.bem_low_res.surf.nn[hair] *
scaled_hair_dist)
return points
else:
error(None, "Norms missing from bem, can't grow hair")
self.grow_hair = 0
else:
return self.mri.bem_low_res.surf.rr
@cached_property
def _get_mri_trans(self):
mri_scaling = np.ones(4)
mri_scaling[:3] = self.parameters[6:9]
if self.coord_frame == 'head':
t = self.mri_head_t
else:
t = np.eye(4)
return t * mri_scaling
@cached_property
def _get_hsp_trans(self):
if self.coord_frame == 'head':
t = np.eye(4)
else:
t = self.head_mri_t
return t
@cached_property
def _get_nearest_transformed_high_res_mri_idx_lpa(self):
return self.nearest_calc.query(self.transformed_hsp_lpa)[1]
@cached_property
def _get_nearest_transformed_high_res_mri_idx_nasion(self):
return self.nearest_calc.query(self.transformed_hsp_nasion)[1]
@cached_property
def _get_nearest_transformed_high_res_mri_idx_rpa(self):
return self.nearest_calc.query(self.transformed_hsp_rpa)[1]
@cached_property
def _get_nearest_transformed_high_res_mri_idx_hsp(self):
return self.nearest_calc.query(self.transformed_hsp_points)[1]
@cached_property
def _get_nearest_transformed_high_res_mri_idx_orig_hsp(self):
# This is redundant to some extent with the one above due to
# overlapping points, but it's fast and the refactoring to
# remove redundancy would be a pain.
return self.nearest_calc.query(self.transformed_orig_hsp_points)[1]
@cached_property
def _get_nearest_transformed_high_res_mri_idx_eeg(self):
return self.nearest_calc.query(self.transformed_hsp_eeg_points)[1]
@cached_property
def _get_nearest_transformed_high_res_mri_idx_hpi(self):
return self.nearest_calc.query(self.transformed_hsp_hpi)[1]
# MRI view-transformed data
@cached_property
def _get_transformed_low_res_mri_points(self):
points = apply_trans(self.mri_trans,
self.processed_low_res_mri_points)
return points
@cached_property
def _get_nearest_calc(self):
return _DistanceQuery(self.transformed_high_res_mri_points)
@cached_property
def _get_transformed_high_res_mri_points(self):
points = apply_trans(self.mri_trans,
self.processed_high_res_mri_points)
return points
@cached_property
def _get_transformed_mri_lpa(self):
return apply_trans(self.mri_trans, self.mri.lpa)
@cached_property
def _get_transformed_mri_nasion(self):
return apply_trans(self.mri_trans, self.mri.nasion)
@cached_property
def _get_transformed_mri_rpa(self):
return apply_trans(self.mri_trans, self.mri.rpa)
# HSP view-transformed data
@cached_property
def _get_transformed_hsp_points(self):
return apply_trans(self.hsp_trans, self.hsp.points)
@cached_property
def _get_transformed_orig_hsp_points(self):
return apply_trans(self.hsp_trans, self.hsp._hsp_points)
@cached_property
def _get_transformed_hsp_lpa(self):
return apply_trans(self.hsp_trans, self.hsp.lpa)
@cached_property
def _get_transformed_hsp_nasion(self):
return apply_trans(self.hsp_trans, self.hsp.nasion)
@cached_property
def _get_transformed_hsp_rpa(self):
return apply_trans(self.hsp_trans, self.hsp.rpa)
@cached_property
def _get_transformed_hsp_eeg_points(self):
return apply_trans(self.hsp_trans, self.hsp.eeg_points)
@cached_property
def _get_transformed_hsp_hpi(self):
return apply_trans(self.hsp_trans, self.hsp.hpi_points)
# Distances, etc.
@cached_property
def _get_lpa_distance(self):
d = np.ravel(self.transformed_mri_lpa - self.transformed_hsp_lpa)
return np.linalg.norm(d)
@cached_property
def _get_nasion_distance(self):
d = np.ravel(self.transformed_mri_nasion - self.transformed_hsp_nasion)
return np.linalg.norm(d)
@cached_property
def _get_rpa_distance(self):
d = np.ravel(self.transformed_mri_rpa - self.transformed_hsp_rpa)
return np.linalg.norm(d)
@cached_property
def _get_point_distance(self):
mri_points = list()
hsp_points = list()
if self.hsp_weight > 0 and self.has_hsp_data:
mri_points.append(self.transformed_high_res_mri_points[
self.nearest_transformed_high_res_mri_idx_hsp])
hsp_points.append(self.transformed_hsp_points)
if self.eeg_weight > 0 and self.has_eeg_data:
mri_points.append(self.transformed_high_res_mri_points[
self.nearest_transformed_high_res_mri_idx_eeg])
hsp_points.append(self.transformed_hsp_eeg_points)
if self.hpi_weight > 0 and self.has_hpi_data:
mri_points.append(self.transformed_high_res_mri_points[
self.nearest_transformed_high_res_mri_idx_hpi])
hsp_points.append(self.transformed_hsp_hpi)
if all(len(h) == 0 for h in hsp_points):
return None
mri_points = np.concatenate(mri_points)
hsp_points = np.concatenate(hsp_points)
return np.linalg.norm(mri_points - hsp_points, axis=-1)
@cached_property
def _get_orig_hsp_point_distance(self):
mri_points = self.transformed_high_res_mri_points[
self.nearest_transformed_high_res_mri_idx_orig_hsp]
hsp_points = self.transformed_orig_hsp_points
return np.linalg.norm(mri_points - hsp_points, axis=-1)
@cached_property
def _get_fid_eval_str(self):
d = (self.lpa_distance * 1000, self.nasion_distance * 1000,
self.rpa_distance * 1000)
return u'Fiducials: %.1f, %.1f, %.1f mm' % d
@cached_property
def _get_points_eval_str(self):
if self.point_distance is None:
return ""
dists = 1000 * self.point_distance
av_dist = np.mean(dists)
std_dist = np.std(dists)
kinds = [kind for kind, check in
(('HSP', self.hsp_weight > 0 and self.has_hsp_data),
('EEG', self.eeg_weight > 0 and self.has_eeg_data),
('HPI', self.hpi_weight > 0 and self.has_hpi_data))
if check]
return (u"%s %s: %.1f ± %.1f mm"
% (len(dists), '+'.join(kinds), av_dist, std_dist))
def _get_raw_subject(self):
# subject name guessed based on the inst file name
if '_' in self.hsp.inst_fname:
subject, _ = self.hsp.inst_fname.split('_', 1)
if subject:
return subject
@on_trait_change('raw_subject')
def _on_raw_subject_change(self, subject):
if self.guess_mri_subject:
if subject in self.mri.subject_source.subjects:
self.mri.subject = subject
elif 'fsaverage' in self.mri.subject_source.subjects:
self.mri.subject = 'fsaverage'
def omit_hsp_points(self, distance):
"""Exclude head shape points that are far away from the MRI head.
Parameters
----------
distance : float
Exclude all points that are farther away from the MRI head than
this distance (in m). The filter is recomputed from the original
(unfiltered) head shape points on every call, so passing ``np.inf``
re-includes all points. A value of ``distance <= 0`` leaves the
current filter unchanged.
"""
distance = float(distance)
if distance <= 0:
return
# find the new filter
mask = self.orig_hsp_point_distance <= distance
n_excluded = np.sum(~mask)
logger.info("Coregistration: Excluding %i head shape points with "
"distance >= %.3f m.", n_excluded, distance)
# set the filter
with warnings.catch_warnings(record=True): # comp to None in Traits
self.hsp.points_filter = mask
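# Usage sketch (``model`` is a populated CoregModel; the distance is in
# meters -- the GUI panel divides its millimeter field by 1000 before
# calling this method):
#
#     model.omit_hsp_points(10e-3)    # exclude points > 10 mm from the scalp
#     model.omit_hsp_points(np.inf)   # re-include all points (used by reset())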
def fit_fiducials(self, n_scale_params=None):
"""Find rotation and translation to fit all 3 fiducials."""
if n_scale_params is None:
n_scale_params = self.n_scale_params
head_pts = np.vstack((self.hsp.lpa, self.hsp.nasion, self.hsp.rpa))
mri_pts = np.vstack((self.mri.lpa, self.mri.nasion, self.mri.rpa))
weights = [self.lpa_weight, self.nasion_weight, self.rpa_weight]
assert n_scale_params in (0, 1) # guaranteed by GUI
if n_scale_params == 0:
mri_pts *= self.parameters[6:9] # not done in fit_matched_points
x0 = np.array(self.parameters[:6 + n_scale_params])
est = fit_matched_points(mri_pts, head_pts, x0=x0, out='params',
scale=n_scale_params, weights=weights)
if n_scale_params == 0:
self.parameters[:6] = est
else:
self.parameters[:] = np.concatenate([est, [est[-1]] * 2])
def _setup_icp(self, n_scale_params):
"""Get parameters for an ICP iteration."""
head_pts = list()
mri_pts = list()
weights = list()
if self.has_hsp_data and self.hsp_weight > 0: # should be true
head_pts.append(self.hsp.points)
mri_pts.append(self.processed_high_res_mri_points[
self.nearest_transformed_high_res_mri_idx_hsp])
weights.append(np.full(len(head_pts[-1]), self.hsp_weight))
for key in ('lpa', 'nasion', 'rpa'):
if getattr(self, 'has_%s_data' % key):
head_pts.append(getattr(self.hsp, key))
if self.icp_fid_match == 'matched':
mri_pts.append(getattr(self.mri, key))
else:
assert self.icp_fid_match == 'nearest'
mri_pts.append(self.processed_high_res_mri_points[
getattr(self, 'nearest_transformed_high_res_mri_idx_%s'
% (key,))])
weights.append(np.full(len(mri_pts[-1]),
getattr(self, '%s_weight' % key)))
if self.has_eeg_data and self.eeg_weight > 0:
head_pts.append(self.hsp.eeg_points)
mri_pts.append(self.processed_high_res_mri_points[
self.nearest_transformed_high_res_mri_idx_eeg])
weights.append(np.full(len(mri_pts[-1]), self.eeg_weight))
if self.has_hpi_data and self.hpi_weight > 0:
head_pts.append(self.hsp.hpi_points)
mri_pts.append(self.processed_high_res_mri_points[
self.nearest_transformed_high_res_mri_idx_hpi])
weights.append(np.full(len(mri_pts[-1]), self.hpi_weight))
head_pts = np.concatenate(head_pts)
mri_pts = np.concatenate(mri_pts)
weights = np.concatenate(weights)
if n_scale_params == 0:
mri_pts *= self.parameters[6:9] # not done in fit_matched_points
return head_pts, mri_pts, weights
def fit_icp(self, n_scale_params=None):
"""Find MRI scaling, translation, and rotation to match HSP."""
if n_scale_params is None:
n_scale_params = self.n_scale_params
# Initial guess (current state)
assert n_scale_params in (0, 1, 3)
est = self.parameters[:[6, 7, None, 9][n_scale_params]]
# Do the fits, assigning and evaluating at each step
attr = 'fit_icp_running' if n_scale_params == 0 else 'fits_icp_running'
setattr(self, attr, True)
GUI.process_events() # update the cancel button
for self.iteration in range(self.icp_iterations):
head_pts, mri_pts, weights = self._setup_icp(n_scale_params)
est = fit_matched_points(mri_pts, head_pts, scale=n_scale_params,
x0=est, out='params', weights=weights)
if n_scale_params == 0:
self.parameters[:6] = est
elif n_scale_params == 1:
self.parameters[:] = list(est) + [est[-1]] * 2
else:
self.parameters[:] = est
move, angle, scale = self.changes  # same order as _get_changes returns
if angle <= self.icp_angle and move <= self.icp_distance and \
all(scale <= self.icp_scale):
self.status_text = self.status_text[:-1] + '; converged)'
break
if not getattr(self, attr): # canceled by user
self.status_text = self.status_text[:-1] + '; cancelled)'
break
GUI.process_events() # this will update the head view
else:
self.status_text = self.status_text[:-1] + '; did not converge)'
setattr(self, attr, False)
self.iteration = -1
def get_scaling_job(self, subject_to, skip_fiducials):
"""Find all arguments needed for the scaling worker."""
subjects_dir = self.mri.subjects_dir
subject_from = self.mri.subject
bem_names = []
if self.can_prepare_bem_model and self.prepare_bem_model:
pattern = bem_fname.format(subjects_dir=subjects_dir,
subject=subject_from, name='(.+-bem)')
bem_dir, pattern = os.path.split(pattern)
for filename in os.listdir(bem_dir):
match = re.match(pattern, filename)
if match:
bem_names.append(match.group(1))
return (subjects_dir, subject_from, subject_to, self.parameters[6:9],
skip_fiducials, self.scale_labels, self.copy_annot, bem_names)
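# The tuple returned above is consumed verbatim by the scaling worker thread
# in ``CoregPanel.__init__`` below, roughly:
#
#     (subjects_dir, subject_from, subject_to, scale, skip_fiducials,
#      include_labels, include_annot, bem_names) = queue.get()
#     scale_mri(subject_from, subject_to, scale, True, subjects_dir,
#               skip_fiducials, include_labels, include_annot)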
def load_trans(self, fname):
"""Load the head-mri transform from a fif file.
Parameters
----------
fname : str
File path.
"""
self.set_trans(_ensure_trans(read_trans(fname, return_all=True),
'mri', 'head')['trans'])
def reset(self):
"""Reset all the parameters affecting the coregistration."""
with busy():
self.reset_traits(('grow_hair', 'n_scale_params'))
self.parameters[:] = _DEFAULT_PARAMETERS
self.omit_hsp_points(np.inf)
def set_trans(self, mri_head_t):
"""Set rotation and translation params from a transformation matrix.
Parameters
----------
mri_head_t : array, shape (4, 4)
Transformation matrix from MRI to head space.
"""
with busy():
rot_x, rot_y, rot_z = rotation_angles(mri_head_t)
x, y, z = mri_head_t[:3, 3]
self.parameters[:6] = [rot_x, rot_y, rot_z, x, y, z]
def save_trans(self, fname):
"""Save the head-mri transform as a fif file.
Parameters
----------
fname : str
Target file path.
"""
if not self.can_save:
raise RuntimeError("Not enough information for saving transform")
write_trans(fname, Transform('head', 'mri', self.head_mri_t))
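# Round-trip sketch (hypothetical file name): the transform written here can
# be read back with ``load_trans`` above:
#
#     model.save_trans('sample-trans.fif')
#     model.load_trans('sample-trans.fif')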
def _parameters_items_changed(self):
# Update GUI as necessary
n_scale = self.n_scale_params
for ii, key in enumerate(('rot_x', 'rot_y', 'rot_z')):
val = np.rad2deg(self.parameters[ii])
if val != getattr(self, key): # prevent circular
setattr(self, key, val)
for ii, key in enumerate(('trans_x', 'trans_y', 'trans_z')):
val = self.parameters[ii + 3] * 1e3
if val != getattr(self, key): # prevent circular
setattr(self, key, val)
for ii, key in enumerate(('scale_x', 'scale_y', 'scale_z')):
val = self.parameters[ii + 6] * 1e2
if val != getattr(self, key): # prevent circular
setattr(self, key, val)
# Update the status text
move, angle, percs = self.changes
text = u'Change: Δ=%0.1f mm ∠=%0.2f°' % (move, angle)
if n_scale:
text += ' Scale ' if n_scale == 1 else ' Sx/y/z '
text += '/'.join(['%+0.1f%%' % p for p in percs[:n_scale]])
if self.iteration >= 0:
text += u' (iteration %d/%d)' % (self.iteration + 1,
self.icp_iterations)
self.last_parameters[:] = self.parameters[:]
self.status_text = text
def _rot_x_changed(self):
self.parameters[0] = np.deg2rad(self.rot_x)
def _rot_y_changed(self):
self.parameters[1] = np.deg2rad(self.rot_y)
def _rot_z_changed(self):
self.parameters[2] = np.deg2rad(self.rot_z)
def _trans_x_changed(self):
self.parameters[3] = self.trans_x * 1e-3
def _trans_y_changed(self):
self.parameters[4] = self.trans_y * 1e-3
def _trans_z_changed(self):
self.parameters[5] = self.trans_z * 1e-3
def _scale_x_changed(self):
if self.n_scale_params == 1:
self.parameters[6:9] = [self.scale_x * 1e-2] * 3
else:
self.parameters[6] = self.scale_x * 1e-2
def _scale_y_changed(self):
self.parameters[7] = self.scale_y * 1e-2
def _scale_z_changed(self):
self.parameters[8] = self.scale_z * 1e-2
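# Programmatic usage sketch of CoregModel outside the GUI (the paths and the
# subject name below are hypothetical placeholders):
#
#     model = CoregModel()
#     model.mri.subjects_dir = '/path/to/subjects_dir'
#     model.mri.subject = 'sample'
#     model.hsp.file = 'sample_audvis_raw.fif'
#     model.fit_fiducials()          # coarse alignment from LPA/nasion/RPA
#     model.fit_icp()                # refine against head-shape/EEG/HPI points
#     model.save_trans('sample-trans.fif')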
class CoregFrameHandler(Handler):
"""Check for unfinished processes before closing its window."""
def object_title_changed(self, info):
"""Set the title when it gets changed."""
info.ui.title = info.object.title
def close(self, info, is_ok):
"""Handle the close event."""
if info.object.queue.unfinished_tasks:
information(None, "Can not close the window while saving is still "
"in progress. Please wait until all MRIs are "
"processed.", "Saving Still in Progress")
return False
else:
try: # works on Qt only for now
size = (info.ui.control.width(), info.ui.control.height())
except AttributeError:
size = None
# store configuration, but don't prevent from closing on error
try:
info.object.save_config(size=size)
except Exception as exc:
warnings.warn("Error saving GUI configuration:\n%s" % (exc,))
return True
class CoregPanelHandler(Handler):
"""Open other windows with proper parenting."""
info = Instance(UIInfo)
def object_fitting_options_panel_changed(self, info): # noqa: D102
self.info = info
def object_fitting_options_changed(self, info): # noqa: D102
self.info.object.fitting_options_panel.edit_traits(
parent=self.info.ui.control)
def object_load_trans_changed(self, info): # noqa: D102
# find trans file destination
model = self.info.object.model
raw_dir = os.path.dirname(model.hsp.file)
subject = model.mri.subject
trans_file = trans_fname.format(raw_dir=raw_dir, subject=subject)
dlg = FileDialog(action="open", wildcard=trans_wildcard,
default_path=trans_file, parent=self.info.ui.control)
if dlg.open() != OK:
return
trans_file = dlg.path
try:
model.load_trans(trans_file)
except Exception as e:
error(None, "Error loading trans file %s: %s (See terminal "
"for details)" % (trans_file, e), "Error Loading Trans File")
raise
def object_save_changed(self, info): # noqa: D102
obj = self.info.object
subjects_dir = obj.model.mri.subjects_dir
subject_from = obj.model.mri.subject
# check that fiducials are saved
skip_fiducials = False
if obj.n_scale_params and not _find_fiducials_files(subject_from,
subjects_dir):
msg = ("No fiducials file has been found for {src}. If fiducials "
"are not saved, they will not be available in the scaled "
"MRI. Should the current fiducials be saved now? "
"Select Yes to save the fiducials at "
"{src}/bem/{src}-fiducials.fif. "
"Select No to proceed scaling the MRI without fiducials.".
format(src=subject_from))
title = "Save Fiducials for %s?" % subject_from
rc = confirm(None, msg, title, cancel=True, default=CANCEL,
parent=self.info.ui.control)
if rc == CANCEL:
return
elif rc == YES:
obj.model.mri.save(obj.model.mri.default_fid_fname)
elif rc == NO:
skip_fiducials = True
else:
raise RuntimeError("rc=%s" % repr(rc))
# find target subject
if obj.n_scale_params:
subject_to = obj.model.raw_subject or subject_from
mridlg = NewMriDialog(subjects_dir=subjects_dir,
subject_from=subject_from,
subject_to=subject_to)
ui = mridlg.edit_traits(kind='modal',
parent=self.info.ui.control)
if not ui.result: # i.e., user pressed cancel
return
subject_to = mridlg.subject_to
else:
subject_to = subject_from
# find trans file destination
raw_dir = os.path.dirname(obj.model.hsp.file)
trans_file = trans_fname.format(raw_dir=raw_dir, subject=subject_to)
dlg = FileDialog(action="save as", wildcard=trans_wildcard,
default_path=trans_file,
parent=self.info.ui.control)
dlg.open()
if dlg.return_code != OK:
return
trans_file = dlg.path
if not trans_file.endswith('.fif'):
trans_file += '.fif'
if os.path.exists(trans_file):
answer = confirm(None, "The file %r already exists. Should it "
"be replaced?" % trans_file, "Overwrite File?")
if answer != YES:
return
# save the trans file
try:
obj.model.save_trans(trans_file)
except Exception as e:
error(None, "Error saving -trans.fif file: %s (See terminal for "
"details)" % (e,), "Error Saving Trans File")
raise
# save the scaled MRI
if obj.n_scale_params:
job = obj.model.get_scaling_job(subject_to, skip_fiducials)
obj.queue.put(job)
obj.queue_len += 1
def _make_view_data_panel(scrollable=False):
view = View(VGroup(
VGroup(Item('subject_panel', style='custom'), label="MRI Subject",
show_border=_SHOW_BORDER, show_labels=False),
VGroup(Item('lock_fiducials', style='custom',
editor=EnumEditor(cols=2, values={False: '2:Edit',
True: '1:Lock'}),
enabled_when='fid_ok'),
HGroup(Item('hsp_always_visible',
label='Show head shape points', show_label=True,
enabled_when='not lock_fiducials', width=-1),
show_left=False),
Item('fid_panel', style='custom'), label="MRI Fiducials",
show_border=_SHOW_BORDER, show_labels=False),
VGroup(Item('raw_src', style="custom"),
HGroup('guess_mri_subject',
Label('Guess subject from name'), show_labels=False),
VGrid(Item('grow_hair', editor=laggy_float_editor_mm,
width=_MM_WIDTH),
Label(u'ΔHair', show_label=True, width=-1), '0',
Item('distance', show_label=False, width=_MM_WIDTH,
editor=laggy_float_editor_mm),
Item('omit_points', width=_BUTTON_WIDTH),
Item('reset_omit_points', width=_RESET_WIDTH),
columns=3, show_labels=False),
Item('omitted_info', style='readonly',
width=_REDUCED_TEXT_WIDTH), label='Digitization source',
show_border=_SHOW_BORDER, show_labels=False),
VGroup(HGroup(Item('headview', style='custom'), Spring(),
show_labels=False),
Item('view_options', width=_REDUCED_TEXT_WIDTH),
label='View', show_border=_SHOW_BORDER, show_labels=False),
Spring(),
show_labels=False), kind='panel', buttons=[UndoButton],
scrollable=scrollable, handler=DataPanelHandler())
return view
def _make_view_coreg_panel(scrollable=False):
"""Generate View for CoregPanel."""
view = View(VGroup(
# Scaling
HGroup(Item('n_scale_params', label='Scaling mode',
editor=EnumEditor(values={0: '1:None',
1: '2:Uniform',
3: '3:3-axis'})), Spring()),
VGrid(Item('scale_x', editor=laggy_float_editor_scale,
show_label=True, tooltip="Scale along right-left axis (%)",
enabled_when='n_scale_params > 0', width=_SCALE_WIDTH),
Item('scale_x_dec', enabled_when='n_scale_params > 0',
width=_INC_BUTTON_WIDTH),
Item('scale_x_inc', enabled_when='n_scale_params > 0',
width=_INC_BUTTON_WIDTH),
Item('scale_step', tooltip="Scaling step (%)",
enabled_when='n_scale_params > 0', width=_SCALE_STEP_WIDTH),
Spring(),
Item('scale_y', editor=laggy_float_editor_scale, show_label=True,
enabled_when='n_scale_params > 1',
tooltip="Scale along anterior-posterior axis (%)",
width=_SCALE_WIDTH),
Item('scale_y_dec', enabled_when='n_scale_params > 1',
width=_INC_BUTTON_WIDTH),
Item('scale_y_inc', enabled_when='n_scale_params > 1',
width=_INC_BUTTON_WIDTH),
Label('(Step)', width=_SCALE_WIDTH),
Spring(),
Item('scale_z', editor=laggy_float_editor_scale, show_label=True,
enabled_when='n_scale_params > 1', width=_SCALE_WIDTH,
tooltip="Scale along anterior-posterior axis (%)"),
Item('scale_z_dec', enabled_when='n_scale_params > 1',
width=_INC_BUTTON_WIDTH),
Item('scale_z_inc', enabled_when='n_scale_params > 1',
width=_INC_BUTTON_WIDTH),
'0',
Spring(),
label='Scaling parameters', show_labels=False, columns=5,
show_border=_SHOW_BORDER),
VGrid(Item('fits_icp', enabled_when='n_scale_params > 0 and '
'n_icp_points >= 10',
tooltip="Rotate, translate, and scale the MRI to minimize "
"the distance from each digitizer point to the closest MRI "
"point (one ICP iteration)", width=_BUTTON_WIDTH),
Item('fits_fid', enabled_when='n_scale_params == 1 and '
'has_fid_data',
tooltip="Rotate, translate, and scale the MRI to minimize "
"the distance of the three fiducials.",
width=_BUTTON_WIDTH),
Item('cancels_icp', enabled_when="fits_icp_running",
tooltip='Stop ICP fitting', width=_RESET_WIDTH),
Item('reset_scale', enabled_when='n_scale_params',
tooltip="Reset scaling parameters", width=_RESET_WIDTH),
show_labels=False, columns=4),
# Translation and rotation
VGrid(Item('trans_x', editor=laggy_float_editor_mm, show_label=True,
tooltip="Move along right-left axis", width=_MM_WIDTH),
Item('trans_x_dec', width=_INC_BUTTON_WIDTH),
Item('trans_x_inc', width=_INC_BUTTON_WIDTH),
Item('trans_step', tooltip="Movement step (mm)",
width=_MM_STEP_WIDTH),
Spring(),
Item('trans_y', editor=laggy_float_editor_mm, show_label=True,
tooltip="Move along anterior-posterior axis",
width=_MM_WIDTH),
Item('trans_y_dec', width=_INC_BUTTON_WIDTH),
Item('trans_y_inc', width=_INC_BUTTON_WIDTH),
Label('(Step)', width=_MM_WIDTH),
Spring(),
Item('trans_z', editor=laggy_float_editor_mm, show_label=True,
tooltip="Move along anterior-posterior axis",
width=_MM_WIDTH),
Item('trans_z_dec', width=_INC_BUTTON_WIDTH),
Item('trans_z_inc', width=_INC_BUTTON_WIDTH),
'0',
Spring(),
Item('rot_x', editor=laggy_float_editor_deg, show_label=True,
tooltip="Tilt the digitization backward (-) or forward (+)",
width=_DEG_WIDTH),
Item('rot_x_dec', width=_INC_BUTTON_WIDTH),
Item('rot_x_inc', width=_INC_BUTTON_WIDTH),
Item('rot_step', tooltip=u"Rotation step (°)",
width=_DEG_STEP_WIDTH),
Spring(),
Item('rot_y', editor=laggy_float_editor_deg, show_label=True,
tooltip="Tilt the digitization rightward (-) or "
"leftward (+)", width=_DEG_WIDTH),
Item('rot_y_dec', width=_INC_BUTTON_WIDTH),
Item('rot_y_inc', width=_INC_BUTTON_WIDTH),
Label('(Step)', width=_DEG_WIDTH),
Spring(),
Item('rot_z', editor=laggy_float_editor_deg, show_label=True,
tooltip="Turn the digitization leftward (-) or "
"rightward (+)", width=_DEG_WIDTH),
Item('rot_z_dec', width=_INC_BUTTON_WIDTH),
Item('rot_z_inc', width=_INC_BUTTON_WIDTH),
'0',
Spring(),
columns=5, show_labels=False, show_border=_SHOW_BORDER,
label=u'Translation (Δ) and Rotation (∠)'),
VGroup(Item('fit_icp', enabled_when='n_icp_points >= 10',
tooltip="Rotate and translate the MRI to minimize the "
"distance from each digitizer point to the closest MRI "
"point (one ICP iteration)", width=_BUTTON_WIDTH),
Item('fit_fid', enabled_when="has_fid_data",
tooltip="Rotate and translate the MRI to minimize the "
"distance of the three fiducials.", width=_BUTTON_WIDTH),
Item('cancel_icp', enabled_when="fit_icp_running",
tooltip='Stop ICP iterations', width=_RESET_WIDTH),
Item('reset_tr', tooltip="Reset translation and rotation.",
width=_RESET_WIDTH),
show_labels=False, columns=4),
# Fitting weights
Item('fid_eval_str', style='readonly', tooltip='Fiducial differences',
width=_REDUCED_TEXT_WIDTH),
Item('points_eval_str', style='readonly',
tooltip='Point error (μ ± σ)', width=_REDUCED_TEXT_WIDTH),
Item('fitting_options', width=_REDUCED_TEXT_WIDTH, show_label=False),
VGrid(Item('scale_labels', label="Scale label files",
enabled_when='n_scale_params > 0'),
Item('copy_annot', label="Copy annotation files",
enabled_when='n_scale_params > 0'),
Item('prepare_bem_model', label="Prepare BEM",
enabled_when='can_prepare_bem_model'),
show_left=False, label='Subject-saving options', columns=1,
show_border=_SHOW_BORDER),
VGrid(Item('save', enabled_when='can_save',
tooltip="Save the trans file and (if scaling is enabled) "
"the scaled MRI", width=_BUTTON_WIDTH),
Item('load_trans', width=_BUTTON_WIDTH,
tooltip="Load Head<->MRI trans file"),
Item('reset_params', tooltip="Reset all coregistration "
"parameters", width=_RESET_WIDTH),
show_labels=False, columns=3),
Spring(),
show_labels=False), kind='panel', buttons=[UndoButton],
scrollable=scrollable, handler=CoregPanelHandler())
return view
class FittingOptionsPanel(HasTraits):
"""View options panel."""
model = Instance(CoregModel)
lpa_weight = DelegatesTo('model')
nasion_weight = DelegatesTo('model')
rpa_weight = DelegatesTo('model')
hsp_weight = DelegatesTo('model')
eeg_weight = DelegatesTo('model')
hpi_weight = DelegatesTo('model')
has_lpa_data = DelegatesTo('model')
has_nasion_data = DelegatesTo('model')
has_rpa_data = DelegatesTo('model')
has_hsp_data = DelegatesTo('model')
has_eeg_data = DelegatesTo('model')
has_hpi_data = DelegatesTo('model')
icp_iterations = DelegatesTo('model')
icp_angle = DelegatesTo('model')
icp_distance = DelegatesTo('model')
icp_scale = DelegatesTo('model')
icp_fid_match = DelegatesTo('model')
n_scale_params = DelegatesTo('model')
view = View(VGroup(
VGrid(HGroup(Item('icp_iterations', label='Iterations',
width=_MM_WIDTH, tooltip='Maximum ICP iterations to '
'perform (per click)'),
Spring(), show_labels=True), label='ICP iterations (max)',
show_border=_SHOW_BORDER),
VGrid(Item('icp_angle', label=u'Angle (°)', width=_MM_WIDTH,
tooltip='Angle convergence threshold'),
Item('icp_distance', label='Distance (mm)', width=_MM_WIDTH,
tooltip='Distance convergence threshold'),
Item('icp_scale', label='Scale (%)',
tooltip='Scaling convergence threshold', width=_MM_WIDTH,
enabled_when='n_scale_params > 0'),
show_labels=True, label='ICP convergence limits', columns=3,
show_border=_SHOW_BORDER),
VGrid(Item('icp_fid_match', width=-1, show_label=False,
editor=EnumEditor(values=dict(
nearest='1:Closest to surface',
matched='2:MRI fiducials'), cols=2,
format_func=lambda x: x),
tooltip='Match digitization fiducials to MRI fiducials or '
'the closest surface point', style='custom'),
label='Fiducial point matching', show_border=_SHOW_BORDER),
VGrid(
VGrid(Item('lpa_weight', editor=laggy_float_editor_weight,
tooltip="Relative weight for LPA", width=_WEIGHT_WIDTH,
enabled_when='has_lpa_data', label='LPA'),
Item('nasion_weight', editor=laggy_float_editor_weight,
tooltip="Relative weight for nasion", label='Nasion',
width=_WEIGHT_WIDTH, enabled_when='has_nasion_data'),
Item('rpa_weight', editor=laggy_float_editor_weight,
tooltip="Relative weight for RPA", width=_WEIGHT_WIDTH,
enabled_when='has_rpa_data', label='RPA'),
columns=3, show_labels=True, show_border=_SHOW_BORDER,
label='Fiducials'),
VGrid(Item('hsp_weight', editor=laggy_float_editor_weight,
tooltip="Relative weight for head shape points",
enabled_when='has_hsp_data',
label='HSP', width=_WEIGHT_WIDTH,),
Item('eeg_weight', editor=laggy_float_editor_weight,
tooltip="Relative weight for EEG points", label='EEG',
enabled_when='has_eeg_data', width=_WEIGHT_WIDTH),
Item('hpi_weight', editor=laggy_float_editor_weight,
tooltip="Relative weight for HPI points", label='HPI',
enabled_when='has_hpi_data', width=_WEIGHT_WIDTH),
columns=3, show_labels=True, show_border=_SHOW_BORDER,
label='Other points (closest-point matched)'),
show_labels=False, label='Point weights', columns=2,
show_border=_SHOW_BORDER),
), title="Fitting options")
_DEFAULT_PARAMETERS = (0., 0., 0., 0., 0., 0., 1., 1., 1.)
class CoregPanel(HasPrivateTraits):
"""Coregistration panel for Head<->MRI with scaling."""
model = Instance(CoregModel)
# parameters
reset_params = Button(label=_RESET_LABEL)
n_scale_params = DelegatesTo('model')
parameters = DelegatesTo('model')
scale_step = Float(1.)
scale_x = DelegatesTo('model')
scale_x_dec = Button('-')
scale_x_inc = Button('+')
scale_y = DelegatesTo('model')
scale_y_dec = Button('-')
scale_y_inc = Button('+')
scale_z = DelegatesTo('model')
scale_z_dec = Button('-')
scale_z_inc = Button('+')
rot_step = Float(1.)
rot_x = DelegatesTo('model')
rot_x_dec = Button('-')
rot_x_inc = Button('+')
rot_y = DelegatesTo('model')
rot_y_dec = Button('-')
rot_y_inc = Button('+')
rot_z = DelegatesTo('model')
rot_z_dec = Button('-')
rot_z_inc = Button('+')
trans_step = Float(1.)
trans_x = DelegatesTo('model')
trans_x_dec = Button('-')
trans_x_inc = Button('+')
trans_y = DelegatesTo('model')
trans_y_dec = Button('-')
trans_y_inc = Button('+')
trans_z = DelegatesTo('model')
trans_z_dec = Button('-')
trans_z_inc = Button('+')
# fitting
has_lpa_data = DelegatesTo('model')
has_nasion_data = DelegatesTo('model')
has_rpa_data = DelegatesTo('model')
has_fid_data = DelegatesTo('model')
has_hsp_data = DelegatesTo('model')
has_eeg_data = DelegatesTo('model')
has_hpi_data = DelegatesTo('model')
n_icp_points = DelegatesTo('model')
# fitting with scaling
fits_icp = Button(label='Fit (ICP)')
fits_fid = Button(label='Fit Fid.')
cancels_icp = Button(u'■')
reset_scale = Button(label=_RESET_LABEL)
fits_icp_running = DelegatesTo('model')
# fitting without scaling
fit_icp = Button(label='Fit (ICP)')
fit_fid = Button(label='Fit Fid.')
cancel_icp = Button(label=u'■')
reset_tr = Button(label=_RESET_LABEL)
fit_icp_running = DelegatesTo('model')
# fit info
fid_eval_str = DelegatesTo('model')
points_eval_str = DelegatesTo('model')
# saving
can_prepare_bem_model = DelegatesTo('model')
can_save = DelegatesTo('model')
scale_labels = DelegatesTo('model')
copy_annot = DelegatesTo('model')
prepare_bem_model = DelegatesTo('model')
save = Button(label="Save...")
load_trans = Button(label='Load...')
queue = Instance(queue.Queue, ())
queue_feedback = Str('')
queue_current = Str('')
queue_len = Int(0)
queue_status_text = Property(
Str, depends_on=['queue_feedback', 'queue_current', 'queue_len'])
fitting_options_panel = Instance(FittingOptionsPanel)
fitting_options = Button('Fitting options...')
def _fitting_options_panel_default(self):
return FittingOptionsPanel(model=self.model)
view = _make_view_coreg_panel()
def __init__(self, *args, **kwargs): # noqa: D102
super(CoregPanel, self).__init__(*args, **kwargs)
# Setup scaling worker
def worker():
while True:
(subjects_dir, subject_from, subject_to, scale, skip_fiducials,
include_labels, include_annot, bem_names) = self.queue.get()
self.queue_len -= 1
# Scale MRI files
self.queue_current = 'Scaling %s...' % subject_to
try:
scale_mri(subject_from, subject_to, scale, True,
subjects_dir, skip_fiducials, include_labels,
include_annot)
except Exception:
logger.error('Error scaling %s:\n' % subject_to +
traceback.format_exc())
self.queue_feedback = ('Error scaling %s (see Terminal)' %
subject_to)
bem_names = () # skip bem solutions
else:
self.queue_feedback = 'Done scaling %s' % subject_to
# Precompute BEM solutions
for bem_name in bem_names:
self.queue_current = ('Computing %s solution...' %
bem_name)
try:
bem_file = bem_fname.format(subjects_dir=subjects_dir,
subject=subject_to,
name=bem_name)
bemsol = make_bem_solution(bem_file)
write_bem_solution(bem_file[:-4] + '-sol.fif', bemsol)
except Exception:
logger.error('Error computing %s solution:\n' %
bem_name + traceback.format_exc())
self.queue_feedback = ('Error computing %s solution '
'(see Terminal)' % bem_name)
else:
self.queue_feedback = ('Done computing %s solution' %
bem_name)
# Finalize
self.queue_current = ''
self.queue.task_done()
t = Thread(target=worker)
t.daemon = True
t.start()
@cached_property
def _get_queue_status_text(self):
items = []
if self.queue_current:
items.append(self.queue_current)
if self.queue_feedback:
items.append(self.queue_feedback)
if self.queue_len:
items.append("%i queued" % self.queue_len)
return ' | '.join(items)
@cached_property
def _get_rotation(self):
rot = np.array([self.rot_x, self.rot_y, self.rot_z])
return rot
@cached_property
def _get_translation(self):
trans = np.array([self.trans_x, self.trans_y, self.trans_z])
return trans
def _n_scale_params_fired(self):
if self.n_scale_params == 0:
use = [1] * 3
elif self.n_scale_params == 1:
use = [np.mean([self.scale_x, self.scale_y, self.scale_z]) /
100.] * 3
else:
use = self.parameters[6:9]
self.parameters[6:9] = use
def _fit_fid_fired(self):
with busy():
self.model.fit_fiducials(0)
def _fit_icp_fired(self):
with busy():
self.model.fit_icp(0)
def _fits_fid_fired(self):
with busy():
self.model.fit_fiducials()
def _fits_icp_fired(self):
with busy():
self.model.fit_icp()
def _cancel_icp_fired(self):
self.fit_icp_running = False
def _cancels_icp_fired(self):
self.fits_icp_running = False
def _reset_scale_fired(self):
self.reset_traits(('scale_x', 'scale_y', 'scale_z'))
def _reset_tr_fired(self):
self.reset_traits(('trans_x', 'trans_y', 'trans_z',
'rot_x', 'rot_y', 'rot_z'))
def _reset_params_fired(self):
self.model.reset()
def _rot_x_dec_fired(self):
self.rot_x -= self.rot_step
def _rot_x_inc_fired(self):
self.rot_x += self.rot_step
def _rot_y_dec_fired(self):
self.rot_y -= self.rot_step
def _rot_y_inc_fired(self):
self.rot_y += self.rot_step
def _rot_z_dec_fired(self):
self.rot_z -= self.rot_step
def _rot_z_inc_fired(self):
self.rot_z += self.rot_step
def _scale_x_dec_fired(self):
self.scale_x -= self.scale_step
def _scale_x_inc_fired(self):
self.scale_x += self.scale_step
def _scale_y_dec_fired(self):
self.scale_y -= self.scale_step
def _scale_y_inc_fired(self):
self.scale_y += self.scale_step
def _scale_z_dec_fired(self):
self.scale_z -= self.scale_step
def _scale_z_inc_fired(self):
self.scale_z += self.scale_step
def _trans_x_dec_fired(self):
self.trans_x -= self.trans_step
def _trans_x_inc_fired(self):
self.trans_x += self.trans_step
def _trans_y_dec_fired(self):
self.trans_y -= self.trans_step
def _trans_y_inc_fired(self):
self.trans_y += self.trans_step
def _trans_z_dec_fired(self):
self.trans_z -= self.trans_step
def _trans_z_inc_fired(self):
self.trans_z += self.trans_step
class NewMriDialog(HasPrivateTraits):
"""New MRI dialog."""
# Dialog to determine target subject name for a scaled MRI
subjects_dir = Directory
subject_to = Str
subject_from = Str
subject_to_dir = Property(depends_on=['subjects_dir', 'subject_to'])
subject_to_exists = Property(Bool, depends_on='subject_to_dir')
feedback = Str(' ' * 100)
can_overwrite = Bool
overwrite = Bool
can_save = Bool
view = View(Item('subject_to', label='New MRI Subject Name', tooltip="A "
"new folder with this name will be created in the "
"current subjects_dir for the scaled MRI files"),
Item('feedback', show_label=False, style='readonly'),
Item('overwrite', enabled_when='can_overwrite', tooltip="If a "
"subject with the chosen name exists, delete the old "
"subject"),
buttons=[CancelButton,
Action(name='OK', enabled_when='can_save')])
def _can_overwrite_changed(self, new):
if not new:
self.overwrite = False
@cached_property
def _get_subject_to_dir(self):
return os.path.join(self.subjects_dir, self.subject_to)
@cached_property
def _get_subject_to_exists(self):
if not self.subject_to:
return False
elif os.path.exists(self.subject_to_dir):
return True
else:
return False
@on_trait_change('subject_to_dir,overwrite')
def update_dialog(self):
if not self.subject_from:
# weird trait state that occurs even when subject_from is set
return
elif not self.subject_to:
self.feedback = "No subject specified..."
self.can_save = False
self.can_overwrite = False
elif self.subject_to == self.subject_from:
self.feedback = "Must be different from MRI source subject..."
self.can_save = False
self.can_overwrite = False
elif self.subject_to_exists:
if self.overwrite:
self.feedback = "%s will be overwritten." % self.subject_to
self.can_save = True
self.can_overwrite = True
else:
self.feedback = "Subject already exists..."
self.can_save = False
self.can_overwrite = True
else:
self.feedback = "Name ok."
self.can_save = True
self.can_overwrite = False
def _make_view(tabbed=False, split=False, width=800, height=600,
scrollable=True):
"""Create a view for the CoregFrame."""
# Set the width to 0.99 to "push out" as much as possible, use
# scene_width in the View below
scene = Item('scene', show_label=False, width=0.99,
editor=SceneEditor(scene_class=MayaviScene))
data_panel = VGroup(
Item('data_panel', style='custom',
width=_COREG_WIDTH if scrollable else 1,
editor=InstanceEditor(view=_make_view_data_panel(scrollable))),
label='Data', show_border=not scrollable, show_labels=False)
# Setting `scrollable=True` for a Group does not seem to have any effect
# (macOS), in order to be effective the parameter has to be set for a View
# object; hence we use a special InstanceEditor to set the parameter
# programmatically:
coreg_panel = VGroup(
Item('coreg_panel', style='custom',
width=_COREG_WIDTH if scrollable else 1,
editor=InstanceEditor(view=_make_view_coreg_panel(scrollable))),
label="Coregistration", show_border=not scrollable, show_labels=False,
enabled_when="data_panel.fid_panel.locked")
main_layout = 'split' if split else 'normal'
if tabbed:
main = HGroup(scene,
Group(data_panel, coreg_panel, show_labels=False,
layout='tabbed'),
layout=main_layout)
else:
main = HGroup(data_panel, scene, coreg_panel, show_labels=False,
layout=main_layout)
# Pass the requested width and height through to the View so the window
# opens at the size given by the caller
view = View(main, resizable=True, handler=CoregFrameHandler(),
buttons=NoButtons, width=width, height=height,
statusbar=[StatusItem('status_text', width=0.55),
StatusItem('queue_status_text', width=0.45)])
return view
class ViewOptionsPanel(HasTraits):
"""View options panel."""
mri_obj = Instance(SurfaceObject)
hsp_obj = Instance(PointObject)
eeg_obj = Instance(PointObject)
hpi_obj = Instance(PointObject)
hsp_cf_obj = Instance(PointObject)
mri_cf_obj = Instance(PointObject)
bgcolor = RGBColor()
coord_frame = Enum('mri', 'head', label='Display coordinate frame')
head_high_res = Bool(True, label='Show high-resolution head')
view = View(
VGroup(
Item('mri_obj', style='custom', label="MRI"),
Item('hsp_obj', style='custom', label="Head shape"),
Item('eeg_obj', style='custom', label='EEG'),
Item('hpi_obj', style='custom', label='HPI'),
VGrid(Item('coord_frame', style='custom',
editor=EnumEditor(values={'mri': '1:MRI',
'head': '2:Head'}, cols=2,
format_func=_pass)),
Spring(),
Item('head_high_res'),
Spring(), columns=2, show_labels=True),
Item('hsp_cf_obj', style='custom', label='Head axes'),
Item('mri_cf_obj', style='custom', label='MRI axes'),
HGroup(Item('bgcolor', label='Background'), Spring()),
), title="Display options")
class DataPanelHandler(Handler):
"""Open other windows with proper parenting."""
info = Instance(UIInfo)
def object_view_options_panel_changed(self, info): # noqa: D102
self.info = info
def object_view_options_changed(self, info): # noqa: D102
self.info.object.view_options_panel.edit_traits(
parent=self.info.ui.control)
class DataPanel(HasTraits):
"""Data loading panel."""
# Set by CoregPanel
model = Instance(CoregModel)
scene = Instance(MlabSceneModel, ())
lock_fiducials = DelegatesTo('model')
guess_mri_subject = DelegatesTo('model')
raw_src = DelegatesTo('model', 'hsp')
# Set internally
subject_panel = Instance(SubjectSelectorPanel)
fid_panel = Instance(FiducialsPanel)
headview = Instance(HeadViewController)
view_options_panel = Instance(ViewOptionsPanel)
hsp_always_visible = Bool(False, label="Always Show Head Shape")
view_options = Button(label="Display options...")
# Omit Points
distance = Float(10., desc="maximal distance for head shape points from "
"the surface (mm)")
omit_points = Button(label='Omit', desc="to omit head shape points "
"for the purpose of the automatic coregistration "
"procedure (mm).")
grow_hair = DelegatesTo('model')
reset_omit_points = Button(label=_RESET_LABEL, desc="to reset the "
"omission of head shape points to include all.")
omitted_info = Str('No points omitted')
def _subject_panel_default(self):
return SubjectSelectorPanel(model=self.model.mri.subject_source)
def _fid_panel_default(self):
return FiducialsPanel(model=self.model.mri, headview=self.headview)
def _headview_default(self):
return HeadViewController(system='RAS', scene=self.scene)
def _omit_points_fired(self):
distance = self.distance / 1000.
self.model.omit_hsp_points(distance)
n_omitted = self.model.hsp.n_omitted
self.omitted_info = (
"%s pt%s omitted (%0.1f mm)"
% (n_omitted if n_omitted > 0 else 'No', _pl(n_omitted),
self.distance))
@on_trait_change('model:hsp:file')
def _file_change(self):
self._reset_omit_points_fired()
def _reset_omit_points_fired(self):
self.model.omit_hsp_points(np.inf)
self.omitted_info = 'No points omitted (reset)'
class CoregFrame(HasTraits):
"""GUI for head-MRI coregistration."""
model = Instance(CoregModel)
scene = Instance(MlabSceneModel, ())
head_high_res = Bool(True)
data_panel = Instance(DataPanel)
coreg_panel = Instance(CoregPanel) # right panel
project_to_surface = DelegatesTo('eeg_obj')
orient_to_surface = DelegatesTo('hsp_obj')
scale_by_distance = DelegatesTo('hsp_obj')
mark_inside = DelegatesTo('hsp_obj')
status_text = DelegatesTo('model')
queue_status_text = DelegatesTo('coreg_panel')
fid_ok = DelegatesTo('model', 'mri.fid_ok')
lock_fiducials = DelegatesTo('model')
title = Str('MNE Coreg')
# visualization (MRI)
mri_obj = Instance(SurfaceObject)
mri_lpa_obj = Instance(PointObject)
mri_nasion_obj = Instance(PointObject)
mri_rpa_obj = Instance(PointObject)
bgcolor = RGBColor((0.5, 0.5, 0.5))
# visualization (Digitization)
hsp_obj = Instance(PointObject)
eeg_obj = Instance(PointObject)
hpi_obj = Instance(PointObject)
hsp_lpa_obj = Instance(PointObject)
hsp_nasion_obj = Instance(PointObject)
hsp_rpa_obj = Instance(PointObject)
hsp_visible = Property(depends_on=['data_panel:hsp_always_visible',
'lock_fiducials'])
# Coordinate frame axes
hsp_cf_obj = Instance(PointObject)
mri_cf_obj = Instance(PointObject)
picker = Instance(object)
# Processing
queue = DelegatesTo('coreg_panel')
view = _make_view()
def _model_default(self):
return CoregModel(
scale_labels=self._config.get(
'MNE_COREG_SCALE_LABELS', 'true') == 'true',
copy_annot=self._config.get(
'MNE_COREG_COPY_ANNOT', 'true') == 'true',
prepare_bem_model=self._config.get(
'MNE_COREG_PREPARE_BEM', 'true') == 'true')
def _data_panel_default(self):
return DataPanel(model=self.model, scene=self.scene)
def _coreg_panel_default(self):
return CoregPanel(model=self.model)
def __init__(self, raw=None, subject=None, subjects_dir=None,
guess_mri_subject=True, head_opacity=1.,
head_high_res=True, trans=None, config=None,
project_eeg=False, orient_to_surface=False,
scale_by_distance=False, mark_inside=False,
interaction='trackball', scale=0.16): # noqa: D102
self._config = config or {}
super(CoregFrame, self).__init__(guess_mri_subject=guess_mri_subject,
head_high_res=head_high_res)
self._initial_kwargs = dict(project_eeg=project_eeg,
orient_to_surface=orient_to_surface,
scale_by_distance=scale_by_distance,
mark_inside=mark_inside,
head_opacity=head_opacity,
interaction=interaction,
scale=scale)
self._locked_opacity = self._initial_kwargs['head_opacity']
if not 0 <= head_opacity <= 1:
raise ValueError(
"head_opacity needs to be a floating point number between 0 "
"and 1, got %r" % (head_opacity,))
if (subjects_dir is not None) and os.path.isdir(subjects_dir):
self.model.mri.subjects_dir = subjects_dir
if raw is not None:
self.model.hsp.file = raw
if subject is not None:
if subject not in self.model.mri.subject_source.subjects:
msg = "%s is not a valid subject. " % subject
# no subjects -> ['']
if any(self.model.mri.subject_source.subjects):
ss = ', '.join(self.model.mri.subject_source.subjects)
msg += ("The following subjects have been found: %s "
"(subjects_dir=%s). " %
(ss, self.model.mri.subjects_dir))
else:
msg += ("No subjects were found in subjects_dir=%s. " %
self.model.mri.subjects_dir)
msg += ("Make sure all MRI subjects have head shape files "
"(run $ mne make_scalp_surfaces).")
raise ValueError(msg)
self.model.mri.subject = subject
if trans is not None:
try:
self.model.load_trans(trans)
except Exception as e:
error(None, "Error loading trans file %s: %s (See terminal "
"for details)" % (trans, e), "Error Loading Trans File")
@on_trait_change('subject_panel:subject')
def _set_title(self):
self.title = '%s - MNE Coreg' % self.model.mri.subject
@on_trait_change('scene:activated')
def _init_plot(self):
_toggle_mlab_render(self, False)
lpa_color = defaults['lpa_color']
nasion_color = defaults['nasion_color']
rpa_color = defaults['rpa_color']
# MRI scalp
color = defaults['head_color']
self.mri_obj = SurfaceObject(
points=np.empty((0, 3)), color=color, tris=np.empty((0, 3)),
scene=self.scene, name="MRI Scalp", block_behind=True,
# opacity=self._initial_kwargs['head_opacity'],
# setting opacity here causes points to be
# [[0, 0, 0]] -- why??
)
self.mri_obj.opacity = self._initial_kwargs['head_opacity']
self.data_panel.fid_panel.hsp_obj = self.mri_obj
# Do not do sync_trait here, instead use notifiers elsewhere
# MRI Fiducials
point_scale = defaults['mri_fid_scale']
self.mri_lpa_obj = PointObject(scene=self.scene, color=lpa_color,
has_norm=True, point_scale=point_scale,
name='LPA')
self.model.sync_trait('transformed_mri_lpa',
self.mri_lpa_obj, 'points', mutual=False)
self.mri_nasion_obj = PointObject(scene=self.scene, color=nasion_color,
has_norm=True,
point_scale=point_scale,
name='Nasion')
self.model.sync_trait('transformed_mri_nasion',
self.mri_nasion_obj, 'points', mutual=False)
self.mri_rpa_obj = PointObject(scene=self.scene, color=rpa_color,
has_norm=True, point_scale=point_scale,
name='RPA')
self.model.sync_trait('transformed_mri_rpa',
self.mri_rpa_obj, 'points', mutual=False)
# Digitizer Head Shape
kwargs = dict(
view='cloud', scene=self.scene, resolution=20,
orient_to_surface=self._initial_kwargs['orient_to_surface'],
scale_by_distance=self._initial_kwargs['scale_by_distance'],
mark_inside=self._initial_kwargs['mark_inside'])
self.hsp_obj = PointObject(
color=defaults['extra_color'], name='Extra', has_norm=True,
point_scale=defaults['extra_scale'], **kwargs)
self.model.sync_trait('transformed_hsp_points',
self.hsp_obj, 'points', mutual=False)
# Digitizer EEG
self.eeg_obj = PointObject(
color=defaults['eeg_color'], point_scale=defaults['eeg_scale'],
name='EEG', projectable=True, has_norm=True,
project_to_surface=self._initial_kwargs['project_eeg'], **kwargs)
self.model.sync_trait('transformed_hsp_eeg_points',
self.eeg_obj, 'points', mutual=False)
# Digitizer HPI
self.hpi_obj = PointObject(
color=defaults['hpi_color'], name='HPI', has_norm=True,
point_scale=defaults['hpi_scale'], **kwargs)
self.model.sync_trait('transformed_hsp_hpi',
self.hpi_obj, 'points', mutual=False)
for p in (self.hsp_obj, self.eeg_obj, self.hpi_obj):
p.inside_color = self.mri_obj.color
self.mri_obj.sync_trait('color', p, 'inside_color',
mutual=False)
# Digitizer Fiducials
point_scale = defaults['dig_fid_scale']
opacity = defaults['dig_fid_opacity']
self.hsp_lpa_obj = PointObject(
scene=self.scene, color=lpa_color, opacity=opacity,
has_norm=True, point_scale=point_scale, name='HSP-LPA')
self.model.sync_trait('transformed_hsp_lpa',
self.hsp_lpa_obj, 'points', mutual=False)
self.hsp_nasion_obj = PointObject(
scene=self.scene, color=nasion_color, opacity=opacity,
has_norm=True, point_scale=point_scale, name='HSP-Nasion')
self.model.sync_trait('transformed_hsp_nasion',
self.hsp_nasion_obj, 'points', mutual=False)
self.hsp_rpa_obj = PointObject(
scene=self.scene, color=rpa_color, opacity=opacity,
has_norm=True, point_scale=point_scale, name='HSP-RPA')
self.model.sync_trait('transformed_hsp_rpa',
self.hsp_rpa_obj, 'points', mutual=False)
# All points share these
for p in (self.hsp_obj, self.eeg_obj, self.hpi_obj,
self.hsp_lpa_obj, self.hsp_nasion_obj, self.hsp_rpa_obj):
self.sync_trait('hsp_visible', p, 'visible', mutual=False)
on_pick = self.scene.mayavi_scene.on_mouse_pick
self.picker = on_pick(self.data_panel.fid_panel._on_pick, type='cell')
# Coordinate frame axes
self.mri_cf_obj = PointObject(
scene=self.scene, color=self.mri_obj.color,
opacity=self.mri_obj.opacity, label_scale=5e-3,
point_scale=0.02, name='MRI', view='arrow')
self.mri_obj.sync_trait('color', self.mri_cf_obj, mutual=False)
self._update_mri_axes()
self.hsp_cf_obj = PointObject(
scene=self.scene, color=self.hsp_obj.color,
opacity=self.mri_obj.opacity, label_scale=5e-3,
point_scale=0.02, name='Head', view='arrow')
        # Keep the head coordinate-frame arrows colored like the digitizer head shape
        # (mirrors the MRI axes above).
        self.hsp_obj.sync_trait('color', self.hsp_cf_obj, mutual=False)
self._update_hsp_axes()
self.sync_trait('bgcolor', self.scene, 'background')
self._update_mri_obj()
self._update_projections()
self.mri_obj.plot()
_toggle_mlab_render(self, True)
self.scene.render()
self.scene.camera.focal_point = (0., 0., 0.)
self.data_panel.view_options_panel = ViewOptionsPanel(
mri_obj=self.mri_obj, hsp_obj=self.hsp_obj,
eeg_obj=self.eeg_obj, hpi_obj=self.hpi_obj,
hsp_cf_obj=self.hsp_cf_obj, mri_cf_obj=self.mri_cf_obj,
head_high_res=self.head_high_res,
bgcolor=self.bgcolor)
self.data_panel.headview.scale = self._initial_kwargs['scale']
self.data_panel.headview.interaction = \
self._initial_kwargs['interaction']
self.data_panel.headview.left = True
self.data_panel.view_options_panel.sync_trait(
'coord_frame', self.model)
self.data_panel.view_options_panel.sync_trait('head_high_res', self)
self.data_panel.view_options_panel.sync_trait('bgcolor', self)
@on_trait_change('lock_fiducials')
def _on_lock_change(self):
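        # While fiducials are unlocked the scalp surface is forced fully opaque so the
        # MRI fiducials can be placed; the previous opacity is remembered and restored
        # once the fiducials are locked again.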
if not self.lock_fiducials:
if self.mri_obj is None:
self._initial_kwargs['head_opacity'] = 1.
else:
self._locked_opacity = self.mri_obj.opacity
self.mri_obj.opacity = 1.
else:
if self.mri_obj is not None:
self.mri_obj.opacity = self._locked_opacity
@cached_property
def _get_hsp_visible(self):
return self.data_panel.hsp_always_visible or self.lock_fiducials
@on_trait_change('model:mri_trans')
def _update_mri_axes(self):
if self.mri_cf_obj is None:
return
nn = apply_trans(self.model.mri_trans, np.eye(3), move=False)
pts = apply_trans(self.model.mri_trans, np.zeros((3, 3)))
self.mri_cf_obj.nn = nn
self.mri_cf_obj.points = pts
@on_trait_change('model:hsp_trans')
def _update_hsp_axes(self):
if self.hsp_cf_obj is None:
return
nn = apply_trans(self.model.hsp_trans, np.eye(3), move=False)
pts = apply_trans(self.model.hsp_trans, np.zeros((3, 3)))
self.hsp_cf_obj.nn = nn
self.hsp_cf_obj.points = pts
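    # Both axis updaters above use the same recipe: apply_trans(trans, np.eye(3),
    # move=False) rotates the identity basis into the three arrow directions (nn),
    # while apply_trans(trans, np.zeros((3, 3))) translates three copies of the
    # origin into the arrow base points (pts).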
@on_trait_change('model:mri:bem_low_res:surf,'
'model:transformed_low_res_mri_points')
def _update_projections(self):
for p in (self.eeg_obj, self.hsp_obj, self.hpi_obj):
if p is not None:
p.project_to_tris = self.model.mri.bem_low_res.surf.tris
p.project_to_points = self.model.transformed_low_res_mri_points
@on_trait_change('model:mri:bem_low_res:surf,head_high_res,'
'model:transformed_high_res_mri_points')
def _update_mri_obj(self):
if self.mri_obj is None:
return
self.mri_obj.tris = getattr(
self.model.mri, 'bem_%s_res'
% ('high' if self.head_high_res else 'low',)).surf.tris
self.mri_obj.points = getattr(
self.model, 'transformed_%s_res_mri_points'
% ('high' if self.head_high_res else 'low',))
# automatically lock fiducials if a good fiducials file is loaded
@on_trait_change('model:mri:fid_file')
def _on_fid_file_loaded(self):
self.data_panel.fid_panel.locked = bool(self.model.mri.fid_file)
def save_config(self, home_dir=None, size=None):
"""Write configuration values."""
        def s_c(key, value, lower=True):
            # Persist the value as a string; lower-case it unless the caller opts out
            # (needed for case-sensitive values such as MNE_COREG_SUBJECTS_DIR).
            value = str(value)
            if lower:
                value = value.lower()
            set_config(key, value, home_dir=home_dir, set_env=False)
s_c('MNE_COREG_GUESS_MRI_SUBJECT', self.model.guess_mri_subject)
s_c('MNE_COREG_HEAD_HIGH_RES', self.head_high_res)
if self.lock_fiducials:
opacity = self.mri_obj.opacity
else:
opacity = self._locked_opacity
s_c('MNE_COREG_HEAD_OPACITY', opacity)
if size is not None:
s_c('MNE_COREG_WINDOW_WIDTH', size[0])
s_c('MNE_COREG_WINDOW_HEIGHT', size[1])
s_c('MNE_COREG_SCENE_SCALE', self.data_panel.headview.scale)
s_c('MNE_COREG_SCALE_LABELS', self.model.scale_labels)
s_c('MNE_COREG_COPY_ANNOT', self.model.copy_annot)
s_c('MNE_COREG_PREPARE_BEM', self.model.prepare_bem_model)
if self.model.mri.subjects_dir:
s_c('MNE_COREG_SUBJECTS_DIR', self.model.mri.subjects_dir, False)
s_c('MNE_COREG_PROJECT_EEG', self.project_to_surface)
s_c('MNE_COREG_ORIENT_TO_SURFACE', self.orient_to_surface)
s_c('MNE_COREG_SCALE_BY_DISTANCE', self.scale_by_distance)
s_c('MNE_COREG_MARK_INSIDE', self.mark_inside)
s_c('MNE_COREG_INTERACTION', self.data_panel.headview.interaction)
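        # Rough read-back sketch (not part of the original code): values written by s_c
        # are plain strings (lower-cased unless lower=False), so a reader using
        # get_config() would convert them explicitly, e.g.
        #   opacity = float(get_config('MNE_COREG_HEAD_OPACITY', '1.'))
        #   high_res = get_config('MNE_COREG_HEAD_HIGH_RES', 'true') == 'true'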
test_browser.py
# coding=utf-8
# Copyright 2013 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
from __future__ import print_function
import argparse
import json
import multiprocessing
import os
import random
import re
import shlex
import shutil
import subprocess
import time
import unittest
import webbrowser
import zlib
from runner import BrowserCore, path_from_root, has_browser, EMTEST_BROWSER
from tools import system_libs
from tools.shared import PYTHON, EMCC, WINDOWS, FILE_PACKAGER, PIPE, SPIDERMONKEY_ENGINE, JS_ENGINES
from tools.shared import try_delete, Building, run_process, run_js
try:
from http.server import BaseHTTPRequestHandler, HTTPServer
except ImportError:
# Python 2 compatibility
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
def test_chunked_synchronous_xhr_server(support_byte_ranges, chunkSize, data, checksum, port):
class ChunkedServerHandler(BaseHTTPRequestHandler):
def sendheaders(s, extra=[], length=len(data)):
s.send_response(200)
s.send_header("Content-Length", str(length))
s.send_header("Access-Control-Allow-Origin", "http://localhost:%s" % port)
s.send_header("Access-Control-Expose-Headers", "Content-Length, Accept-Ranges")
s.send_header("Content-type", "application/octet-stream")
if support_byte_ranges:
s.send_header("Accept-Ranges", "bytes")
for i in extra:
s.send_header(i[0], i[1])
s.end_headers()
def do_HEAD(s):
s.sendheaders()
def do_OPTIONS(s):
s.sendheaders([("Access-Control-Allow-Headers", "Range")], 0)
def do_GET(s):
if not support_byte_ranges:
s.sendheaders()
s.wfile.write(data)
else:
start, end = s.headers.get("range").split("=")[1].split("-")
start = int(start)
end = int(end)
end = min(len(data) - 1, end)
length = end - start + 1
s.sendheaders([], length)
s.wfile.write(data[start:end + 1])
# CORS preflight makes OPTIONS requests which we need to account for.
expectedConns = 22
httpd = HTTPServer(('localhost', 11111), ChunkedServerHandler)
for i in range(expectedConns + 1):
httpd.handle_request()
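# Minimal sketch (not part of the original suite) of how do_GET above maps an HTTP Range
# header onto a slice of `data`; the helper name is hypothetical and exists only to
# document the parsing.
def _example_range_to_slice(range_header, data_len):
  # "bytes=START-END" -> (offset, length), clamping END to the last valid index.
  start, end = range_header.split("=")[1].split("-")
  start = int(start)
  end = min(data_len - 1, int(end))
  return start, end - start + 1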
def shell_with_script(shell_file, output_file, replacement):
with open(path_from_root('src', shell_file)) as input:
with open(output_file, 'w') as output:
output.write(input.read().replace('{{{ SCRIPT }}}', replacement))
def is_chrome():
return EMTEST_BROWSER and 'chrom' in EMTEST_BROWSER.lower()
def no_chrome(note='chrome is not supported'):
if is_chrome():
return unittest.skip(note)
return lambda f: f
def no_swiftshader(f):
def decorated(self):
if is_chrome() and '--use-gl=swiftshader' in EMTEST_BROWSER:
self.skipTest('not compatible with swiftshader')
return f(self)
return decorated
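# Usage sketch (illustrative only): the helpers above are meant to decorate individual
# browser tests, e.g.
#   @no_chrome('relies on a Firefox-only event API')
#   @no_swiftshader
#   def test_something(self): ...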
requires_graphics_hardware = unittest.skipIf(os.getenv('EMTEST_LACKS_GRAPHICS_HARDWARE'), "This test requires graphics hardware")
requires_sound_hardware = unittest.skipIf(os.getenv('EMTEST_LACKS_SOUND_HARDWARE'), "This test requires sound hardware")
class browser(BrowserCore):
@classmethod
def setUpClass(self):
super(browser, self).setUpClass()
self.browser_timeout = 20
print()
print('Running the browser tests. Make sure the browser allows popups from localhost.')
print()
def test_sdl1_in_emscripten_nonstrict_mode(self):
if 'EMCC_STRICT' in os.environ and int(os.environ['EMCC_STRICT']):
self.skipTest('This test requires being run in non-strict mode (EMCC_STRICT env. variable unset)')
    # TODO: This test verifies behavior that will be deprecated at some point in the future;
    # remove it once system JS libraries are no longer automatically linked.
self.btest('hello_world_sdl.cpp', reference='htmltest.png')
def test_sdl1(self):
self.btest('hello_world_sdl.cpp', reference='htmltest.png', args=['-lSDL', '-lGL'])
self.btest('hello_world_sdl.cpp', reference='htmltest.png', args=['-s', 'USE_SDL=1', '-lGL']) # is the default anyhow
# Deliberately named as test_zzz_* to make this test the last one
# as this test may take the focus away from the main test window
# by opening a new window and possibly not closing it.
def test_zzz_html_source_map(self):
if not has_browser():
self.skipTest('need a browser')
cpp_file = os.path.join(self.get_dir(), 'src.cpp')
html_file = os.path.join(self.get_dir(), 'src.html')
# browsers will try to 'guess' the corresponding original line if a
# generated line is unmapped, so if we want to make sure that our
# numbering is correct, we need to provide a couple of 'possible wrong
# answers'. thus, we add some printf calls so that the cpp file gets
# multiple mapped lines. in other words, if the program consists of a
# single 'throw' statement, browsers may just map any thrown exception to
# that line, because it will be the only mapped line.
with open(cpp_file, 'w') as f:
f.write(r'''
#include <cstdio>
int main() {
printf("Starting test\n");
try {
throw 42; // line 8
} catch (int e) { }
printf("done\n");
return 0;
}
''')
# use relative paths when calling emcc, because file:// URIs can only load
# sourceContent when the maps are relative paths
try_delete(html_file)
try_delete(html_file + '.map')
run_process([PYTHON, EMCC, 'src.cpp', '-o', 'src.html', '-g4', '-s', 'WASM=0'], cwd=self.get_dir())
assert os.path.exists(html_file)
assert os.path.exists(html_file + '.map')
webbrowser.open_new('file://' + html_file)
print('''
If manually bisecting:
Check that you see src.cpp among the page sources.
Even better, add a breakpoint, e.g. on the printf, then reload, then step
through and see the print (best to run with EMTEST_SAVE_DIR=1 for the reload).
''')
def test_emscripten_log(self):
# TODO: wasm support for source maps
src = os.path.join(self.get_dir(), 'src.cpp')
open(src, 'w').write(self.with_report_result(open(path_from_root('tests', 'emscripten_log', 'emscripten_log.cpp')).read()))
run_process([PYTHON, EMCC, src, '--pre-js', path_from_root('src', 'emscripten-source-map.min.js'), '-g', '-o', 'page.html', '-s', 'DEMANGLE_SUPPORT=1', '-s', 'WASM=0'])
self.run_browser('page.html', None, '/report_result?1')
def build_native_lzma(self):
lzma_native = path_from_root('third_party', 'lzma.js', 'lzma-native')
if os.path.isfile(lzma_native) and os.access(lzma_native, os.X_OK):
return
cwd = os.getcwd()
try:
os.chdir(path_from_root('third_party', 'lzma.js'))
# On Windows prefer using MinGW make if it exists, otherwise fall back to hoping we have cygwin make.
if WINDOWS and Building.which('mingw32-make'):
run_process(['doit.bat'])
else:
run_process(['sh', './doit.sh'])
finally:
os.chdir(cwd)
def test_preload_file(self):
absolute_src_path = os.path.join(self.get_dir(), 'somefile.txt').replace('\\', '/')
open(absolute_src_path, 'w').write('''load me right before running the code please''')
absolute_src_path2 = os.path.join(self.get_dir(), '.somefile.txt').replace('\\', '/')
open(absolute_src_path2, 'w').write('''load me right before running the code please''')
absolute_src_path3 = os.path.join(self.get_dir(), 'some@file.txt').replace('\\', '/')
open(absolute_src_path3, 'w').write('''load me right before running the code please''')
def make_main(path):
print('make main at', path)
path = path.replace('\\', '\\\\').replace('"', '\\"') # Escape tricky path name for use inside a C string.
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("load me right before", buf);
REPORT_RESULT(result);
return 0;
}
''' % path))
test_cases = [
# (source preload-file string, file on target FS to load)
("somefile.txt", "somefile.txt"),
(".somefile.txt@somefile.txt", "somefile.txt"),
("./somefile.txt", "somefile.txt"),
("somefile.txt@file.txt", "file.txt"),
("./somefile.txt@file.txt", "file.txt"),
("./somefile.txt@./file.txt", "file.txt"),
("somefile.txt@/file.txt", "file.txt"),
("somefile.txt@/", "somefile.txt"),
(absolute_src_path + "@file.txt", "file.txt"),
(absolute_src_path + "@/file.txt", "file.txt"),
(absolute_src_path + "@/", "somefile.txt"),
("somefile.txt@/directory/file.txt", "/directory/file.txt"),
("somefile.txt@/directory/file.txt", "directory/file.txt"),
(absolute_src_path + "@/directory/file.txt", "directory/file.txt"),
("some@@file.txt@other.txt", "other.txt"),
("some@@file.txt@some@@otherfile.txt", "some@otherfile.txt")]
for test in test_cases:
(srcpath, dstpath) = test
print('Testing', srcpath, dstpath)
make_main(dstpath)
run_process([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', srcpath, '-o', 'page.html'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Test that '--no-heap-copy' works.
if WINDOWS:
# On Windows, the following non-alphanumeric non-control code ASCII characters are supported.
# The characters <, >, ", |, ?, * are not allowed, because the Windows filesystem doesn't support those.
tricky_filename = '!#$%&\'()+,-. ;=@[]^_`{}~.txt'
else:
# All 7-bit non-alphanumeric non-control code ASCII characters except /, : and \ are allowed.
tricky_filename = '!#$%&\'()+,-. ;=@[]^_`{}~ "*<>?|.txt'
open(os.path.join(self.get_dir(), tricky_filename), 'w').write('''load me right before running the code please''')
make_main(tricky_filename)
# As an Emscripten-specific feature, the character '@' must be escaped in the form '@@' to not confuse with the 'src@dst' notation.
run_process([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', tricky_filename.replace('@', '@@'), '--no-heap-copy', '-o', 'page.html'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# By absolute path
make_main('somefile.txt') # absolute becomes relative
run_process([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', absolute_src_path, '-o', 'page.html'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Test subdirectory handling with asset packaging.
try_delete(self.in_dir('assets'))
os.makedirs(os.path.join(self.get_dir(), 'assets/sub/asset1/').replace('\\', '/'))
    os.makedirs(os.path.join(self.get_dir(), 'assets/sub/asset1/.git').replace('\\', '/')) # Add a directory that should end up excluded from the package.
os.makedirs(os.path.join(self.get_dir(), 'assets/sub/asset2/').replace('\\', '/'))
open(os.path.join(self.get_dir(), 'assets/sub/asset1/file1.txt'), 'w').write('''load me right before running the code please''')
open(os.path.join(self.get_dir(), 'assets/sub/asset1/.git/shouldnt_be_embedded.txt'), 'w').write('''this file should not get embedded''')
open(os.path.join(self.get_dir(), 'assets/sub/asset2/file2.txt'), 'w').write('''load me right before running the code please''')
absolute_assets_src_path = os.path.join(self.get_dir(), 'assets').replace('\\', '/')
def make_main_two_files(path1, path2, nonexistingpath):
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("load me right before", buf);
f = fopen("%s", "r");
if (f == NULL)
result = 0;
fclose(f);
f = fopen("%s", "r");
if (f != NULL)
result = 0;
REPORT_RESULT(result);
return 0;
}
''' % (path1, path2, nonexistingpath)))
test_cases = [
# (source directory to embed, file1 on target FS to load, file2 on target FS to load, name of a file that *shouldn't* exist on VFS)
("assets", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets/", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets/@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets@./", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
(absolute_assets_src_path + "@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
(absolute_assets_src_path + "@/assets", "/assets/sub/asset1/file1.txt", "/assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt")]
for test in test_cases:
(srcpath, dstpath1, dstpath2, nonexistingpath) = test
make_main_two_files(dstpath1, dstpath2, nonexistingpath)
print(srcpath)
run_process([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', srcpath, '--exclude-file', '*/.*', '-o', 'page.html'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Should still work with -o subdir/..
make_main('somefile.txt') # absolute becomes relative
try:
os.mkdir(os.path.join(self.get_dir(), 'dirrey'))
except:
pass
run_process([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', absolute_src_path, '-o', 'dirrey/page.html'])
self.run_browser('dirrey/page.html', 'You should see |load me right before|.', '/report_result?1')
# With FS.preloadFile
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module.preRun = function() {
FS.createPreloadedFile('/', 'someotherfile.txt', 'somefile.txt', true, false); // we need --use-preload-plugins for this.
};
''')
make_main('someotherfile.txt')
run_process([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--pre-js', 'pre.js', '-o', 'page.html', '--use-preload-plugins'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Tests that user .html shell files can manually download .data files created with --preload-file cmdline.
def test_preload_file_with_manual_data_download(self):
src = os.path.join(self.get_dir(), 'src.cpp')
open(src, 'w').write(self.with_report_result(open(os.path.join(path_from_root('tests/manual_download_data.cpp'))).read()))
data = os.path.join(self.get_dir(), 'file.txt')
open(data, 'w').write('''Hello!''')
run_process([PYTHON, EMCC, 'src.cpp', '-o', 'manual_download_data.js', '--preload-file', data + '@/file.txt'])
shutil.copyfile(path_from_root('tests', 'manual_download_data.html'), os.path.join(self.get_dir(), 'manual_download_data.html'))
self.run_browser('manual_download_data.html', 'Hello!', '/report_result?1')
# Tests that if the output files have single or double quotes in them, that it will be handled by correctly escaping the names.
def test_output_file_escaping(self):
tricky_part = '\'' if WINDOWS else '\' and \"' # On Windows, files/directories may not contain a double quote character. On non-Windowses they can, so test that.
d = 'dir with ' + tricky_part
abs_d = os.path.join(self.get_dir(), d)
try:
os.mkdir(abs_d)
except:
pass
txt = 'file with ' + tricky_part + '.txt'
abs_txt = os.path.join(abs_d, txt)
open(abs_txt, 'w').write('load me right before')
cpp = os.path.join(d, 'file with ' + tricky_part + '.cpp')
open(cpp, 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("|load me right before|", buf);
REPORT_RESULT(result);
return 0;
}
''' % (txt.replace('\'', '\\\'').replace('\"', '\\"'))))
data_file = os.path.join(abs_d, 'file with ' + tricky_part + '.data')
data_js_file = os.path.join(abs_d, 'file with ' + tricky_part + '.js')
run_process([PYTHON, FILE_PACKAGER, data_file, '--use-preload-cache', '--indexedDB-name=testdb', '--preload', abs_txt + '@' + txt, '--js-output=' + data_js_file])
page_file = os.path.join(d, 'file with ' + tricky_part + '.html')
abs_page_file = os.path.join(self.get_dir(), page_file)
run_process([PYTHON, EMCC, cpp, '--pre-js', data_js_file, '-o', abs_page_file, '-s', 'FORCE_FILESYSTEM=1'])
self.run_browser(page_file, '|load me right before|.', '/report_result?0')
def test_preload_caching(self):
open(os.path.join(self.get_dir(), 'somefile.txt'), 'w').write('''load me right before running the code please''')
def make_main(path):
print(path)
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
extern "C" {
extern int checkPreloadResults();
}
int main(int argc, char** argv) {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = 0;
result += !strcmp("load me right before", buf);
result += checkPreloadResults();
REPORT_RESULT(result);
return 0;
}
''' % path))
open(os.path.join(self.get_dir(), 'test.js'), 'w').write('''
mergeInto(LibraryManager.library, {
checkPreloadResults: function() {
var cached = 0;
var packages = Object.keys(Module['preloadResults']);
packages.forEach(function(package) {
var fromCache = Module['preloadResults'][package]['fromCache'];
if (fromCache)
++ cached;
});
return cached;
}
});
''')
make_main('somefile.txt')
run_process([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--use-preload-cache', '--js-library', os.path.join(self.get_dir(), 'test.js'), '--preload-file', 'somefile.txt', '-o', 'page.html'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?2')
def test_preload_caching_indexeddb_name(self):
open(os.path.join(self.get_dir(), 'somefile.txt'), 'w').write('''load me right before running the code please''')
def make_main(path):
print(path)
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
extern "C" {
extern int checkPreloadResults();
}
int main(int argc, char** argv) {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = 0;
result += !strcmp("load me right before", buf);
result += checkPreloadResults();
REPORT_RESULT(result);
return 0;
}
''' % path))
open(os.path.join(self.get_dir(), 'test.js'), 'w').write('''
mergeInto(LibraryManager.library, {
checkPreloadResults: function() {
var cached = 0;
var packages = Object.keys(Module['preloadResults']);
packages.forEach(function(package) {
var fromCache = Module['preloadResults'][package]['fromCache'];
if (fromCache)
++ cached;
});
return cached;
}
});
''')
make_main('somefile.txt')
run_process([PYTHON, FILE_PACKAGER, os.path.join(self.get_dir(), 'somefile.data'), '--use-preload-cache', '--indexedDB-name=testdb', '--preload', os.path.join(self.get_dir(), 'somefile.txt'), '--js-output=' + os.path.join(self.get_dir(), 'somefile.js')])
run_process([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--js-library', os.path.join(self.get_dir(), 'test.js'), '--pre-js', 'somefile.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM=1'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?2')
def test_multifile(self):
# a few files inside a directory
self.clear()
os.makedirs(os.path.join(self.get_dir(), 'subdirr'))
os.makedirs(os.path.join(self.get_dir(), 'subdirr', 'moar'))
open(os.path.join(self.get_dir(), 'subdirr', 'data1.txt'), 'w').write('''1214141516171819''')
open(os.path.join(self.get_dir(), 'subdirr', 'moar', 'data2.txt'), 'w').write('''3.14159265358979''')
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[17];
FILE *f = fopen("subdirr/data1.txt", "r");
fread(buf, 1, 16, f);
buf[16] = 0;
fclose(f);
printf("|%s|\n", buf);
int result = !strcmp("1214141516171819", buf);
FILE *f2 = fopen("subdirr/moar/data2.txt", "r");
fread(buf, 1, 16, f2);
buf[16] = 0;
fclose(f2);
printf("|%s|\n", buf);
result = result && !strcmp("3.14159265358979", buf);
REPORT_RESULT(result);
return 0;
}
'''))
# by individual files
run_process([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', 'subdirr/data1.txt', '--preload-file', 'subdirr/moar/data2.txt', '-o', 'page.html'])
self.run_browser('page.html', 'You should see two cool numbers', '/report_result?1')
os.remove('page.html')
# by directory, and remove files to make sure
run_process([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', 'subdirr', '-o', 'page.html'])
shutil.rmtree(os.path.join(self.get_dir(), 'subdirr'))
self.run_browser('page.html', 'You should see two cool numbers', '/report_result?1')
def test_custom_file_package_url(self):
# a few files inside a directory
self.clear()
os.makedirs(os.path.join(self.get_dir(), 'subdirr'))
os.makedirs(os.path.join(self.get_dir(), 'cdn'))
open(os.path.join(self.get_dir(), 'subdirr', 'data1.txt'), 'w').write('''1214141516171819''')
# change the file package base dir to look in a "cdn". note that normally you would add this in your own custom html file etc., and not by
# modifying the existing shell in this manner
open(self.in_dir('shell.html'), 'w').write(open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "cdn/" + path;}}, '))
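    # locateFile(path, prefix) lets the page redirect the runtime's file requests: here
    # the .wasm stays next to the page while the packaged .data is fetched from cdn/.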
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[17];
FILE *f = fopen("subdirr/data1.txt", "r");
fread(buf, 1, 16, f);
buf[16] = 0;
fclose(f);
printf("|%s|\n", buf);
int result = !strcmp("1214141516171819", buf);
REPORT_RESULT(result);
return 0;
}
'''))
def test():
run_process([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--shell-file', 'shell.html', '--preload-file', 'subdirr/data1.txt', '-o', 'test.html'])
shutil.move('test.data', os.path.join('cdn', 'test.data'))
self.run_browser('test.html', '', '/report_result?1')
test()
def test_missing_data_throws_error(self):
def setup(assetLocalization):
self.clear()
open(self.in_dir("data.txt"), "w").write('''data''')
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
        // This code should never be executed, because the required dependency file is missing.
REPORT_RESULT(0);
return 0;
}
'''))
open(os.path.join(self.get_dir(), 'on_window_error_shell.html'), 'w').write(r'''
<html>
<center><canvas id='canvas' width='256' height='256'></canvas></center>
<hr><div id='output'></div><hr>
<script type='text/javascript'>
window.onerror = function(error) {
window.onerror = null;
var result = error.indexOf("test.data") >= 0 ? 1 : 0;
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:8888/report_result?' + result, true);
xhr.send();
setTimeout(function() { window.close() }, 1000);
}
var Module = {
locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "''' + assetLocalization + r'''" + path;}},
print: (function() {
var element = document.getElementById('output');
return function(text) { element.innerHTML += text.replace('\n', '<br>', 'g') + '<br>';};
})(),
canvas: document.getElementById('canvas')
};
</script>
{{{ SCRIPT }}}
</body>
</html>''')
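    # The shell above reports 1 iff a page-level error mentions the missing test.data
    # package; each case below only varies where the package is (not) served from.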
def test():
      # A missing file should hit xhr.onload with a status other than 200, 304 or 206.
setup("")
run_process([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
shutil.move('test.data', 'missing.data')
self.run_browser('test.html', '', '/report_result?1')
# test unknown protocol should go through xhr.onerror
setup("unknown_protocol://")
run_process([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
self.run_browser('test.html', '', '/report_result?1')
# test wrong protocol and port
setup("https://localhost:8800/")
run_process([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
self.run_browser('test.html', '', '/report_result?1')
test()
# TODO: CORS, test using a full url for locateFile
# open(self.in_dir('shell.html'), 'w').write(open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function (path) {return "http:/localhost:8888/cdn/" + path;}, '))
# test()
def test_sdl_swsurface(self):
self.btest('sdl_swsurface.c', args=['-lSDL', '-lGL'], expected='1')
def test_sdl_surface_lock_opts(self):
# Test Emscripten-specific extensions to optimize SDL_LockSurface and SDL_UnlockSurface.
self.btest('hello_world_sdl.cpp', reference='htmltest.png', message='You should see "hello, world!" and a colored cube.', args=['-DTEST_SDL_LOCK_OPTS', '-lSDL', '-lGL'])
def test_sdl_image(self):
# load an image file, get pixel data. Also O2 coverage for --preload-file, and memory-init
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.jpg'))
open(os.path.join(self.get_dir(), 'sdl_image.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_image.c')).read()))
for mem in [0, 1]:
for dest, dirname, basename in [('screenshot.jpg', '/', 'screenshot.jpg'),
('screenshot.jpg@/assets/screenshot.jpg', '/assets', 'screenshot.jpg')]:
run_process([
PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_image.c'), '-o', 'page.html', '-O2', '-lSDL', '-lGL', '--memory-init-file', str(mem),
'--preload-file', dest, '-DSCREENSHOT_DIRNAME="' + dirname + '"', '-DSCREENSHOT_BASENAME="' + basename + '"', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
def test_sdl_image_jpeg(self):
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.jpeg'))
open(os.path.join(self.get_dir(), 'sdl_image_jpeg.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_image.c')).read()))
run_process([
PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_image_jpeg.c'), '-o', 'page.html', '-lSDL', '-lGL',
'--preload-file', 'screenshot.jpeg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpeg"', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
def test_sdl_image_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl_image_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-lSDL', '-lGL'], also_proxied=True)
def test_sdl_image_prepare_data(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl_image_prepare_data.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_image_must_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.jpg'))
self.btest('sdl_image_must_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.jpg', '-lSDL', '-lGL'])
def test_sdl_stb_image(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl_stb_image.c', reference='screenshot.jpg', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_bpp(self):
# load grayscale image without alpha
self.clear()
shutil.copyfile(path_from_root('tests', 'sdl-stb-bpp1.png'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp1.png', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load grayscale image with alpha
self.clear()
shutil.copyfile(path_from_root('tests', 'sdl-stb-bpp2.png'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp2.png', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load RGB image
self.clear()
shutil.copyfile(path_from_root('tests', 'sdl-stb-bpp3.png'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp3.png', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load RGBA image
self.clear()
shutil.copyfile(path_from_root('tests', 'sdl-stb-bpp4.png'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp4.png', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_data(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl_stb_image_data.c', reference='screenshot.jpg', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_cleanup(self):
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl_stb_image_cleanup.c', expected='0', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL', '--memoryprofiler'])
def test_sdl_canvas(self):
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-lSDL', '-lGL'])
# some extra coverage
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-O0', '-s', 'SAFE_HEAP=1', '-lSDL', '-lGL'])
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-O2', '-s', 'SAFE_HEAP=1', '-lSDL', '-lGL'])
def post_manual_reftest(self, reference=None):
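    # Helper for tests built with manual_reference=True: it appends a script to the
    # generated test.html that overrides window.close so doReftest() runs only after
    # the page has had time to render, and only then closes the window.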
self.reftest(path_from_root('tests', self.reference if reference is None else reference))
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function assert(x, y) { if (!x) throw 'assertion failed ' + y }
%s
var windowClose = window.close;
window.close = function() {
// wait for rafs to arrive and the screen to update before reftesting
setTimeout(function() {
doReftest();
setTimeout(windowClose, 5000);
}, 1000);
};
</script>
</body>''' % open('reftest.js').read())
open('test.html', 'w').write(html)
def test_sdl_canvas_proxy(self):
open('data.txt', 'w').write('datum')
self.btest('sdl_canvas_proxy.c', reference='sdl_canvas_proxy.png', args=['--proxy-to-worker', '--preload-file', 'data.txt', '-lSDL', '-lGL'], manual_reference=True, post_build=self.post_manual_reftest)
@requires_graphics_hardware
def test_glgears_proxy(self):
# we modify the asm.js, this is a non-wasm test
self.btest('hello_world_gles_proxy.c', reference='gears.png', args=['--proxy-to-worker', '-s', 'GL_TESTING=1', '-DSTATIC_GEARS=1', '-lGL', '-lglut', '-s', 'WASM=0'], manual_reference=True, post_build=self.post_manual_reftest)
# test noProxy option applied at runtime
    # run normally (duplicates the test above, but verifies we can run outside of the btest harness)
self.run_browser('test.html', None, ['/report_result?0'])
# run with noProxy
self.run_browser('test.html?noProxy', None, ['/report_result?0'])
def copy(to, js_mod, html_mod=lambda x: x):
open(to + '.html', 'w').write(html_mod(open('test.html').read().replace('test.js', to + '.js')))
open(to + '.js', 'w').write(js_mod(open('test.js').read()))
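    # copy() clones the generated client (test.html / test.js) under a new name so that
    # each variant below can patch _main or doReftest without rebuilding.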
# run with noProxy, but make main thread fail
copy('two', lambda original: re.sub(r'function _main\(\$(.+),\$(.+)\) {', r'function _main($\1,$\2) { if (ENVIRONMENT_IS_WEB) { var xhr = new XMLHttpRequest(); xhr.open("GET", "http://localhost:%s/report_result?999");xhr.send(); return; }' % self.test_port, original),
lambda original: original.replace('function doReftest() {', 'function doReftest() { return; ')) # don't reftest on main thread, it would race
self.run_browser('two.html?noProxy', None, ['/report_result?999'])
copy('two', lambda original: re.sub(r'function _main\(\$(.+),\$(.+)\) {', r'function _main($\1,$\2) { if (ENVIRONMENT_IS_WEB) { var xhr = new XMLHttpRequest(); xhr.open("GET", "http://localhost:%s/report_result?999");xhr.send(); return; }' % self.test_port, original))
self.run_browser('two.html', None, ['/report_result?0']) # this is still cool
# run without noProxy, so proxy, but make worker fail
copy('three', lambda original: re.sub(r'function _main\(\$(.+),\$(.+)\) {', r'function _main($\1,$\2) { if (ENVIRONMENT_IS_WORKER) { var xhr = new XMLHttpRequest(); xhr.open("GET", "http://localhost:%s/report_result?999");xhr.send(); return; }' % self.test_port, original),
lambda original: original.replace('function doReftest() {', 'function doReftest() { return; ')) # don't reftest on main thread, it would race
self.run_browser('three.html', None, ['/report_result?999'])
copy('three', lambda original: re.sub(r'function _main\(\$(.+),\$(.+)\) {', r'function _main($\1,$\2) { if (ENVIRONMENT_IS_WORKER) { var xhr = new XMLHttpRequest(); xhr.open("GET", "http://localhost:%s/report_result?999");xhr.send(); return; }' % self.test_port, original))
self.run_browser('three.html?noProxy', None, ['/report_result?0']) # this is still cool
@requires_graphics_hardware
def test_glgears_proxy_jstarget(self):
# test .js target with --proxy-worker; emits 2 js files, client and worker
run_process([PYTHON, EMCC, path_from_root('tests', 'hello_world_gles_proxy.c'), '-o', 'test.js', '--proxy-to-worker', '-s', 'GL_TESTING=1', '-lGL', '-lglut'])
shell_with_script('shell_minimal.html', 'test.html', '<script src="test.js"></script>')
self.post_manual_reftest('gears.png')
self.run_browser('test.html', None, '/report_result?0')
def test_sdl_canvas_alpha(self):
# N.B. On Linux with Intel integrated graphics cards, this test needs Firefox 49 or newer.
# See https://github.com/kripken/emscripten/issues/4069.
open(os.path.join(self.get_dir(), 'flag_0.js'), 'w').write('''
Module['arguments'] = ['-0'];
''')
self.btest('sdl_canvas_alpha.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_alpha.png', reference_slack=12)
self.btest('sdl_canvas_alpha.c', args=['--pre-js', 'flag_0.js', '-lSDL', '-lGL'], reference='sdl_canvas_alpha_flag_0.png', reference_slack=12)
def test_sdl_key(self):
for delay in [0, 1]:
for defines in [
[],
['-DTEST_EMSCRIPTEN_SDL_SETEVENTHANDLER']
]:
for emterps in [
[],
['-DTEST_SLEEP', '-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-s', 'ASSERTIONS=1', '-s', "SAFE_HEAP=1"]
]:
print(delay, defines, emterps)
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function keydown(c) {
%s
//out('push keydown');
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keydown", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
%s
}
function keyup(c) {
%s
//out('push keyup');
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keyup", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
%s
}
''' % ('setTimeout(function() {' if delay else '', '}, 1);' if delay else '', 'setTimeout(function() {' if delay else '', '}, 1);' if delay else ''))
open(os.path.join(self.get_dir(), 'sdl_key.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_key.c')).read()))
run_process([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_key.c'), '-o', 'page.html'] + defines + emterps + ['--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main']''', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?223092870')
def test_sdl_key_proxy(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
var Module = {};
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
''')
def post():
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function keydown(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keydown", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
}
function keyup(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keyup", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
}
keydown(1250);keydown(38);keyup(38);keyup(1250); // alt, up
keydown(1248);keydown(1249);keydown(40);keyup(40);keyup(1249);keyup(1248); // ctrl, shift, down
keydown(37);keyup(37); // left
keydown(39);keyup(39); // right
keydown(65);keyup(65); // a
keydown(66);keyup(66); // b
keydown(100);keyup(100); // trigger the end
</script>
</body>''')
open('test.html', 'w').write(html)
self.btest('sdl_key_proxy.c', '223092870', args=['--proxy-to-worker', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-lSDL', '-lGL'], manual_reference=True, post_build=post)
def test_keydown_preventdefault_proxy(self):
def post():
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function keydown(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keydown", true, true, window,
0, 0, 0, 0,
c, c);
return document.dispatchEvent(event);
}
function keypress(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keypress", true, true, window,
0, 0, 0, 0,
c, c);
return document.dispatchEvent(event);
}
function keyup(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keyup", true, true, window,
0, 0, 0, 0,
c, c);
return document.dispatchEvent(event);
}
function sendKey(c) {
// Simulate the sending of the keypress event when the
// prior keydown event is not prevent defaulted.
if (keydown(c) === false) {
console.log('keydown prevent defaulted, NOT sending keypress!!!');
} else {
keypress(c);
}
keyup(c);
}
// Send 'a'. Simulate the sending of the keypress event when the
// prior keydown event is not prevent defaulted.
sendKey(65);
// Send backspace. Keypress should not be sent over as default handling of
// the Keydown event should be prevented.
sendKey(8);
keydown(100);keyup(100); // trigger the end
</script>
</body>''')
open('test.html', 'w').write(html)
self.btest('keydown_preventdefault_proxy.cpp', '300', args=['--proxy-to-worker', '-s', '''EXPORTED_FUNCTIONS=['_main']'''], manual_reference=True, post_build=post)
def test_sdl_text(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function simulateKeyEvent(charCode) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keypress", true, true, window,
0, 0, 0, 0, 0, charCode);
document.body.dispatchEvent(event);
}
''')
open(os.path.join(self.get_dir(), 'sdl_text.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_text.c')).read()))
run_process([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_text.c'), '-o', 'page.html', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?1')
def test_sdl_mouse(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
open(os.path.join(self.get_dir(), 'sdl_mouse.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_mouse.c')).read()))
run_process([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_mouse.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?1')
def test_sdl_mouse_offsets(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, x, y, x, y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
open(os.path.join(self.get_dir(), 'page.html'), 'w').write('''
<html>
<head>
<style type="text/css">
html, body { margin: 0; padding: 0; }
#container {
position: absolute;
left: 5px; right: 0;
top: 5px; bottom: 0;
}
#canvas {
position: absolute;
left: 0; width: 600px;
top: 0; height: 450px;
}
textarea {
margin-top: 500px;
margin-left: 5px;
width: 600px;
}
</style>
</head>
<body>
<div id="container">
<canvas id="canvas"></canvas>
</div>
<textarea id="output" rows="8"></textarea>
<script type="text/javascript">
var Module = {
canvas: document.getElementById('canvas'),
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' ');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})()
};
</script>
<script type="text/javascript" src="sdl_mouse.js"></script>
</body>
</html>
''')
open(os.path.join(self.get_dir(), 'sdl_mouse.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_mouse.c')).read()))
run_process([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_mouse.c'), '-DTEST_SDL_MOUSE_OFFSETS', '-O2', '--minify', '0', '-o', 'sdl_mouse.js', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?1')
def test_glut_touchevents(self):
self.btest('glut_touchevents.c', '1', args=['-lglut'])
def test_glut_wheelevents(self):
self.btest('glut_wheelevents.c', '1', args=['-lglut'])
def test_sdl_joystick_1(self):
# Generates events corresponding to the Working Draft of the HTML5 Gamepad API.
# http://www.w3.org/TR/2012/WD-gamepad-20120529/#gamepad-interface
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
gamepads.push({
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
});
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = 0;
};
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button] = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button] = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
open(os.path.join(self.get_dir(), 'sdl_joystick.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_joystick.c')).read()))
run_process([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_joystick.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?2')
def test_sdl_joystick_2(self):
# Generates events corresponding to the Editor's Draft of the HTML5 Gamepad API.
# https://dvcs.w3.org/hg/gamepad/raw-file/default/gamepad.html#idl-def-Gamepad
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
gamepads.push({
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
});
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
// Buttons are objects
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = { pressed: false, value: 0 };
};
// FF mutates the original objects.
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button].pressed = true;
gamepads[index].buttons[button].value = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button].pressed = false;
gamepads[index].buttons[button].value = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
open(os.path.join(self.get_dir(), 'sdl_joystick.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_joystick.c')).read()))
run_process([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_joystick.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?2')
@requires_graphics_hardware
def test_glfw_joystick(self):
# Generates events corresponding to the Editor's Draft of the HTML5 Gamepad API.
# https://dvcs.w3.org/hg/gamepad/raw-file/default/gamepad.html#idl-def-Gamepad
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
var gamepad = {
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
};
gamepads.push(gamepad)
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
// Buttons are objects
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = { pressed: false, value: 0 };
// Dispatch event (required for glfw joystick; note not used in SDL test)
var event = new Event('gamepadconnected');
event.gamepad = gamepad;
window.dispatchEvent(event);
};
// FF mutates the original objects.
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button].pressed = true;
gamepads[index].buttons[button].value = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button].pressed = false;
gamepads[index].buttons[button].value = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
open(os.path.join(self.get_dir(), 'test_glfw_joystick.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'test_glfw_joystick.c')).read()))
run_process([PYTHON, EMCC, os.path.join(self.get_dir(), 'test_glfw_joystick.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lGL', '-lglfw3', '-s', 'USE_GLFW=3'])
self.run_browser('page.html', '', '/report_result?2')
@requires_graphics_hardware
def test_webgl_context_attributes(self):
# Javascript code to check the attributes support we want to test in the WebGL implementation
# (request the attribute, create a context and check its value afterwards in the context attributes).
# Tests will succeed when an attribute is not supported.
open(os.path.join(self.get_dir(), 'check_webgl_attributes_support.js'), 'w').write('''
mergeInto(LibraryManager.library, {
webglAntialiasSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {antialias: true});
attributes = context.getContextAttributes();
return attributes.antialias;
},
webglDepthSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {depth: true});
attributes = context.getContextAttributes();
return attributes.depth;
},
webglStencilSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {stencil: true});
attributes = context.getContextAttributes();
return attributes.stencil;
},
webglAlphaSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {alpha: true});
attributes = context.getContextAttributes();
return attributes.alpha;
}
});
''')
# Copy common code file to temporary directory
filepath = path_from_root('tests/test_webgl_context_attributes_common.c')
temp_filepath = os.path.join(self.get_dir(), os.path.basename(filepath))
shutil.copyfile(filepath, temp_filepath)
# perform tests with attributes activated
self.btest('test_webgl_context_attributes_glut.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lglut', '-lGLEW'])
self.btest('test_webgl_context_attributes_sdl.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lSDL', '-lGLEW'])
self.btest('test_webgl_context_attributes_sdl2.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-s', 'USE_SDL=2', '-lGLEW'])
self.btest('test_webgl_context_attributes_glfw.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lglfw', '-lGLEW'])
    # perform tests with attributes deactivated
self.btest('test_webgl_context_attributes_glut.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lglut', '-lGLEW'])
self.btest('test_webgl_context_attributes_sdl.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lSDL', '-lGLEW'])
self.btest('test_webgl_context_attributes_glfw.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lglfw', '-lGLEW'])
# Test that -s GL_PREINITIALIZED_CONTEXT=1 works and allows user to set Module['preinitializedWebGLContext'] to a preinitialized WebGL context.
@requires_graphics_hardware
def test_preinitialized_webgl_context(self):
self.btest('preinitialized_webgl_context.cpp', '5', args=['-s', 'GL_PREINITIALIZED_CONTEXT=1', '--shell-file', path_from_root('tests/preinitialized_webgl_context.html')])
def test_emscripten_get_now(self):
self.btest('emscripten_get_now.cpp', '1')
@unittest.skip('Skipping due to https://github.com/kripken/emscripten/issues/2770')
def test_fflush(self):
self.btest('test_fflush.cpp', '0', args=['--shell-file', path_from_root('tests', 'test_fflush.html')])
def test_file_db(self):
secret = str(time.time())
open('moar.txt', 'w').write(secret)
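# The first run (-DFIRST) persists the preloaded secret; the later runs expect to read
# that same secret back from the database, even when another file is preloaded over it.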
self.btest('file_db.cpp', '1', args=['--preload-file', 'moar.txt', '-DFIRST'])
shutil.copyfile('test.html', 'first.html')
self.btest('file_db.cpp', secret, args=['-s', 'FORCE_FILESYSTEM=1'])
shutil.copyfile('test.html', 'second.html')
open('moar.txt', 'w').write('aliantha')
self.btest('file_db.cpp', secret, args=['--preload-file', 'moar.txt']) # even with a file there, we load over it
shutil.move('test.html', 'third.html')
def test_fs_idbfs_sync(self):
for mode in [[], ['-s', 'MEMFS_APPEND_TO_TYPED_ARRAYS=1']]:
for extra in [[], ['-DEXTRA_WORK']]:
secret = str(time.time())
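# The first run (-DFIRST) persists the secret to IDBFS; the second run is expected to
# sync it back in and verify it survived.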
self.btest(path_from_root('tests', 'fs', 'test_idbfs_sync.c'), '1', force_c=True, args=mode + ['-lidbfs.js', '-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_test', '_success']'''])
self.btest(path_from_root('tests', 'fs', 'test_idbfs_sync.c'), '1', force_c=True, args=mode + ['-lidbfs.js', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_test', '_success']'''] + extra)
def test_fs_idbfs_fsync(self):
# sync from persisted state into memory before main()
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module.preRun = function() {
addRunDependency('syncfs');
FS.mkdir('/working1');
FS.mount(IDBFS, {}, '/working1');
FS.syncfs(true, function (err) {
if (err) throw err;
removeRunDependency('syncfs');
});
};
''')
args = ['--pre-js', 'pre.js', '-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-lidbfs.js', '-s', 'EXIT_RUNTIME=1']
for mode in [[], ['-s', 'MEMFS_APPEND_TO_TYPED_ARRAYS=1']]:
secret = str(time.time())
self.btest(path_from_root('tests', 'fs', 'test_idbfs_fsync.c'), '1', force_c=True, args=args + mode + ['-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_success']'''])
self.btest(path_from_root('tests', 'fs', 'test_idbfs_fsync.c'), '1', force_c=True, args=args + mode + ['-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_success']'''])
def test_fs_memfs_fsync(self):
args = ['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-s', 'EXIT_RUNTIME=1']
for mode in [[], ['-s', 'MEMFS_APPEND_TO_TYPED_ARRAYS=1']]:
secret = str(time.time())
self.btest(path_from_root('tests', 'fs', 'test_memfs_fsync.c'), '1', force_c=True, args=args + mode + ['-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main']'''])
def test_fs_workerfs_read(self):
secret = 'a' * 10
secret2 = 'b' * 10
open(self.in_dir('pre.js'), 'w').write('''
var Module = {};
Module.preRun = function() {
var blob = new Blob(['%s']);
var file = new File(['%s'], 'file.txt');
FS.mkdir('/work');
FS.mount(WORKERFS, {
blobs: [{ name: 'blob.txt', data: blob }],
files: [file],
}, '/work');
};
''' % (secret, secret2))
self.btest(path_from_root('tests', 'fs', 'test_workerfs_read.c'), '1', force_c=True, args=['-lworkerfs.js', '--pre-js', 'pre.js', '-DSECRET=\"' + secret + '\"', '-DSECRET2=\"' + secret2 + '\"', '--proxy-to-worker'])
def test_fs_workerfs_package(self):
open('file1.txt', 'w').write('first')
if not os.path.exists('sub'):
os.makedirs('sub')
open(os.path.join('sub', 'file2.txt'), 'w').write('second')
run_process([PYTHON, FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', os.path.join('sub', 'file2.txt'), '--separate-metadata', '--js-output=files.js'])
self.btest(os.path.join('fs', 'test_workerfs_package.cpp'), '1', args=['-lworkerfs.js', '--proxy-to-worker'])
def test_fs_lz4fs_package(self):
# generate data
self.clear()
os.mkdir('subdir')
open('file1.txt', 'w').write('0123456789' * (1024 * 128))
open(os.path.join('subdir', 'file2.txt'), 'w').write('1234567890' * (1024 * 128))
random_data = bytearray(random.randint(0, 255) for x in range(1024 * 128 * 10 + 1))
random_data[17] = ord('X')
open('file3.txt', 'wb').write(random_data)
# compress in emcc; -s LZ4=1 tells emcc to pass the LZ4 option on to the file packager
print('emcc-normal')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['-s', 'LZ4=1', '--preload-file', 'file1.txt', '--preload-file', 'subdir/file2.txt', '--preload-file', 'file3.txt'], timeout=60)
assert os.stat('file1.txt').st_size + os.stat(os.path.join('subdir', 'file2.txt')).st_size + os.stat('file3.txt').st_size == 3 * 1024 * 128 * 10 + 1
assert os.stat('test.data').st_size < (3 * 1024 * 128 * 10) / 2 # over half is gone
print(' emcc-opts')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['-s', 'LZ4=1', '--preload-file', 'file1.txt', '--preload-file', 'subdir/file2.txt', '--preload-file', 'file3.txt', '-O2'], timeout=60)
# compress in the file packager, on the server; the client receives compressed data and can just use it. This is the typical usage.
print('normal')
out = subprocess.check_output([PYTHON, FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', 'subdir/file2.txt', 'file3.txt', '--lz4'])
open('files.js', 'wb').write(out)
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM=1'], timeout=60)
print(' opts')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM=1', '-O2'], timeout=60)
# load the data into LZ4FS manually at runtime. This means we compress on the client. This is generally not recommended
print('manual')
subprocess.check_output([PYTHON, FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', 'subdir/file2.txt', 'file3.txt', '--separate-metadata', '--js-output=files.js'])
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM=1'], timeout=60)
print(' opts')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM=1', '-O2'], timeout=60)
print(' opts+closure')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM=1', '-O2', '--closure', '1', '-g1'], timeout=60)
'''# non-lz4 for comparison
try:
os.mkdir('files')
except:
pass
shutil.copyfile('file1.txt', os.path.join('files', 'file1.txt'))
shutil.copyfile('file2.txt', os.path.join('files', 'file2.txt'))
shutil.copyfile('file3.txt', os.path.join('files', 'file3.txt'))
out = subprocess.check_output([PYTHON, FILE_PACKAGER, 'files.data', '--preload', 'files/file1.txt', 'files/file2.txt', 'files/file3.txt'])
open('files.js', 'wb').write(out)
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js'], timeout=60)'''
def test_separate_metadata_later(self):
# see issue #6654 - we need to handle separate-metadata both when we run before
# the main program, and when we are run later
open('data.dat', 'w').write(' ')
run_process([PYTHON, FILE_PACKAGER, 'more.data', '--preload', 'data.dat', '--separate-metadata', '--js-output=more.js'])
self.btest(os.path.join('browser', 'separate_metadata_later.cpp'), '1', args=['-s', 'FORCE_FILESYSTEM=1'])
def test_idbstore(self):
secret = str(time.time())
for stage in [0, 1, 2, 3, 0, 1, 2, 0, 0, 1, 4, 2, 5]:
self.clear()
self.btest(path_from_root('tests', 'idbstore.c'), str(stage), force_c=True, args=['-lidbstore.js', '-DSTAGE=' + str(stage), '-DSECRET=\"' + secret + '\"'])
def test_idbstore_sync(self):
secret = str(time.time())
self.clear()
self.btest(path_from_root('tests', 'idbstore_sync.c'), '6', force_c=True, args=['-lidbstore.js', '-DSECRET=\"' + secret + '\"', '-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '--memory-init-file', '1', '-O3', '-g2'])
def test_idbstore_sync_worker(self):
secret = str(time.time())
self.clear()
self.btest(path_from_root('tests', 'idbstore_sync_worker.c'), '6', force_c=True, args=['-lidbstore.js', '-DSECRET=\"' + secret + '\"', '-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '--memory-init-file', '1', '-O3', '-g2', '--proxy-to-worker', '-s', 'TOTAL_MEMORY=80MB'])
def test_force_exit(self):
self.btest('force_exit.c', force_c=True, expected='17')
def test_sdl_pumpevents(self):
# key events should be detected using SDL_PumpEvents
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function keydown(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keydown", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
}
''')
self.btest('sdl_pumpevents.c', expected='7', args=['--pre-js', 'pre.js', '-lSDL', '-lGL'])
def test_sdl_canvas_size(self):
self.btest('sdl_canvas_size.c', expected='1',
args=['-O2', '--minify', '0', '--shell-file',
path_from_root('tests', 'sdl_canvas_size.html'), '-lSDL', '-lGL'])
@requires_graphics_hardware
def test_sdl_gl_read(self):
# SDL, OpenGL, readPixels
open(os.path.join(self.get_dir(), 'sdl_gl_read.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_gl_read.c')).read()))
run_process([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_gl_read.c'), '-o', 'something.html', '-lSDL', '-lGL'])
self.run_browser('something.html', '.', '/report_result?1')
@requires_graphics_hardware
def test_sdl_gl_mapbuffers(self):
self.btest('sdl_gl_mapbuffers.c', expected='1', args=['-s', 'FULL_ES3=1', '-lSDL', '-lGL'],
message='You should see a blue triangle.')
@requires_graphics_hardware
def test_sdl_ogl(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_ogl.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_defaultmatrixmode(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_ogl_defaultMatrixMode.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_p(self):
# Immediate mode with pointers
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_ogl_p.c', reference='screenshot-gray.png', reference_slack=1,
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_proc_alias(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_ogl_proc_alias.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '-g2', '-s', 'INLINING_LIMIT=1', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'])
@requires_graphics_hardware
def test_sdl_fog_simple(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_fog_simple.c', reference='screenshot-fog-simple.png',
args=['-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_negative(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_fog_negative.c', reference='screenshot-fog-negative.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_density(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_fog_density.c', reference='screenshot-fog-density.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_exp2(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_fog_exp2.c', reference='screenshot-fog-exp2.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_linear(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_fog_linear.c', reference='screenshot-fog-linear.png', reference_slack=1,
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_glfw(self):
self.btest('glfw.c', '1', args=['-s', 'LEGACY_GL_EMULATION=1', '-lglfw', '-lGL'])
self.btest('glfw.c', '1', args=['-s', 'LEGACY_GL_EMULATION=1', '-s', 'USE_GLFW=2', '-lglfw', '-lGL'])
def test_glfw_minimal(self):
self.btest('glfw_minimal.c', '1', args=['-lglfw', '-lGL'])
self.btest('glfw_minimal.c', '1', args=['-s', 'USE_GLFW=2', '-lglfw', '-lGL'])
def test_glfw_time(self):
self.btest('test_glfw_time.c', '1', args=['-s', 'USE_GLFW=3', '-lglfw', '-lGL'])
@requires_graphics_hardware
def test_egl(self):
open(os.path.join(self.get_dir(), 'test_egl.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'test_egl.c')).read()))
run_process([PYTHON, EMCC, '-O2', os.path.join(self.get_dir(), 'test_egl.c'), '-o', 'page.html', '-lEGL', '-lGL'])
self.run_browser('page.html', '', '/report_result?1')
def test_egl_width_height(self):
open(os.path.join(self.get_dir(), 'test_egl_width_height.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'test_egl_width_height.c')).read()))
run_process([PYTHON, EMCC, '-O2', os.path.join(self.get_dir(), 'test_egl_width_height.c'), '-o', 'page.html', '-lEGL', '-lGL'])
self.run_browser('page.html', 'Should print "(300, 150)" -- the size of the canvas in pixels', '/report_result?1')
def do_test_worker(self, args=[]):
# Test running in a web worker
open('file.dat', 'w').write('data for worker')
html_file = open('main.html', 'w')
html_file.write('''
<html>
<body>
Worker Test
<script>
var worker = new Worker('worker.js');
worker.onmessage = function(event) {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?' + event.data);
xhr.send();
setTimeout(function() { window.close() }, 1000);
};
</script>
</body>
</html>
''' % self.test_port)
html_file.close()
for file_data in [1, 0]:
cmd = [PYTHON, EMCC, path_from_root('tests', 'hello_world_worker.cpp'), '-o', 'worker.js'] + (['--preload-file', 'file.dat'] if file_data else []) + args
print(cmd)
subprocess.check_call(cmd)
assert os.path.exists('worker.js')
self.run_browser('main.html', '', '/report_result?hello%20from%20worker,%20and%20|' + ('data%20for%20w' if file_data else '') + '|')
def test_worker(self):
self.do_test_worker()
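# worker.js should also run standalone (outside a web worker); when run that way it
# prints the text checked for below.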
self.assertContained('you should not see this text when in a worker!', run_js('worker.js')) # code should run standalone too
def test_chunked_synchronous_xhr(self):
main = 'chunked_sync_xhr.html'
worker_filename = "download_and_checksum_worker.js"
html_file = open(main, 'w')
html_file.write(r"""
<!doctype html>
<html>
<head><meta charset="utf-8"><title>Chunked XHR</title></head>
<html>
<body>
Chunked XHR Web Worker Test
<script>
var worker = new Worker(""" + json.dumps(worker_filename) + r""");
var buffer = [];
worker.onmessage = function(event) {
if (event.data.channel === "stdout") {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?' + event.data.line);
xhr.send();
setTimeout(function() { window.close() }, 1000);
} else {
if (event.data.trace) event.data.trace.split("\n").map(function(v) { console.error(v); });
if (event.data.line) {
console.error(event.data.line);
} else {
var v = event.data.char;
if (v == 10) {
var line = buffer.splice(0);
console.error(line = line.map(function(charCode){return String.fromCharCode(charCode);}).join(''));
} else {
buffer.push(v);
}
}
}
};
</script>
</body>
</html>
""" % self.test_port)
html_file.close()
c_source_filename = "checksummer.c"
prejs_filename = "worker_prejs.js"
prejs_file = open(prejs_filename, 'w')
prejs_file.write(r"""
if (typeof(Module) === "undefined") Module = {};
Module["arguments"] = ["/bigfile"];
Module["preInit"] = function() {
FS.createLazyFile('/', "bigfile", "http://localhost:11111/bogus_file_path", true, false);
};
var doTrace = true;
Module["print"] = function(s) { self.postMessage({channel: "stdout", line: s}); };
Module["printErr"] = function(s) { self.postMessage({channel: "stderr", char: s, trace: ((doTrace && s === 10) ? new Error().stack : null)}); doTrace = false; };
""")
prejs_file.close()
# vs. os.path.join(self.get_dir(), filename)
# vs. path_from_root('tests', 'hello_world_gles.c')
run_process([PYTHON, EMCC, path_from_root('tests', c_source_filename), '-g', '-s', 'SMALL_XHR_CHUNKS=1', '-o', worker_filename,
'--pre-js', prejs_filename])
chunkSize = 1024
data = os.urandom(10 * chunkSize + 1) # 10 full chunks and one 1 byte chunk
checksum = zlib.adler32(data) & 0xffffffff # Python 2 compatibility: mask to the unsigned 32-bit checksum (adler32 may return a signed value)
server = multiprocessing.Process(target=test_chunked_synchronous_xhr_server, args=(True, chunkSize, data, checksum, self.test_port))
server.start()
self.run_browser(main, 'Chunked binary synchronous XHR in Web Workers!', '/report_result?' + str(checksum))
server.terminate()
# Avoid a race condition on cleanup: wait a bit so that the server process has released its file locks
# before test tearDown attempts to rmdir() files that are still in use.
if WINDOWS:
time.sleep(2)
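# A minimal, illustrative sketch (not used by the test above) of how the payload and its
# expected checksum are formed: ten full chunks plus one extra byte, with adler32 masked
# so Python 2's possibly-signed result matches the unsigned value the worker reports.
def _sketch_chunked_xhr_payload(self, chunk_size=1024, chunks=10):
  # build the data exactly as the test does, then compute the expected /report_result value
  data = os.urandom(chunks * chunk_size + 1)
  return data, zlib.adler32(data) & 0xffffffff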
@requires_graphics_hardware
def test_glgears(self):
self.btest('hello_world_gles.c', reference='gears.png', reference_slack=3,
args=['-DHAVE_BUILTIN_SINCOS', '-lGL', '-lglut'], outfile='something.html',
message='You should see animating gears.')
@requires_graphics_hardware
def test_glgears_long(self):
for proxy in [0, 1]:
print('proxy', proxy)
self.btest('hello_world_gles.c', expected=list(map(str, range(30, 500))), args=['-DHAVE_BUILTIN_SINCOS', '-DLONGTEST', '-lGL', '-lglut'] + (['--proxy-to-worker'] if proxy else []), timeout=30)
@requires_graphics_hardware
def test_glgears_animation(self):
es2_suffix = ['', '_full', '_full_944']
for full_es2 in [0, 1, 2]:
print(full_es2)
run_process([PYTHON, EMCC, path_from_root('tests', 'hello_world_gles%s.c' % es2_suffix[full_es2]), '-o', 'something.html',
'-DHAVE_BUILTIN_SINCOS', '-s', 'GL_TESTING=1', '-lGL', '-lglut',
'--shell-file', path_from_root('tests', 'hello_world_gles_shell.html')] +
(['-s', 'FULL_ES2=1'] if full_es2 else []))
self.run_browser('something.html', 'You should see animating gears.', '/report_gl_result?true')
@requires_graphics_hardware
def test_fulles2_sdlproc(self):
self.btest('full_es2_sdlproc.c', '1', args=['-s', 'GL_TESTING=1', '-DHAVE_BUILTIN_SINCOS', '-s', 'FULL_ES2=1', '-lGL', '-lSDL', '-lglut'])
@requires_graphics_hardware
def test_glgears_deriv(self):
self.btest('hello_world_gles_deriv.c', reference='gears.png', reference_slack=2,
args=['-DHAVE_BUILTIN_SINCOS', '-lGL', '-lglut'], outfile='something.html',
message='You should see animating gears.')
with open('something.html') as f:
assert 'gl-matrix' not in f.read(), 'Should not include glMatrix when not needed'
@requires_graphics_hardware
def test_glbook(self):
programs = self.get_library('glbook', [
os.path.join('Chapter_2', 'Hello_Triangle', 'CH02_HelloTriangle.bc'),
os.path.join('Chapter_8', 'Simple_VertexShader', 'CH08_SimpleVertexShader.bc'),
os.path.join('Chapter_9', 'Simple_Texture2D', 'CH09_SimpleTexture2D.bc'),
os.path.join('Chapter_9', 'Simple_TextureCubemap', 'CH09_TextureCubemap.bc'),
os.path.join('Chapter_9', 'TextureWrap', 'CH09_TextureWrap.bc'),
os.path.join('Chapter_10', 'MultiTexture', 'CH10_MultiTexture.bc'),
os.path.join('Chapter_13', 'ParticleSystem', 'CH13_ParticleSystem.bc'),
], configure=None)
def book_path(*pathelems):
return path_from_root('tests', 'glbook', *pathelems)
for program in programs:
print(program)
basename = os.path.basename(program)
args = ['-lGL', '-lEGL', '-lX11']
if basename == 'CH10_MultiTexture.bc':
shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'basemap.tga'), os.path.join(self.get_dir(), 'basemap.tga'))
shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'lightmap.tga'), os.path.join(self.get_dir(), 'lightmap.tga'))
args += ['--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga']
elif basename == 'CH13_ParticleSystem.bc':
shutil.copyfile(book_path('Chapter_13', 'ParticleSystem', 'smoke.tga'), os.path.join(self.get_dir(), 'smoke.tga'))
args += ['--preload-file', 'smoke.tga', '-O2'] # also test optimizations here for more coverage
self.btest(program,
reference=book_path(basename.replace('.bc', '.png')),
args=args,
timeout=30)
@requires_graphics_hardware
def test_gles2_emulation(self):
shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_10', 'MultiTexture', 'basemap.tga'), self.in_dir('basemap.tga'))
shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_10', 'MultiTexture', 'lightmap.tga'), self.in_dir('lightmap.tga'))
shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_13', 'ParticleSystem', 'smoke.tga'), self.in_dir('smoke.tga'))
for source, reference in [
(os.path.join('glbook', 'Chapter_2', 'Hello_Triangle', 'Hello_Triangle_orig.c'), path_from_root('tests', 'glbook', 'CH02_HelloTriangle.png')),
# (os.path.join('glbook', 'Chapter_8', 'Simple_VertexShader', 'Simple_VertexShader_orig.c'), path_from_root('tests', 'glbook', 'CH08_SimpleVertexShader.png')), # XXX needs INT extension in WebGL
(os.path.join('glbook', 'Chapter_9', 'TextureWrap', 'TextureWrap_orig.c'), path_from_root('tests', 'glbook', 'CH09_TextureWrap.png')),
# (os.path.join('glbook', 'Chapter_9', 'Simple_TextureCubemap', 'Simple_TextureCubemap_orig.c'), path_from_root('tests', 'glbook', 'CH09_TextureCubemap.png')), # XXX needs INT extension in WebGL
(os.path.join('glbook', 'Chapter_9', 'Simple_Texture2D', 'Simple_Texture2D_orig.c'), path_from_root('tests', 'glbook', 'CH09_SimpleTexture2D.png')),
(os.path.join('glbook', 'Chapter_10', 'MultiTexture', 'MultiTexture_orig.c'), path_from_root('tests', 'glbook', 'CH10_MultiTexture.png')),
(os.path.join('glbook', 'Chapter_13', 'ParticleSystem', 'ParticleSystem_orig.c'), path_from_root('tests', 'glbook', 'CH13_ParticleSystem.png')),
]:
print(source)
self.btest(source,
reference=reference,
args=['-I' + path_from_root('tests', 'glbook', 'Common'),
path_from_root('tests', 'glbook', 'Common', 'esUtil.c'),
path_from_root('tests', 'glbook', 'Common', 'esShader.c'),
path_from_root('tests', 'glbook', 'Common', 'esShapes.c'),
path_from_root('tests', 'glbook', 'Common', 'esTransform.c'),
'-s', 'FULL_ES2=1', '-lGL', '-lEGL', '-lX11',
'--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga', '--preload-file', 'smoke.tga'])
@requires_graphics_hardware
def test_clientside_vertex_arrays_es3(self):
# NOTE: Should FULL_ES3=1 imply client-side vertex arrays? The emulation needs FULL_ES2=1 for now.
self.btest('clientside_vertex_arrays_es3.c', reference='gl_triangle.png', args=['-s', 'USE_WEBGL2=1', '-s', 'FULL_ES2=1', '-s', 'FULL_ES3=1', '-s', 'USE_GLFW=3', '-lglfw', '-lGLESv2'])
def test_emscripten_api(self):
for extra_args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
  self.btest('emscripten_api_browser.cpp', '1', args=extra_args + ['-s', '''EXPORTED_FUNCTIONS=['_main', '_third']''', '-lSDL'])
def test_emscripten_api2(self):
def setup():
open('script1.js', 'w').write('''
Module._set(456);
''')
open('file1.txt', 'w').write('first')
open('file2.txt', 'w').write('second')
setup()
run_process([PYTHON, FILE_PACKAGER, 'test.data', '--preload', 'file1.txt', 'file2.txt'], stdout=open('script2.js', 'w'))
self.btest('emscripten_api_browser2.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_set']''', '-s', 'FORCE_FILESYSTEM=1'])
# check using file packager to another dir
self.clear()
setup()
os.mkdir('sub')
run_process([PYTHON, FILE_PACKAGER, 'sub/test.data', '--preload', 'file1.txt', 'file2.txt'], stdout=open('script2.js', 'w'))
shutil.copyfile(os.path.join('sub', 'test.data'), 'test.data')
self.btest('emscripten_api_browser2.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_set']''', '-s', 'FORCE_FILESYSTEM=1'])
def test_emscripten_api_infloop(self):
self.btest('emscripten_api_browser_infloop.cpp', '7')
def test_emscripten_fs_api(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png')) # preloaded *after* run
self.btest('emscripten_fs_api_browser.cpp', '1', args=['-lSDL'])
def test_emscripten_fs_api2(self):
self.btest('emscripten_fs_api_browser2.cpp', '1', args=['-s', "ASSERTIONS=0"])
self.btest('emscripten_fs_api_browser2.cpp', '1', args=['-s', "ASSERTIONS=1"])
def test_emscripten_main_loop(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'EXIT_RUNTIME=1']]:
self.btest('emscripten_main_loop.cpp', '0', args=args)
def test_emscripten_main_loop_settimeout(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
self.btest('emscripten_main_loop_settimeout.cpp', '1', args=args)
def test_emscripten_main_loop_and_blocker(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
self.btest('emscripten_main_loop_and_blocker.cpp', '0', args=args)
def test_emscripten_main_loop_setimmediate(self):
for args in [[], ['--proxy-to-worker'], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
self.btest('emscripten_main_loop_setimmediate.cpp', '1', args=args)
def test_fs_after_main(self):
for args in [[], ['-O1']]:
self.btest('fs_after_main.cpp', '0', args=args)
def test_sdl_quit(self):
self.btest('sdl_quit.c', '1', args=['-lSDL', '-lGL'])
def test_sdl_resize(self):
self.btest('sdl_resize.c', '1', args=['-lSDL', '-lGL'])
def test_glshaderinfo(self):
self.btest('glshaderinfo.cpp', '1', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_glgetattachedshaders(self):
self.btest('glgetattachedshaders.c', '1', args=['-lGL', '-lEGL'])
# Covered by the dEQP test suite (we can remove it later if we add coverage for that).
@requires_graphics_hardware
def test_glframebufferattachmentinfo(self):
self.btest('glframebufferattachmentinfo.c', '1', args=['-lGLESv2', '-lEGL'])
@requires_graphics_hardware
def test_sdlglshader(self):
self.btest('sdlglshader.c', reference='sdlglshader.png', args=['-O2', '--closure', '1', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_sdlglshader2(self):
self.btest('sdlglshader2.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_gl_glteximage(self):
self.btest('gl_teximage.c', '1', args=['-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_textures(self):
self.btest('gl_textures.cpp', '0', args=['-lGL'])
@requires_graphics_hardware
def test_gl_ps(self):
# pointers and a shader
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('gl_ps.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1)
@requires_graphics_hardware
def test_gl_ps_packed(self):
# packed data that needs to be strided
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('gl_ps_packed.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1)
@requires_graphics_hardware
def test_gl_ps_strides(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('gl_ps_strides.c', reference='gl_ps_strides.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '--use-preload-plugins'])
@requires_graphics_hardware
def test_gl_ps_worker(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('gl_ps_worker.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1, also_proxied=True)
@requires_graphics_hardware
def test_gl_renderers(self):
self.btest('gl_renderers.c', reference='gl_renderers.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_stride(self):
self.btest('gl_stride.c', reference='gl_stride.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_vertex_buffer_pre(self):
self.btest('gl_vertex_buffer_pre.c', reference='gl_vertex_buffer_pre.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_vertex_buffer(self):
self.btest('gl_vertex_buffer.c', reference='gl_vertex_buffer.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], reference_slack=1)
@requires_graphics_hardware
def test_gles2_uniform_arrays(self):
self.btest('gles2_uniform_arrays.cpp', args=['-s', 'GL_ASSERTIONS=1', '-lGL', '-lSDL'], expected=['1'], also_proxied=True)
@requires_graphics_hardware
def test_gles2_conformance(self):
self.btest('gles2_conformance.cpp', args=['-s', 'GL_ASSERTIONS=1', '-lGL', '-lSDL'], expected=['1'])
@requires_graphics_hardware
def test_matrix_identity(self):
self.btest('gl_matrix_identity.c', expected=['-1882984448', '460451840', '1588195328'], args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre(self):
self.btest('cubegeom_pre.c', reference='cubegeom_pre.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_chrome("RELOCATABLE=1 forces synchronous compilation which chrome doesn't support")
def test_cubegeom_pre_relocatable(self):
self.btest('cubegeom_pre.c', reference='cubegeom_pre.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '-s', 'RELOCATABLE=1'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre2(self):
self.btest('cubegeom_pre2.c', reference='cubegeom_pre2.png', args=['-s', 'GL_DEBUG=1', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL']) # some coverage for GL_DEBUG not breaking the build
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre3(self):
self.btest('cubegeom_pre3.c', reference='cubegeom_pre2.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom(self):
self.btest('cubegeom.c', reference='cubegeom.png', args=['-O2', '-g', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_cubegeom_proc(self):
open('side.c', 'w').write(r'''
extern void* SDL_GL_GetProcAddress(const char *);
void *glBindBuffer = 0; // same name as the gl function, to check that the collision does not break us
void *getBindBuffer() {
if (!glBindBuffer) glBindBuffer = SDL_GL_GetProcAddress("glBindBuffer");
return glBindBuffer;
}
''')
# also test -Os in wasm, which uses meta-dce, which should not break legacy gl emulation hacks
for opts in [[], ['-O1'], ['-Os', '-s', 'WASM=1']]:
self.btest('cubegeom_proc.c', reference='cubegeom.png', args=opts + ['side.c', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_glew(self):
self.btest('cubegeom_glew.c', reference='cubegeom.png', args=['-O2', '--closure', '1', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lGLEW', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_color(self):
self.btest('cubegeom_color.c', reference='cubegeom_color.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal(self):
self.btest('cubegeom_normal.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_cubegeom_normal_dap(self): # draw is given a direct pointer to clientside memory, no element array buffer
self.btest('cubegeom_normal_dap.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_cubegeom_normal_dap_far(self): # indices do not start from 0
self.btest('cubegeom_normal_dap_far.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal_dap_far_range(self): # glDrawRangeElements
self.btest('cubegeom_normal_dap_far_range.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal_dap_far_glda(self): # use glDrawArrays
self.btest('cubegeom_normal_dap_far_glda.c', reference='cubegeom_normal_dap_far_glda.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal_dap_far_glda_quad(self): # with quad
self.btest('cubegeom_normal_dap_far_glda_quad.c', reference='cubegeom_normal_dap_far_glda_quad.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_mt(self):
self.btest('cubegeom_mt.c', reference='cubegeom_mt.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL']) # multitexture
@requires_graphics_hardware
def test_cubegeom_color2(self):
self.btest('cubegeom_color2.c', reference='cubegeom_color2.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_cubegeom_texturematrix(self):
self.btest('cubegeom_texturematrix.c', reference='cubegeom_texturematrix.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_fog(self):
self.btest('cubegeom_fog.c', reference='cubegeom_fog.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_vao(self):
self.btest('cubegeom_pre_vao.c', reference='cubegeom_pre_vao.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre2_vao(self):
self.btest('cubegeom_pre2_vao.c', reference='cubegeom_pre_vao.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_pre2_vao2(self):
self.btest('cubegeom_pre2_vao2.c', reference='cubegeom_pre2_vao2.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_vao_es(self):
self.btest('cubegeom_pre_vao_es.c', reference='cubegeom_pre_vao.png', args=['-s', 'FULL_ES2=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_u4fv_2(self):
self.btest('cubegeom_u4fv_2.c', reference='cubegeom_u4fv_2.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
self.btest('cubegeom_u4fv_2.c', reference='cubegeom_u4fv_2.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '-s', 'SPLIT_MEMORY=16777216', '-s', 'WASM=0']) # check for uniform4fv slice being valid in split memory
@requires_graphics_hardware
def test_cube_explosion(self):
self.btest('cube_explosion.c', reference='cube_explosion.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_glgettexenv(self):
self.btest('glgettexenv.c', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], expected=['1'])
def test_sdl_canvas_blank(self):
self.btest('sdl_canvas_blank.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_blank.png')
def test_sdl_canvas_palette(self):
self.btest('sdl_canvas_palette.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_palette.png')
def test_sdl_canvas_twice(self):
self.btest('sdl_canvas_twice.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_twice.png')
def test_sdl_set_clip_rect(self):
self.btest('sdl_set_clip_rect.c', args=['-lSDL', '-lGL'], reference='sdl_set_clip_rect.png')
def test_sdl_maprgba(self):
self.btest('sdl_maprgba.c', args=['-lSDL', '-lGL'], reference='sdl_maprgba.png', reference_slack=3)
def test_sdl_create_rgb_surface_from(self):
self.btest('sdl_create_rgb_surface_from.c', args=['-lSDL', '-lGL'], reference='sdl_create_rgb_surface_from.png')
def test_sdl_rotozoom(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_rotozoom.c', reference='sdl_rotozoom.png', args=['--preload-file', 'screenshot.png', '--use-preload-plugins', '-lSDL', '-lGL'], reference_slack=3)
def test_sdl_gfx_primitives(self):
self.btest('sdl_gfx_primitives.c', args=['-lSDL', '-lGL'], reference='sdl_gfx_primitives.png', reference_slack=1)
def test_sdl_canvas_palette_2(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module['preRun'].push(function() {
SDL.defaults.copyOnLock = false;
});
''')
open(os.path.join(self.get_dir(), 'args-r.js'), 'w').write('''
Module['arguments'] = ['-r'];
''')
open(os.path.join(self.get_dir(), 'args-g.js'), 'w').write('''
Module['arguments'] = ['-g'];
''')
open(os.path.join(self.get_dir(), 'args-b.js'), 'w').write('''
Module['arguments'] = ['-b'];
''')
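# Each args-*.js file passes a different flag (-r, -g, -b) so the program draws with a
# different palette, matched against the corresponding reference image below.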
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_r.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-r.js', '-lSDL', '-lGL'])
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_g.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-g.js', '-lSDL', '-lGL'])
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_b.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-b.js', '-lSDL', '-lGL'])
def test_sdl_ttf_render_text_solid(self):
self.btest('sdl_ttf_render_text_solid.c', reference='sdl_ttf_render_text_solid.png', args=['-O2', '-s', 'TOTAL_MEMORY=16MB', '-lSDL', '-lGL'])
def test_sdl_alloctext(self):
self.btest('sdl_alloctext.c', expected='1', args=['-O2', '-s', 'TOTAL_MEMORY=16MB', '-lSDL', '-lGL'])
def test_sdl_surface_refcount(self):
self.btest('sdl_surface_refcount.c', args=['-lSDL'], expected='1')
def test_sdl_free_screen(self):
self.btest('sdl_free_screen.cpp', args=['-lSDL', '-lGL'], reference='htmltest.png')
@requires_graphics_hardware
def test_glbegin_points(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('glbegin_points.c', reference='glbegin_points.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '--use-preload-plugins'])
@requires_graphics_hardware
def test_s3tc(self):
shutil.copyfile(path_from_root('tests', 'screenshot.dds'), os.path.join(self.get_dir(), 'screenshot.dds'))
self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_s3tc_ffp_only(self):
shutil.copyfile(path_from_root('tests', 'screenshot.dds'), os.path.join(self.get_dir(), 'screenshot.dds'))
self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds', '-s', 'LEGACY_GL_EMULATION=1', '-s', 'GL_FFP_ONLY=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_aniso(self):
if SPIDERMONKEY_ENGINE in JS_ENGINES:
# asm.js-ification check
run_process([PYTHON, EMCC, path_from_root('tests', 'aniso.c'), '-O2', '-g2', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '-Wno-incompatible-pointer-types'])
self.set_setting('ASM_JS', 1)
self.run_generated_code(SPIDERMONKEY_ENGINE, 'a.out.js', assert_returncode=None)
print('passed asm test')
shutil.copyfile(path_from_root('tests', 'water.dds'), 'water.dds')
self.btest('aniso.c', reference='aniso.png', reference_slack=2, args=['--preload-file', 'water.dds', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '-Wno-incompatible-pointer-types'])
@requires_graphics_hardware
def test_tex_nonbyte(self):
self.btest('tex_nonbyte.c', reference='tex_nonbyte.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_float_tex(self):
self.btest('float_tex.cpp', reference='float_tex.png', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_subdata(self):
self.btest('gl_subdata.cpp', reference='float_tex.png', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_perspective(self):
self.btest('perspective.c', reference='perspective.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_glerror(self):
self.btest('gl_error.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL'])
def test_openal_error(self):
for args in [[], ['--closure', '1']]:
print(args)
self.btest('openal_error.c', expected='1', args=args)
def test_openal_capture_sanity(self):
self.btest('openal_capture_sanity.c', expected='0')
def test_runtimelink(self):
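# Build a side module (as asm.js or wasm), then have the main module load and link it at
# runtime via RUNTIME_LINKED_LIBS.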
for wasm in [0, 1]:
print(wasm)
main, supp = self.setup_runtimelink_test()
open('supp.cpp', 'w').write(supp)
run_process([PYTHON, EMCC, 'supp.cpp', '-o', 'supp.' + ('wasm' if wasm else 'js'), '-s', 'SIDE_MODULE=1', '-O2', '-s', 'WASM=%d' % wasm])
self.btest(main, args=['-DBROWSER=1', '-s', 'MAIN_MODULE=1', '-O2', '-s', 'WASM=%d' % wasm, '-s', 'RUNTIME_LINKED_LIBS=["supp.' + ('wasm' if wasm else 'js') + '"]'], expected='76')
def test_pre_run_deps(self):
# Adding a dependency in preRun will delay run
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module.preRun = function() {
addRunDependency();
out('preRun called, added a dependency...');
setTimeout(function() {
Module.okk = 10;
removeRunDependency()
}, 2000);
};
''')
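# run() is held back until removeRunDependency() fires two seconds later, by which time
# Module.okk has been set to the value the test reports.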
for mem in [0, 1]:
self.btest('pre_run_deps.cpp', expected='10', args=['--pre-js', 'pre.js', '--memory-init-file', str(mem)])
def test_mem_init(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function myJSCallback() { // called from main()
Module._note(1);
}
Module.preRun = function() {
addOnPreMain(function() {
Module._note(2);
});
};
''')
open(os.path.join(self.get_dir(), 'post.js'), 'w').write('''
var assert = function(check, text) {
if (!check) {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?9');
xhr.onload = function() {
window.close();
};
xhr.send();
}
}
Module._note(4); // this happens too early! and is overwritten when the mem init arrives
''' % self.test_port)
# with assertions, we notice when memory was written to too early
self.btest('mem_init.cpp', expected='9', args=['-s', 'WASM=0', '--pre-js', 'pre.js', '--post-js', 'post.js', '--memory-init-file', '1'])
# otherwise, we just overwrite
self.btest('mem_init.cpp', expected='3', args=['-s', 'WASM=0', '--pre-js', 'pre.js', '--post-js', 'post.js', '--memory-init-file', '1', '-s', 'ASSERTIONS=0'])
def test_mem_init_request(self):
def test(what, status):
print(what, status)
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
var xhr = Module.memoryInitializerRequest = new XMLHttpRequest();
xhr.open('GET', "''' + what + '''", true);
xhr.responseType = 'arraybuffer';
xhr.send(null);
console.warn = function(x) {
if (x.indexOf('a problem seems to have happened with Module.memoryInitializerRequest') >= 0) {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?0');
setTimeout(xhr.onload = function() {
console.log('close!');
window.close();
}, 1000);
xhr.send();
throw 'halt';
}
console.log('WARNING: ' + x);
};
''' % self.test_port)
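# A valid memory initializer URL lets startup proceed normally (reports 1); a bogus URL
# triggers the memoryInitializerRequest warning hooked above, which reports 0 instead.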
self.btest('mem_init_request.cpp', expected=status, args=['-s', 'WASM=0', '--pre-js', 'pre.js', '--memory-init-file', '1'])
test('test.html.mem', '1')
test('nothing.nowhere', '0')
def test_runtime_misuse(self):
post_prep = '''
var expected_ok = false;
function doCcall(n) {
ccall('note', 'string', ['number'], [n]);
}
var wrapped = cwrap('note', 'string', ['number']); // returns a string to suppress cwrap optimization
function doCwrapCall(n) {
var str = wrapped(n);
out('got ' + str);
assert(str === 'silly-string');
}
function doDirectCall(n) {
Module['_note'](n);
}
'''
post_test = '''
var ok = false;
try {
doCcall(1);
ok = true; // should fail and not reach here, runtime is not ready yet so ccall will abort
} catch(e) {
out('expected fail 1');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
ok = false;
try {
doCwrapCall(2);
ok = true; // should fail and not reach here, runtime is not ready yet so cwrap call will abort
} catch(e) {
out('expected fail 2');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
ok = false;
try {
doDirectCall(3);
ok = true; // should fail and not reach here, runtime is not ready yet so any code execution will abort
} catch(e) {
out('expected fail 3');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
'''
post_hook = r'''
function myJSCallback() {
// called from main, this is an ok time
doCcall(100);
doCwrapCall(200);
doDirectCall(300);
}
setTimeout(function() {
var xhr = new XMLHttpRequest();
assert(Module.noted);
xhr.open('GET', 'http://localhost:%s/report_result?' + HEAP32[Module.noted>>2]);
xhr.send();
setTimeout(function() { window.close() }, 1000);
}, 1000);
''' % self.test_port
open('pre_runtime.js', 'w').write(r'''
Module.onRuntimeInitialized = function(){
myJSCallback();
};
''')
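# pre_runtime.js calls into the program as soon as the runtime is initialized, which is a
# valid time to call exported functions; runtime_misuse_2.cpp below relies on it since it
# has no main().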
for filename, extra_args, second_code in [
('runtime_misuse.cpp', [], 600),
('runtime_misuse_2.cpp', ['--pre-js', 'pre_runtime.js'], 601) # 601, because no main means we *do* run another call after exit()
]:
for mode in [[], ['-s', 'WASM=1']]:
print('\n', filename, extra_args, mode)
print('mem init, so async, call too early')
open(os.path.join(self.get_dir(), 'post.js'), 'w').write(post_prep + post_test + post_hook)
self.btest(filename, expected='600', args=['--post-js', 'post.js', '--memory-init-file', '1', '-s', 'EXIT_RUNTIME=1'] + extra_args + mode)
print('sync startup, call too late')
open(os.path.join(self.get_dir(), 'post.js'), 'w').write(post_prep + 'Module.postRun.push(function() { ' + post_test + ' });' + post_hook)
self.btest(filename, expected=str(second_code), args=['--post-js', 'post.js', '--memory-init-file', '0', '-s', 'EXIT_RUNTIME=1'] + extra_args + mode)
print('sync, runtime still alive, so all good')
open(os.path.join(self.get_dir(), 'post.js'), 'w').write(post_prep + 'expected_ok = true; Module.postRun.push(function() { ' + post_test + ' });' + post_hook)
self.btest(filename, expected='606', args=['--post-js', 'post.js', '--memory-init-file', '0'] + extra_args + mode)
def test_cwrap_early(self):
self.btest(os.path.join('browser', 'cwrap_early.cpp'), args=['-O2', '-s', 'ASSERTIONS=1', '--pre-js', path_from_root('tests', 'browser', 'cwrap_early.js'), '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["cwrap"]'], expected='0')
def test_worker_api(self):
run_process([PYTHON, EMCC, path_from_root('tests', 'worker_api_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-s', 'EXPORTED_FUNCTIONS=["_one"]'])
self.btest('worker_api_main.cpp', expected='566')
def test_worker_api_2(self):
run_process([PYTHON, EMCC, path_from_root('tests', 'worker_api_2_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-O2', '--minify', '0', '-s', 'EXPORTED_FUNCTIONS=["_one", "_two", "_three", "_four"]', '--closure', '1'])
self.btest('worker_api_2_main.cpp', args=['-O2', '--minify', '0'], expected='11')
def test_worker_api_3(self):
run_process([PYTHON, EMCC, path_from_root('tests', 'worker_api_3_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-s', 'EXPORTED_FUNCTIONS=["_one"]'])
self.btest('worker_api_3_main.cpp', expected='5')
def test_worker_api_sleep(self):
run_process([PYTHON, EMCC, path_from_root('tests', 'worker_api_worker_sleep.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-s', 'EXPORTED_FUNCTIONS=["_one"]', '-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1'])
self.btest('worker_api_main.cpp', expected='566')
def test_emscripten_async_wget2(self):
self.btest('http.cpp', expected='0', args=['-I' + path_from_root('tests')])
# TODO: test only worked in non-fastcomp
@unittest.skip('non-fastcomp is deprecated and fails in 3.5')
def test_module(self):
run_process([PYTHON, EMCC, path_from_root('tests', 'browser_module.cpp'), '-o', 'module.js', '-O2', '-s', 'SIDE_MODULE=1', '-s', 'DLOPEN_SUPPORT=1', '-s', 'EXPORTED_FUNCTIONS=["_one", "_two"]'])
self.btest('browser_main.cpp', args=['-O2', '-s', 'MAIN_MODULE=1', '-s', 'DLOPEN_SUPPORT=1'], expected='8')
def test_preload_module(self):
open('library.c', 'w').write(r'''
#include <stdio.h>
int library_func() {
return 42;
}
''')
run_process([PYTHON, EMCC, 'library.c', '-s', 'SIDE_MODULE=1', '-O2', '-o', 'library.wasm', '-s', 'WASM=1'])
os.rename('library.wasm', 'library.so')
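# The side module is preloaded (via --preload-file and --use-preload-plugins below), so
# the main program expects to find it in Module['preloadedWasm'] and then dlopen() it.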
main = r'''
#include <dlfcn.h>
#include <stdio.h>
#include <emscripten.h>
int main() {
int found = EM_ASM_INT(
return Module['preloadedWasm']['/library.so'] !== undefined;
);
if (!found) {
REPORT_RESULT(1);
return 1;
}
void *lib_handle = dlopen("/library.so", 0);
if (!lib_handle) {
REPORT_RESULT(2);
return 2;
}
typedef int (*voidfunc)();
voidfunc x = (voidfunc)dlsym(lib_handle, "library_func");
if (!x || x() != 42) {
REPORT_RESULT(3);
return 3;
}
REPORT_RESULT(0);
return 0;
}
'''
self.btest(
main,
args=['-s', 'MAIN_MODULE=1', '--preload-file', '.@/', '-O2', '-s', 'WASM=1', '--use-preload-plugins'],
expected='0')
def test_mmap_file(self):
open(self.in_dir('data.dat'), 'w').write('data from the file ' + ('.' * 9000))
for extra_args in [[], ['--no-heap-copy']]:
self.btest(path_from_root('tests', 'mmap_file.c'), expected='1', args=['--preload-file', 'data.dat'] + extra_args)
def test_emrun_info(self):
if not has_browser():
self.skipTest('need a browser')
result = run_process([PYTHON, path_from_root('emrun'), '--system_info', '--browser_info'], stdout=PIPE).stdout
assert 'CPU' in result
assert 'Browser' in result
assert 'Traceback' not in result
result = run_process([PYTHON, path_from_root('emrun'), '--list_browsers'], stdout=PIPE).stdout
assert 'Traceback' not in result
# Deliberately named test_zzz_emrun so that it runs last, since it may take focus away
# from the main test window by opening a new window and possibly not closing it.
def test_zzz_emrun(self):
run_process([PYTHON, EMCC, path_from_root('tests', 'test_emrun.c'), '--emrun', '-o', 'hello_world.html'])
outdir = os.getcwd()
if not has_browser():
self.skipTest('need a browser')
# We cannot run emrun from the temp directory the suite will clean up afterwards, since the launched browser
# uses that directory as its startup directory and does not close as part of the test; on Windows that pins
# down the cwd and makes it impossible to delete. Therefore switch away from that directory before launching.
os.chdir(path_from_root())
args = [PYTHON, path_from_root('emrun'), '--timeout', '30', '--safe_firefox_profile', '--port', '6939', '--verbose', '--log_stdout', os.path.join(outdir, 'stdout.txt'), '--log_stderr', os.path.join(outdir, 'stderr.txt')]
if EMTEST_BROWSER is not None:
# If EMTEST_BROWSER carried command line arguments to pass to the browser,
# (e.g. "firefox -profile /path/to/foo") those can't be passed via emrun,
# so strip them out.
browser_cmd = shlex.split(EMTEST_BROWSER)
browser_path = browser_cmd[0]
args += ['--browser', browser_path]
if len(browser_cmd) > 1:
browser_args = browser_cmd[1:]
if 'firefox' in browser_path and '-profile' in browser_args:
# emrun uses its own -profile, strip it out
parser = argparse.ArgumentParser(add_help=False) # otherwise it throws with -headless
parser.add_argument('-profile')
browser_args = parser.parse_known_args(browser_args)[1]
if browser_args:
args += ['--browser_args', ' ' + ' '.join(browser_args)]
args += [os.path.join(outdir, 'hello_world.html'), '1', '2', '--3']
proc = run_process(args, check=False)
stdout = open(os.path.join(outdir, 'stdout.txt'), 'r').read()
stderr = open(os.path.join(outdir, 'stderr.txt'), 'r').read()
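# emrun should propagate the application's exit code and forward its stdout/stderr,
# which the assertions below check against output produced by test_emrun.c.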
assert proc.returncode == 100
assert 'argc: 4' in stdout
assert 'argv[3]: --3' in stdout
assert 'hello, world!' in stdout
assert 'Testing ASCII characters: !"$%&\'()*+,-./:;<=>?@[\\]^_`{|}~' in stdout
assert 'Testing char sequences: %20%21 ä' in stdout
assert 'hello, error stream!' in stderr
# This does not actually verify anything except that --cpuprofiler and --memoryprofiler compile.
# Run interactive.test_cpuprofiler_memoryprofiler for interactive testing.
@requires_graphics_hardware
def test_cpuprofiler_memoryprofiler(self):
self.btest('hello_world_gles.c', expected='0', args=['-DLONGTEST=1', '-DTEST_MEMORYPROFILER_ALLOCATIONS_MAP=1', '-O2', '--cpuprofiler', '--memoryprofiler', '-lGL', '-lglut'], timeout=30)
def test_uuid(self):
# Run with ./runner.py browser.test_uuid
# We run this test in Node/SPIDERMONKEY and browser environments because we try to make use of
# high quality crypto random number generators such as crypto.getRandomValues or randomBytes (if available).
# First run tests in Node and/or SPIDERMONKEY using run_js. Use closure compiler so we can check that
# require('crypto').randomBytes and window.crypto.getRandomValues don't get minified out.
run_process([PYTHON, EMCC, '-O2', '--closure', '1', path_from_root('tests', 'uuid', 'test.c'), '-o', 'test.js', '-luuid'], stdout=PIPE, stderr=PIPE)
test_js_closure = open('test.js').read()
# Check that test.js compiled with --closure 1 contains ").randomBytes" and "window.crypto.getRandomValues"
assert ").randomBytes" in test_js_closure
assert "window.crypto.getRandomValues" in test_js_closure
out = run_js('test.js', full_output=True)
print(out)
# Tidy up files that might have been created by this test.
try_delete(path_from_root('tests', 'uuid', 'test.js'))
try_delete(path_from_root('tests', 'uuid', 'test.js.map'))
# Now run test in browser
self.btest(path_from_root('tests', 'uuid', 'test.c'), '1', args=['-luuid'])
@requires_graphics_hardware
def test_glew(self):
self.btest(path_from_root('tests', 'glew.c'), args=['-lGL', '-lSDL', '-lGLEW'], expected='1')
self.btest(path_from_root('tests', 'glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-s', 'LEGACY_GL_EMULATION=1'], expected='1')
self.btest(path_from_root('tests', 'glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-DGLEW_MX'], expected='1')
self.btest(path_from_root('tests', 'glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-s', 'LEGACY_GL_EMULATION=1', '-DGLEW_MX'], expected='1')
def test_doublestart_bug(self):
open('pre.js', 'w').write(r'''
if (!Module['preRun']) Module['preRun'] = [];
Module["preRun"].push(function () {
addRunDependency('test_run_dependency');
removeRunDependency('test_run_dependency');
});
''')
self.btest('doublestart.c', args=['--pre-js', 'pre.js', '-o', 'test.html'], expected='1')
def test_html5(self):
for opts in [[], ['-O2', '-g1', '--closure', '1']]:
print(opts)
self.btest(path_from_root('tests', 'test_html5.c'), args=opts, expected='0', timeout=20)
@requires_graphics_hardware
def test_html5_webgl_create_context_no_antialias(self):
for opts in [[], ['-O2', '-g1', '--closure', '1'], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest(path_from_root('tests', 'webgl_create_context.cpp'), args=opts + ['-DNO_ANTIALIAS', '-lGL'], expected='0', timeout=20)
# This test supersedes the one above, but it's skipped in the CI because anti-aliasing is not well supported by the Mesa software renderer.
@requires_graphics_hardware
def test_html5_webgl_create_context(self):
for opts in [[], ['-O2', '-g1', '--closure', '1'], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest(path_from_root('tests', 'webgl_create_context.cpp'), args=opts + ['-lGL'], expected='0', timeout=20)
@requires_graphics_hardware
# Verify bug https://github.com/kripken/emscripten/issues/4556: creating a WebGL context on Module.canvas without an ID explicitly assigned to it.
def test_html5_webgl_create_context2(self):
self.btest(path_from_root('tests', 'webgl_create_context2.cpp'), args=['--shell-file', path_from_root('tests', 'webgl_create_context2_shell.html'), '-lGL'], expected='0', timeout=20)
@requires_graphics_hardware
def test_html5_webgl_destroy_context(self):
for opts in [[], ['-O2', '-g1'], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest(path_from_root('tests', 'webgl_destroy_context.cpp'), args=opts + ['--shell-file', path_from_root('tests/webgl_destroy_context_shell.html'), '-lGL'], expected='0', timeout=20)
@requires_graphics_hardware
def test_webgl_context_params(self):
if WINDOWS:
self.skipTest('SKIPPED due to bug https://bugzilla.mozilla.org/show_bug.cgi?id=1310005 - WebGL implementation advertises implementation defined GL_IMPLEMENTATION_COLOR_READ_TYPE/FORMAT pair that it cannot read with')
self.btest(path_from_root('tests', 'webgl_color_buffer_readpixels.cpp'), args=['-lGL'], expected='0', timeout=20)
# Test for PR#5373 (https://github.com/kripken/emscripten/pull/5373)
def test_webgl_shader_source_length(self):
for opts in [[], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest(path_from_root('tests', 'webgl_shader_source_length.cpp'), args=opts + ['-lGL'], expected='0', timeout=20)
def test_webgl2(self):
for opts in [[], ['-O2', '-g1', '--closure', '1'], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest(path_from_root('tests', 'webgl2.cpp'), args=['-s', 'USE_WEBGL2=1', '-lGL'] + opts, expected='0')
def test_webgl2_objects(self):
self.btest(path_from_root('tests', 'webgl2_objects.cpp'), args=['-s', 'USE_WEBGL2=1', '-lGL'], expected='0')
def test_webgl2_ubos(self):
self.btest(path_from_root('tests', 'webgl2_ubos.cpp'), args=['-s', 'USE_WEBGL2=1', '-lGL'], expected='0')
@requires_graphics_hardware
def test_webgl2_garbage_free_entrypoints(self):
self.btest(path_from_root('tests', 'webgl2_garbage_free_entrypoints.cpp'), args=['-s', 'USE_WEBGL2=1', '-DTEST_WEBGL2=1'], expected='1')
self.btest(path_from_root('tests', 'webgl2_garbage_free_entrypoints.cpp'), expected='1')
@requires_graphics_hardware
def test_webgl2_backwards_compatibility_emulation(self):
self.btest(path_from_root('tests', 'webgl2_backwards_compatibility_emulation.cpp'), args=['-s', 'USE_WEBGL2=1', '-s', 'WEBGL2_BACKWARDS_COMPATIBILITY_EMULATION=1'], expected='0')
@requires_graphics_hardware
def test_webgl_with_closure(self):
self.btest(path_from_root('tests', 'webgl_with_closure.cpp'), args=['-O2', '-s', 'USE_WEBGL2=1', '--closure', '1', '-lGL'], expected='0')
def test_sdl_touch(self):
for opts in [[], ['-O2', '-g1', '--closure', '1']]:
print(opts)
self.btest(path_from_root('tests', 'sdl_touch.c'), args=opts + ['-DAUTOMATE_SUCCESS=1', '-lSDL', '-lGL'], expected='0')
def test_html5_mouse(self):
for opts in [[], ['-O2', '-g1', '--closure', '1']]:
print(opts)
self.btest(path_from_root('tests', 'test_html5_mouse.c'), args=opts + ['-DAUTOMATE_SUCCESS=1'], expected='0')
def test_sdl_mousewheel(self):
for opts in [[], ['-O2', '-g1', '--closure', '1']]:
print(opts)
self.btest(path_from_root('tests', 'test_sdl_mousewheel.c'), args=opts + ['-DAUTOMATE_SUCCESS=1', '-lSDL', '-lGL'], expected='0')
def test_codemods(self):
# tests asm.js client-side code modifications
for opt_level in [0, 2]:
print('opt level', opt_level)
opts = ['-O' + str(opt_level), '-s', 'WASM=0']
# sanity checks, building with and without precise float semantics generates different results
self.btest(path_from_root('tests', 'codemods.cpp'), expected='2', args=opts)
self.btest(path_from_root('tests', 'codemods.cpp'), expected='1', args=opts + ['-s', 'PRECISE_F32=1'])
self.btest(path_from_root('tests', 'codemods.cpp'), expected='1', args=opts + ['-s', 'PRECISE_F32=2', '--separate-asm']) # empty polyfill, but browser has support, so semantics are like float
def test_wget(self):
with open(os.path.join(self.get_dir(), 'test.txt'), 'w') as f:
f.write('emscripten')
self.btest(path_from_root('tests', 'test_wget.c'), expected='1', args=['-s', 'ASYNCIFY=1'])
print('asyncify+emterpreter')
self.btest(path_from_root('tests', 'test_wget.c'), expected='1', args=['-s', 'ASYNCIFY=1', '-s', 'EMTERPRETIFY=1'])
print('emterpreter by itself')
self.btest(path_from_root('tests', 'test_wget.c'), expected='1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1'])
def test_wget_data(self):
with open(os.path.join(self.get_dir(), 'test.txt'), 'w') as f:
f.write('emscripten')
self.btest(path_from_root('tests', 'test_wget_data.c'), expected='1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O2', '-g2'])
self.btest(path_from_root('tests', 'test_wget_data.c'), expected='1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O2', '-g2', '-s', 'ASSERTIONS=1'])
def test_locate_file(self):
for wasm in [0, 1]:
print('wasm', wasm)
self.clear()
open('src.cpp', 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <assert.h>
int main() {
FILE *f = fopen("data.txt", "r");
assert(f && "could not open file");
char buf[100];
int num = fread(buf, 1, 20, f);
assert(num == 20 && "could not read 20 bytes");
buf[20] = 0;
fclose(f);
int result = !strcmp("load me right before", buf);
printf("|%s| : %d\n", buf, result);
REPORT_RESULT(result);
return 0;
}
'''))
open('data.txt', 'w').write('load me right before...')
open('pre.js', 'w').write('Module.locateFile = function(x) { return "sub/" + x };')
run_process([PYTHON, FILE_PACKAGER, 'test.data', '--preload', 'data.txt'], stdout=open('data.js', 'w'))
# put pre.js first, then the file packager data, so locateFile is there for the file loading code
run_process([PYTHON, EMCC, 'src.cpp', '-O2', '-g', '--pre-js', 'pre.js', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM=1', '-s', 'WASM=' + str(wasm)])
os.mkdir('sub')
if wasm:
shutil.move('page.wasm', os.path.join('sub', 'page.wasm'))
else:
shutil.move('page.html.mem', os.path.join('sub', 'page.html.mem'))
shutil.move('test.data', os.path.join('sub', 'test.data'))
self.run_browser('page.html', None, '/report_result?1')
# alternatively, put locateFile in the HTML
print('in html')
open('shell.html', 'w').write('''
<body>
<script>
var Module = {
locateFile: function(x) { return "sub/" + x }
};
</script>
{{{ SCRIPT }}}
</body>
''')
def in_html(expected, args=[]):
run_process([PYTHON, EMCC, 'src.cpp', '-O2', '-g', '--shell-file', 'shell.html', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'SAFE_HEAP=1', '-s', 'ASSERTIONS=1', '-s', 'FORCE_FILESYSTEM=1', '-s', 'WASM=' + str(wasm)] + args)
if wasm:
shutil.move('page.wasm', os.path.join('sub', 'page.wasm'))
else:
shutil.move('page.html.mem', os.path.join('sub', 'page.html.mem'))
self.run_browser('page.html', None, '/report_result?' + expected)
in_html('1')
# verify that the mem init request succeeded in the latter case
if not wasm:
open('src.cpp', 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
int result = EM_ASM_INT({
return Module['memoryInitializerRequest'].status;
});
printf("memory init request: %d\n", result);
REPORT_RESULT(result);
return 0;
}
'''))
in_html('200')
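# What the test above relies on: Module.locateFile is consulted for every file the
# runtime fetches at startup (here the .wasm or .html.mem file and the file-packager
# test.data), and whatever URL it returns is what actually gets requested. Returning
# "sub/" + x and physically moving those files into sub/ therefore keeps the page
# working. The in-HTML variant checks the same hook defined in the shell, and the
# final run additionally verifies that the memory initializer XHR
# (Module['memoryInitializerRequest']) completed with status 200.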
@requires_graphics_hardware
def test_glfw3(self):
for opts in [[], ['-Os', '--closure', '1']]:
print(opts)
self.btest(path_from_root('tests', 'glfw3.c'), args=['-s', 'LEGACY_GL_EMULATION=1', '-s', 'USE_GLFW=3', '-lglfw', '-lGL'] + opts, expected='1')
@requires_graphics_hardware
def test_glfw_events(self):
self.btest(path_from_root('tests', 'glfw_events.c'), args=['-s', 'USE_GLFW=2', "-DUSE_GLFW=2", '-lglfw', '-lGL'], expected='1')
self.btest(path_from_root('tests', 'glfw_events.c'), args=['-s', 'USE_GLFW=3', "-DUSE_GLFW=3", '-lglfw', '-lGL'], expected='1')
def test_asm_swapping(self):
self.clear()
open('run.js', 'w').write(r'''
Module['onRuntimeInitialized'] = function() {
// test proper initial result
var result = Module._func();
console.log('first: ' + result);
if (result !== 10) throw 'bad first result';
// load second module to be swapped in
var second = document.createElement('script');
second.onload = function() { console.log('loaded second') };
second.src = 'second.js';
document.body.appendChild(second);
console.log('second appended');
Module['onAsmSwap'] = function() {
console.log('swapped');
// verify swapped-in result
var result = Module._func();
console.log('second: ' + result);
if (result !== 22) throw 'bad second result';
Module._report(999);
console.log('reported');
};
};
''')
for opts in [[], ['-O1'], ['-O2', '-profiling'], ['-O2']]:
print(opts)
opts += ['-s', 'WASM=0', '--pre-js', 'run.js', '-s', 'SWAPPABLE_ASM_MODULE=1'] # important that both modules are built with the same opts
open('second.cpp', 'w').write(self.with_report_result(open(path_from_root('tests', 'asm_swap2.cpp')).read()))
run_process([PYTHON, EMCC, 'second.cpp'] + opts)
run_process([PYTHON, path_from_root('tools', 'distill_asm.py'), 'a.out.js', 'second.js', 'swap-in'])
assert os.path.exists('second.js')
if isinstance(SPIDERMONKEY_ENGINE, list) and len(SPIDERMONKEY_ENGINE[0]) != 0:
out = run_js('second.js', engine=SPIDERMONKEY_ENGINE, stderr=PIPE, full_output=True, assert_returncode=None)
self.validate_asmjs(out)
else:
print('Skipping asm validation check, spidermonkey is not configured')
self.btest(path_from_root('tests', 'asm_swap.cpp'), args=opts, expected='999')
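# A rough sketch of what the swapping test above exercises: both builds use
# -s SWAPPABLE_ASM_MODULE=1, the second program is distilled into a standalone
# 'swap-in' script (second.js) by tools/distill_asm.py, and run.js injects that
# script at runtime. When the swap finishes, the runtime calls Module['onAsmSwap'],
# after which Module._func() resolves to the swapped-in implementation (returning
# 22 instead of 10), and the test reports 999 via Module._report().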
def test_sdl2_image(self):
# load an image file, get pixel data. Also O2 coverage for --preload-file, and memory-init
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.jpg'))
open(os.path.join(self.get_dir(), 'sdl2_image.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl2_image.c')).read()))
for mem in [0, 1]:
for dest, dirname, basename in [('screenshot.jpg', '/', 'screenshot.jpg'),
('screenshot.jpg@/assets/screenshot.jpg', '/assets', 'screenshot.jpg')]:
run_process([
PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl2_image.c'), '-o', 'page.html', '-O2', '--memory-init-file', str(mem),
'--preload-file', dest, '-DSCREENSHOT_DIRNAME="' + dirname + '"', '-DSCREENSHOT_BASENAME="' + basename + '"', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
def test_sdl2_image_jpeg(self):
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.jpeg'))
open(os.path.join(self.get_dir(), 'sdl2_image_jpeg.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl2_image.c')).read()))
run_process([
PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl2_image_jpeg.c'), '-o', 'page.html',
'--preload-file', 'screenshot.jpeg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpeg"', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
def test_sdl2_image_formats(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl2_image.c', expected='512', args=['--preload-file', 'screenshot.png', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.png"',
'-DNO_PRELOADED', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '-s', 'SDL2_IMAGE_FORMATS=["png"]'])
def test_sdl2_key(self):
for defines in [[]]:
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function keydown(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keydown", true, true, window,
0, 0, 0, 0,
c, c);
var prevented = !document.dispatchEvent(event);
// send keypress if not prevented
if (!prevented) {
event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keypress", true, true, window,
0, 0, 0, 0, 0, c);
document.dispatchEvent(event);
}
}
function keyup(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keyup", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
}
''')
open(os.path.join(self.get_dir(), 'sdl2_key.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl2_key.c')).read()))
run_process([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl2_key.c'), '-o', 'page.html'] + defines + ['-s', 'USE_SDL=2', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']'''])
self.run_browser('page.html', '', '/report_result?37182145')
def test_sdl2_text(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function simulateKeyEvent(charCode) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keypress", true, true, window,
0, 0, 0, 0, 0, charCode);
document.body.dispatchEvent(event);
}
''')
open(os.path.join(self.get_dir(), 'sdl2_text.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl2_text.c')).read()))
run_process([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl2_text.c'), '-o', 'page.html', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-s', 'USE_SDL=2'])
self.run_browser('page.html', '', '/report_result?1')
def test_sdl2_mouse(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
open(os.path.join(self.get_dir(), 'sdl2_mouse.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl2_mouse.c')).read()))
run_process([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl2_mouse.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
self.run_browser('page.html', '', '/report_result?1', timeout=30)
def test_sdl2_mouse_offsets(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, x, y, x, y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
open(os.path.join(self.get_dir(), 'page.html'), 'w').write('''
<html>
<head>
<style type="text/css">
html, body { margin: 0; padding: 0; }
#container {
position: absolute;
left: 5px; right: 0;
top: 5px; bottom: 0;
}
#canvas {
position: absolute;
left: 0; width: 600px;
top: 0; height: 450px;
}
textarea {
margin-top: 500px;
margin-left: 5px;
width: 600px;
}
</style>
</head>
<body>
<div id="container">
<canvas id="canvas"></canvas>
</div>
<textarea id="output" rows="8"></textarea>
<script type="text/javascript">
var Module = {
canvas: document.getElementById('canvas'),
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' ');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})()
};
</script>
<script type="text/javascript" src="sdl2_mouse.js"></script>
</body>
</html>
''')
open(os.path.join(self.get_dir(), 'sdl2_mouse.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl2_mouse.c')).read()))
run_process([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl2_mouse.c'), '-DTEST_SDL_MOUSE_OFFSETS=1', '-O2', '--minify', '0', '-o', 'sdl2_mouse.js', '--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
self.run_browser('page.html', '', '/report_result?1')
@requires_graphics_hardware
def test_sdl2glshader(self):
self.btest('sdl2glshader.c', reference='sdlglshader.png', args=['-s', 'USE_SDL=2', '-O2', '--closure', '1', '-s', 'LEGACY_GL_EMULATION=1'])
self.btest('sdl2glshader.c', reference='sdlglshader.png', args=['-s', 'USE_SDL=2', '-O2', '-s', 'LEGACY_GL_EMULATION=1'], also_proxied=True) # XXX closure fails on proxy
def test_sdl2_canvas_blank(self):
self.btest('sdl2_canvas_blank.c', reference='sdl_canvas_blank.png', args=['-s', 'USE_SDL=2'])
def test_sdl2_canvas_palette(self):
self.btest('sdl2_canvas_palette.c', reference='sdl_canvas_palette.png', args=['-s', 'USE_SDL=2'])
def test_sdl2_canvas_twice(self):
self.btest('sdl2_canvas_twice.c', reference='sdl_canvas_twice.png', args=['-s', 'USE_SDL=2'])
def test_sdl2_gfx(self):
self.btest('sdl2_gfx.cpp', args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_GFX=2'], reference='sdl2_gfx.png', reference_slack=1)
def test_sdl2_canvas_palette_2(self):
open(os.path.join(self.get_dir(), 'args-r.js'), 'w').write('''
Module['arguments'] = ['-r'];
''')
open(os.path.join(self.get_dir(), 'args-g.js'), 'w').write('''
Module['arguments'] = ['-g'];
''')
open(os.path.join(self.get_dir(), 'args-b.js'), 'w').write('''
Module['arguments'] = ['-b'];
''')
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_r.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-r.js'])
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_g.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-g.js'])
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_b.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-b.js'])
def test_sdl2_swsurface(self):
self.btest('sdl2_swsurface.c', expected='1', args=['-s', 'USE_SDL=2'])
def test_sdl2_image_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl2_image_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2'])
def test_sdl2_image_prepare_data(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl2_image_prepare_data.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2'])
@requires_graphics_hardware
def test_sdl2_canvas_proxy(self):
def post():
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function assert(x, y) { if (!x) throw 'assertion failed ' + y }
%s
var windowClose = window.close;
window.close = function() {
// wait for rafs to arrive and the screen to update before reftesting
setTimeout(function() {
doReftest();
setTimeout(windowClose, 5000);
}, 1000);
};
</script>
</body>''' % open('reftest.js').read())
open('test.html', 'w').write(html)
open('data.txt', 'w').write('datum')
self.btest('sdl2_canvas_proxy.c', reference='sdl2_canvas.png', args=['-s', 'USE_SDL=2', '--proxy-to-worker', '--preload-file', 'data.txt', '-s', 'GL_TESTING=1'], manual_reference=True, post_build=post)
def test_sdl2_pumpevents(self):
# key events should be detected using SDL_PumpEvents
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function keydown(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keydown", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
}
''')
self.btest('sdl2_pumpevents.c', expected='7', args=['--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
def test_sdl2_timer(self):
self.btest('sdl2_timer.c', expected='5', args=['-s', 'USE_SDL=2'])
def test_sdl2_canvas_size(self):
self.btest('sdl2_canvas_size.c', expected='1', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_gl_read(self):
# SDL, OpenGL, readPixels
open(os.path.join(self.get_dir(), 'sdl2_gl_read.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl2_gl_read.c')).read()))
run_process([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl2_gl_read.c'), '-o', 'something.html', '-s', 'USE_SDL=2'])
self.run_browser('something.html', '.', '/report_result?1')
@requires_graphics_hardware
def test_sdl2_fog_simple(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl2_fog_simple.c', reference='screenshot-fog-simple.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_negative(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl2_fog_negative.c', reference='screenshot-fog-negative.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_density(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl2_fog_density.c', reference='screenshot-fog-density.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_exp2(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl2_fog_exp2.c', reference='screenshot-fog-exp2.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_linear(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl2_fog_linear.c', reference='screenshot-fog-linear.png', reference_slack=1,
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
def test_sdl2_unwasteful(self):
self.btest('sdl2_unwasteful.cpp', expected='1', args=['-s', 'USE_SDL=2', '-O1'])
def test_sdl2_canvas_write(self):
self.btest('sdl2_canvas_write.cpp', expected='0', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_gl_frames_swap(self):
def post_build(*args):
self.post_manual_reftest(*args)
html = open('test.html').read()
html2 = html.replace('''Module['postRun'] = doReftest;''', '') # we don't want the very first frame
assert html != html2
open('test.html', 'w').write(html2)
self.btest('sdl2_gl_frames_swap.c', reference='sdl2_gl_frames_swap.png', args=['--proxy-to-worker', '-s', 'GL_TESTING=1', '-s', 'USE_SDL=2'], manual_reference=True, post_build=post_build)
@requires_graphics_hardware
def test_sdl2_ttf(self):
shutil.copy2(path_from_root('tests', 'freetype', 'LiberationSansBold.ttf'), self.get_dir())
self.btest('sdl2_ttf.c', reference='sdl2_ttf.png',
args=['-O2', '-s', 'USE_SDL=2', '-s', 'USE_SDL_TTF=2', '--embed-file', 'LiberationSansBold.ttf'],
message='You should see colorful "hello" and "world" in the window',
timeout=30)
def test_sdl2_custom_cursor(self):
shutil.copyfile(path_from_root('tests', 'cursor.bmp'), os.path.join(self.get_dir(), 'cursor.bmp'))
self.btest('sdl2_custom_cursor.c', expected='1', args=['--preload-file', 'cursor.bmp', '-s', 'USE_SDL=2'])
def test_sdl2_misc(self):
self.btest('sdl2_misc.c', expected='1', args=['-s', 'USE_SDL=2'])
print('also test building to object files first')
src = open(path_from_root('tests', 'sdl2_misc.c')).read()
open('test.c', 'w').write(self.with_report_result(src))
run_process([PYTHON, EMCC, 'test.c', '-s', 'USE_SDL=2', '-o', 'test.o'])
run_process([PYTHON, EMCC, 'test.o', '-s', 'USE_SDL=2', '-o', 'test.html'])
self.run_browser('test.html', '...', '/report_result?1')
@requires_graphics_hardware
def test_cocos2d_hello(self):
cocos2d_root = os.path.join(system_libs.Ports.get_build_dir(), 'Cocos2d')
preload_file = os.path.join(cocos2d_root, 'samples', 'HelloCpp', 'Resources') + '@'
self.btest('cocos2d_hello.cpp', reference='cocos2d_hello.png', reference_slack=1,
args=['-s', 'USE_COCOS2D=3', '-s', 'ERROR_ON_UNDEFINED_SYMBOLS=0', '--std=c++11', '--preload-file', preload_file, '--use-preload-plugins'],
message='You should see Cocos2d logo',
timeout=30)
def test_emterpreter_async(self):
for opts in [0, 1, 2, 3]:
print(opts)
self.btest('emterpreter_async.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts), '-g2'])
def test_emterpreter_async_2(self):
# Error.stackTraceLimit defaults to 10 in Chrome, but this test relies on more
# than 40 stack frames being reported.
with open('pre.js', 'w') as f:
f.write('Error.stackTraceLimit = 80;\n')
self.btest('emterpreter_async_2.cpp', '40', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O3', '--pre-js', 'pre.js', ])
def test_emterpreter_async_virtual(self):
for opts in [0, 1, 2, 3]:
print(opts)
self.btest('emterpreter_async_virtual.cpp', '5', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts), '-profiling'])
def test_emterpreter_async_virtual_2(self):
for opts in [0, 1, 2, 3]:
print(opts)
self.btest('emterpreter_async_virtual_2.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts), '-s', 'ASSERTIONS=1', '-s', 'SAFE_HEAP=1', '-profiling'])
def test_emterpreter_async_bad(self):
for opts in [0, 1, 2, 3]:
print(opts)
self.btest('emterpreter_async_bad.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts), '-s', 'EMTERPRETIFY_BLACKLIST=["_middle"]', '-s', 'ASSERTIONS=1'])
def test_emterpreter_async_bad_2(self):
for opts in [0, 1, 2, 3]:
for assertions in [0, 1]:
# without assertions, we end up continuing to run more non-emterpreted code in this testcase, returning 1
# with assertions, we hit the emterpreter-async assertion on that, and report a clear error
expected = '2' if assertions else '1'
print(opts, assertions, expected)
self.btest('emterpreter_async_bad_2.cpp', expected, args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts), '-s', 'EMTERPRETIFY_BLACKLIST=["_middle"]', '-s', 'ASSERTIONS=%s' % assertions])
def test_emterpreter_async_mainloop(self):
for opts in [0, 1, 2, 3]:
print(opts)
self.btest('emterpreter_async_mainloop.cpp', '121', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts)], timeout=20)
def test_emterpreter_async_with_manual(self):
for opts in [0, 1, 2, 3]:
print(opts)
self.btest('emterpreter_async_with_manual.cpp', '121', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts), '-s', 'EMTERPRETIFY_BLACKLIST=["_acall"]'], timeout=20)
def test_emterpreter_async_sleep2(self):
self.btest('emterpreter_async_sleep2.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-Oz'])
def test_emterpreter_async_sleep2_safeheap(self):
# check that safe-heap machinery does not cause errors in async operations
self.btest('emterpreter_async_sleep2_safeheap.cpp', '17', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-Oz', '-profiling', '-s', 'SAFE_HEAP=1', '-s', 'ASSERTIONS=1', '-s', 'EMTERPRETIFY_WHITELIST=["_main","_callback","_fix"]', '-s', 'EXIT_RUNTIME=1'])
@requires_sound_hardware
def test_sdl_audio_beep_sleep(self):
self.btest('sdl_audio_beep_sleep.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-Os', '-s', 'ASSERTIONS=1', '-s', 'DISABLE_EXCEPTION_CATCHING=0', '-profiling', '-s', 'SAFE_HEAP=1', '-lSDL'], timeout=90)
def test_mainloop_reschedule(self):
self.btest('mainloop_reschedule.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-Os'], timeout=30)
def test_mainloop_infloop(self):
self.btest('mainloop_infloop.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1'], timeout=30)
def test_emterpreter_async_iostream(self):
self.btest('emterpreter_async_iostream.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1'])
def test_modularize(self):
for opts in [[], ['-O1'], ['-O2', '-profiling'], ['-O2'], ['-O2', '--closure', '1']]:
for args, code in [
([], 'Module();'), # defaults
# use EXPORT_NAME
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
if (typeof Module !== "undefined") throw "what?!"; // do not pollute the global scope, we are modularized!
HelloWorld.noInitialRun = true; // erroneous module capture will load this and cause a timeout
HelloWorld();
'''),
# pass in a Module option (noInitialRun prevents main() from running automatically, so we invoke it ourselves)
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
var hello = HelloWorld({ noInitialRun: true, onRuntimeInitialized: function() {
setTimeout(function() { hello._main(); }); // must be async, because onRuntimeInitialized may be called synchronously, so |hello| is not yet set!
} });
'''),
# similar, but without a mem init file, everything is sync and simple
(['-s', 'EXPORT_NAME="HelloWorld"', '--memory-init-file', '0'], '''
var hello = HelloWorld({ noInitialRun: true});
hello._main();
'''),
# use the then() API
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
HelloWorld({ noInitialRun: true }).then(function(hello) {
hello._main();
});
'''),
# then() API, also note the returned value
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
var helloOutside = HelloWorld({ noInitialRun: true }).then(function(hello) {
setTimeout(function() {
hello._main();
if (hello !== helloOutside) throw 'helloOutside has not been set!'; // as we are async, helloOutside must have been set
});
});
'''),
]:
print('test on', opts, args, code)
src = open(path_from_root('tests', 'browser_test_hello_world.c')).read()
open('test.c', 'w').write(self.with_report_result(src))
# this test is synchronous, so avoid async startup due to wasm features
run_process([PYTHON, EMCC, 'test.c', '-s', 'MODULARIZE=1', '-s', 'BINARYEN_ASYNC_COMPILATION=0', '-s', 'SINGLE_FILE=1'] + args + opts)
open('a.html', 'w').write('''
<script src="a.out.js"></script>
<script>
%s
</script>
''' % code)
self.run_browser('a.html', '...', '/report_result?0')
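# For reference, the MODULARIZE=1 patterns exercised above boil down to roughly
# (assuming EXPORT_NAME="HelloWorld" and the synchronous startup forced here):
#   var instance = HelloWorld({ noInitialRun: true });  // factory call, no global Module
#   instance._main();                                   // run main() manually
# or, via the thenable that the factory returned at the time of this test suite:
#   HelloWorld({ noInitialRun: true }).then(function(instance) { instance._main(); });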
# test illustrating the regression on the modularize feature since commit c5af8f6
# when compiling with the --preload-file option
def test_modularize_and_preload_files(self):
# amount of memory, different from the default, that will be allocated for the emscripten heap
totalMemory = 33554432
for opts in [[], ['-O1'], ['-O2', '-profiling'], ['-O2'], ['-O2', '--closure', '1']]:
# the main function simply checks that the amount of allocated heap memory is correct
src = r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
EM_ASM({
// read TOTAL_MEMORY via bracket access so the test still succeeds when closure compiler is enabled
var totalMemory = Module['TOTAL_MEMORY'];
assert(totalMemory === %d, 'bad memory size');
});
REPORT_RESULT(0);
return 0;
}
''' % totalMemory
open('test.c', 'w').write(self.with_report_result(src))
# generate a dummy file
open('dummy_file', 'w').write('dummy')
# compile the code with the modularize feature and the preload-file option enabled
# no wasm, since this tests customizing total memory at runtime
run_process([PYTHON, EMCC, 'test.c', '-s', 'WASM=0', '-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME="Foo"', '--preload-file', 'dummy_file'] + opts)
open('a.html', 'w').write('''
<script src="a.out.js"></script>
<script>
// instantiate the Foo module with a custom TOTAL_MEMORY value
var foo = Foo({ TOTAL_MEMORY: %d });
</script>
''' % totalMemory)
self.run_browser('a.html', '...', '/report_result?0')
def test_webidl(self):
# see original in test_core.py
run_process([PYTHON, path_from_root('tools', 'webidl_binder.py'),
path_from_root('tests', 'webidl', 'test.idl'),
'glue'])
assert os.path.exists('glue.cpp')
assert os.path.exists('glue.js')
for opts in [[], ['-O1'], ['-O2']]:
print(opts)
self.btest(os.path.join('webidl', 'test.cpp'), '1', args=['--post-js', 'glue.js', '-I.', '-DBROWSER'] + opts)
@no_chrome("required synchronous wasm compilation")
def test_dynamic_link(self):
open('pre.js', 'w').write('''
Module.dynamicLibraries = ['side.wasm'];
''')
open('main.cpp', 'w').write(r'''
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <emscripten.h>
char *side(const char *data);
int main() {
char *temp = side("hello through side\n");
char *ret = (char*)malloc(strlen(temp)+1);
strcpy(ret, temp);
temp[1] = 'x';
EM_ASM({
Module.realPrint = out;
out = function(x) {
if (!Module.printed) Module.printed = x;
Module.realPrint(x);
};
});
puts(ret);
EM_ASM({ assert(Module.printed === 'hello through side', ['expected', Module.printed]); });
REPORT_RESULT(2);
return 0;
}
''')
open('side.cpp', 'w').write(r'''
#include <stdlib.h>
#include <string.h>
char *side(const char *data);
char *side(const char *data) {
char *ret = (char*)malloc(strlen(data)+1);
strcpy(ret, data);
return ret;
}
''')
run_process([PYTHON, EMCC, 'side.cpp', '-s', 'SIDE_MODULE=1', '-O2', '-o', 'side.wasm'])
self.btest(self.in_dir('main.cpp'), '2', args=['-s', 'MAIN_MODULE=1', '-O2', '--pre-js', 'pre.js'])
print('wasm in worker (we can read binary data synchronously there)')
open('pre.js', 'w').write('''
var Module = { dynamicLibraries: ['side.wasm'] };
''')
run_process([PYTHON, EMCC, 'side.cpp', '-s', 'SIDE_MODULE=1', '-O2', '-o', 'side.wasm', '-s', 'WASM=1'])
self.btest(self.in_dir('main.cpp'), '2', args=['-s', 'MAIN_MODULE=1', '-O2', '--pre-js', 'pre.js', '-s', 'WASM=1', '--proxy-to-worker'])
print('wasm (will auto-preload since no sync binary reading)')
open('pre.js', 'w').write('''
Module.dynamicLibraries = ['side.wasm'];
''')
# same wasm side module works
self.btest(self.in_dir('main.cpp'), '2', args=['-s', 'MAIN_MODULE=1', '-O2', '--pre-js', 'pre.js', '-s', 'WASM=1'])
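# The dynamic linking setup above in a nutshell: the library is built with
# -s SIDE_MODULE=1 into side.wasm, the main program with -s MAIN_MODULE=1, and
# pre.js lists the library so the runtime loads and links it before main() runs:
#   Module.dynamicLibraries = ['side.wasm'];
# The --proxy-to-worker variant only changes where the wasm binary can be read
# synchronously (inside a worker), which is presumably why its pre.js declares the
# Module object itself.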
@requires_graphics_hardware
@no_chrome("required synchronous wasm compilation")
def test_dynamic_link_glemu(self):
open('pre.js', 'w').write('''
Module.dynamicLibraries = ['side.wasm'];
''')
open('main.cpp', 'w').write(r'''
#include <stdio.h>
#include <string.h>
#include <assert.h>
const char *side();
int main() {
const char *exts = side();
puts(side());
assert(strstr(exts, "GL_EXT_texture_env_combine"));
REPORT_RESULT(1);
return 0;
}
''')
open('side.cpp', 'w').write(r'''
#include "SDL/SDL.h"
#include "SDL/SDL_opengl.h"
const char *side() {
SDL_Init(SDL_INIT_VIDEO);
SDL_SetVideoMode(600, 600, 16, SDL_OPENGL);
return (const char *)glGetString(GL_EXTENSIONS);
}
''')
run_process([PYTHON, EMCC, 'side.cpp', '-s', 'SIDE_MODULE=1', '-O2', '-o', 'side.wasm', '-lSDL'])
self.btest(self.in_dir('main.cpp'), '1', args=['-s', 'MAIN_MODULE=1', '-O2', '-s', 'LEGACY_GL_EMULATION=1', '-lSDL', '-lGL', '--pre-js', 'pre.js'])
def test_memory_growth_during_startup(self):
open('data.dat', 'w').write('X' * (30 * 1024 * 1024))
self.btest('browser_test_hello_world.c', '0', args=['-s', 'ASSERTIONS=1', '-s', 'ALLOW_MEMORY_GROWTH=1', '-s', 'TOTAL_MEMORY=16MB', '-s', 'TOTAL_STACK=5000', '--preload-file', 'data.dat'])
# pthreads tests
def prep_no_SAB(self):
open('html.html', 'w').write(open(path_from_root('src', 'shell_minimal.html')).read().replace('''<body>''', '''<body>
<script>
SharedArrayBuffer = undefined;
Atomics = undefined;
</script>
'''))
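# prep_no_SAB() writes an html.html shell in which SharedArrayBuffer and Atomics are
# forced to undefined before the compiled code runs, i.e. it simulates a browser
# without threading support. A test would presumably build against that shell along
# the lines of (hypothetical example):
#   self.prep_no_SAB()
#   self.btest('some_pthread_test.cpp', expected='0',
#              args=['--shell-file', 'html.html', '-s', 'USE_PTHREADS=1'])
# forcing the pthreads runtime onto its single-threaded fallback path.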
# Test that the emscripten_ atomics API functions work.
def test_pthread_atomics(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_atomics.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test 64-bit atomics.
def test_pthread_64bit_atomics(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_64bit_atomics.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test 64-bit C++11 atomics.
def test_pthread_64bit_cxx11_atomics(self):
for opt in [['-O0'], ['-O3']]:
for pthreads in [[], ['-s', 'USE_PTHREADS=1']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_64bit_cxx11_atomics.cpp'), expected='0', args=opt + pthreads + ['-std=c++11'])
# Test the old GCC atomic __sync_fetch_and_op builtin operations.
def test_pthread_gcc_atomic_fetch_and_op(self):
# We need to resort to using regexes to optimize out SharedArrayBuffer when pthreads are not supported, which is brittle!
# Therefore perform very extensive testing of different codegen modes to catch any problems.
for opt in [[], ['-O1'], ['-O2'], ['-O3'], ['-O3', '-s', 'AGGRESSIVE_VARIABLE_ELIMINATION=1'], ['-Os'], ['-Oz']]:
for debug in [[], ['-g1'], ['-g2'], ['-g4']]:
for f32 in [[], ['-s', 'PRECISE_F32=1', '--separate-asm', '-s', 'WASM=0']]:
print(opt, debug, f32)
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_atomic_fetch_and_op.cpp'), expected='0', args=opt + debug + f32 + ['-s', 'TOTAL_MEMORY=64MB', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# 64 bit version of the above test.
def test_pthread_gcc_64bit_atomic_fetch_and_op(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_64bit_atomic_fetch_and_op.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'], also_asmjs=True)
# Test the old GCC atomic __sync_op_and_fetch builtin operations.
def test_pthread_gcc_atomic_op_and_fetch(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_atomic_op_and_fetch.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'], also_asmjs=True)
# 64 bit version of the above test.
def test_pthread_gcc_64bit_atomic_op_and_fetch(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_64bit_atomic_op_and_fetch.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'], also_asmjs=True)
# Tests the remaining GCC atomics after the two tests above.
def test_pthread_gcc_atomics(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_atomics.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test the __sync_lock_test_and_set and __sync_lock_release primitives.
def test_pthread_gcc_spinlock(self):
for arg in [[], ['-DUSE_EMSCRIPTEN_INTRINSICS']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_spinlock.cpp'), expected='800', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'] + arg, also_asmjs=True)
# Test that basic thread creation works.
def test_pthread_create(self):
for opt in [['-O0'], ['-O3']]:
print(str(opt))
self.btest(path_from_root('tests', 'pthread', 'test_pthread_create.cpp'), expected='0', args=opt + ['-s', 'TOTAL_MEMORY=64MB', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Tests the -s PROXY_TO_PTHREAD=1 option.
def test_pthread_proxy_to_pthread(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_proxy_to_pthread.c'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'], timeout=30)
# Test that a pthread can spawn another pthread of its own.
def test_pthread_create_pthread(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_create_pthread.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2'])
# Test another case of pthreads spawning pthreads, but this time the callers immediately join on the threads they created.
def test_pthread_nested_spawns(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_nested_spawns.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2'])
# Test that main thread can wait for a pthread to finish via pthread_join().
def test_pthread_join(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_join.cpp'), expected='6765', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test pthread_cancel() operation
def test_pthread_cancel(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_cancel.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test pthread_kill() operation
@no_chrome('pthread_kill hangs the chrome renderer and keeps subsequent tests from passing')
def test_pthread_kill(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_kill.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test that pthread cleanup stack (pthread_cleanup_push/_pop) works.
def test_pthread_cleanup(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_cleanup.cpp'), expected='907640832', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Tests the pthread mutex api.
def test_pthread_mutex(self):
for arg in [[], ['-DSPINLOCK_TEST']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_mutex.cpp'), expected='50', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'] + arg)
# Test that memory allocation is thread-safe.
def test_pthread_malloc(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_malloc.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Stress test pthreads allocating memory that will call to sbrk(), and main thread has to free up the data.
def test_pthread_malloc_free(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_malloc_free.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8', '-s', 'TOTAL_MEMORY=256MB'])
# Test that the pthread_barrier API works ok.
def test_pthread_barrier(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_barrier.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test the pthread_once() function.
def test_pthread_once(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_once.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test against a certain thread exit time handling bug by spawning tons of threads.
def test_pthread_spawns(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_spawns.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# It is common for code to flip volatile global vars for thread control. This is a bit lax, but nevertheless, test whether that
# kind of scheme will work with Emscripten as well.
def test_pthread_volatile(self):
for arg in [[], ['-DUSE_C_VOLATILE']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_volatile.cpp'), expected='1', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'] + arg)
# Test thread-specific data (TLS).
def test_pthread_thread_local_storage(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_thread_local_storage.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test the pthread condition variable creation and waiting.
def test_pthread_condition_variable(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_condition_variable.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test that pthreads are able to do printf.
def test_pthread_printf(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_printf.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test that pthreads are able to do cout. Failed due to https://bugzilla.mozilla.org/show_bug.cgi?id=1154858.
def test_pthread_iostream(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_iostream.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test that the main thread is able to use pthread_set/getspecific.
def test_pthread_setspecific_mainthread(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_setspecific_mainthread.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1'], also_asmjs=True)
# Test the -s PTHREAD_HINT_NUM_CORES=x command line variable.
def test_pthread_num_logical_cores(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_num_logical_cores.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_HINT_NUM_CORES=2'], also_asmjs=True)
# Test that pthreads have access to filesystem.
def test_pthread_file_io(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_file_io.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test that the pthread_create() function operates benignly in the case that threading is not supported.
def test_pthread_supported(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_supported.cpp'), expected='0', args=['-O3'] + args)
def test_pthread_separate_asm_pthreads(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_atomics.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8', '--separate-asm', '--profiling'])
def test_pthread_custom_pthread_main_url(self):
self.clear()
os.makedirs(os.path.join(self.get_dir(), 'cdn'))
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten/emscripten.h>
#include <emscripten/threading.h>
#include <pthread.h>
int result = 0;
void *thread_main(void *arg) {
emscripten_atomic_store_u32(&result, 1);
pthread_exit(0);
}
int main() {
pthread_t t;
if (emscripten_has_threading_support()) {
pthread_create(&t, 0, thread_main, 0);
pthread_join(t, 0);
} else {
result = 1;
}
REPORT_RESULT(result);
}
'''))
# Test that it is possible to define "Module.locateFile" string to locate where pthread-main.js will be loaded from.
open(self.in_dir('shell.html'), 'w').write(open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "cdn/" + path;}}, '))
run_process([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--shell-file', 'shell.html', '-s', 'WASM=0', '-s', 'IN_TEST_HARNESS=1', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1', '-o', 'test.html'])
shutil.move('pthread-main.js', os.path.join('cdn', 'pthread-main.js'))
self.run_browser('test.html', '', '/report_result?1')
# Test that it is possible to define "Module.locateFile(foo)" function to locate where pthread-main.js will be loaded from.
open(self.in_dir('shell2.html'), 'w').write(open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function(filename) { if (filename == "pthread-main.js") return "cdn/pthread-main.js"; else return filename; }, '))
run_process([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--shell-file', 'shell2.html', '-s', 'WASM=0', '-s', 'IN_TEST_HARNESS=1', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1', '-o', 'test2.html'])
try_delete('pthread-main.js')
self.run_browser('test2.html', '', '/report_result?1')
# Test that there is no deadlock when the main thread is performing a futex wait while a pthread needs it to do a proxied operation (before that pthread would wake up the main thread).
def test_pthread_proxying_in_futex_wait(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_proxying_in_futex_wait.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test that sbrk() operates properly in multithreaded conditions
def test_pthread_sbrk(self):
for aborting_malloc in [0, 1]:
print('aborting malloc=' + str(aborting_malloc))
# With aborting malloc = 1, test allocating memory in threads
# With aborting malloc = 0, allocate so much memory in threads that some of the allocations fail.
self.btest(path_from_root('tests', 'pthread', 'test_pthread_sbrk.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8', '--separate-asm', '-s', 'ABORTING_MALLOC=' + str(aborting_malloc), '-DABORTING_MALLOC=' + str(aborting_malloc), '-s', 'TOTAL_MEMORY=128MB'])
# Test that -s ABORTING_MALLOC=0 works in both pthreads and non-pthreads builds. (sbrk fails gracefully)
def test_pthread_gauge_available_memory(self):
for opts in [[], ['-O2']]:
for args in [[], ['-s', 'USE_PTHREADS=1']]:
self.btest(path_from_root('tests', 'gauge_available_memory.cpp'), expected='1', args=['-s', 'ABORTING_MALLOC=0'] + args + opts)
# Test that the proxying operations of user code from pthreads to main thread work
def test_pthread_run_on_main_thread(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_run_on_main_thread.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test how a lot of back-to-back called proxying operations behave.
def test_pthread_run_on_main_thread_flood(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_run_on_main_thread_flood.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test that it is possible to synchronously call a JavaScript function on the main thread and get a return value back.
def test_pthread_call_sync_on_main_thread(self):
self.btest(path_from_root('tests', 'pthread', 'call_sync_on_main_thread.c'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-DPROXY_TO_PTHREAD=1', '--js-library', path_from_root('tests', 'pthread', 'call_sync_on_main_thread.js')])
self.btest(path_from_root('tests', 'pthread', 'call_sync_on_main_thread.c'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_sync_on_main_thread.js')])
self.btest(path_from_root('tests', 'pthread', 'call_sync_on_main_thread.c'), expected='1', args=['-Oz', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_sync_on_main_thread.js')])
# Test that it is possible to asynchronously call a JavaScript function on the main thread.
def test_pthread_call_async_on_main_thread(self):
self.btest(path_from_root('tests', 'pthread', 'call_async_on_main_thread.c'), expected='7', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-DPROXY_TO_PTHREAD=1', '--js-library', path_from_root('tests', 'pthread', 'call_async_on_main_thread.js')])
self.btest(path_from_root('tests', 'pthread', 'call_async_on_main_thread.c'), expected='7', args=['-O3', '-s', 'USE_PTHREADS=1', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_async_on_main_thread.js')])
self.btest(path_from_root('tests', 'pthread', 'call_async_on_main_thread.c'), expected='7', args=['-Oz', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_async_on_main_thread.js')])
# Tests that spawning a new thread does not cause a reinitialization of the global data section of the application memory area.
def test_pthread_global_data_initialization(self):
for mem_init_mode in [[], ['--memory-init-file', '0'], ['--memory-init-file', '1'], ['-s', 'MEM_INIT_METHOD=2', '-s', 'WASM=0']]:
for args in [[], ['-O3']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_global_data_initialization.c'), expected='20', args=args + mem_init_mode + ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test that emscripten_get_now() reports coherent wallclock times across all pthreads, instead of each pthread independently reporting wallclock times since the launch of that pthread.
def test_pthread_clock_drift(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_clock_drift.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
def test_pthread_utf8_funcs(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_utf8_funcs.cpp'), expected='0', args=['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# test atomicrmw i64
def test_atomicrmw_i64(self):
# TODO: enable this with wasm, currently pthreads/atomics have limitations
run_process([PYTHON, EMCC, path_from_root('tests', 'atomicrmw_i64.ll'), '-s', 'USE_PTHREADS=1', '-s', 'IN_TEST_HARNESS=1', '-o', 'test.html', '-s', 'WASM=0'])
self.run_browser('test.html', None, '/report_result?0')
# Test that it is possible to send a signal via calling alarm(timeout), which in turn calls to the signal handler set by signal(SIGALRM, func);
def test_sigalrm(self):
self.btest(path_from_root('tests', 'sigalrm.cpp'), expected='0', args=['-O3'], timeout=30)
def test_meminit_pairs(self):
d = 'const char *data[] = {\n "'
d += '",\n "'.join(''.join('\\x{:02x}\\x{:02x}'.format(i, j)
for j in range(256)) for i in range(256))
with open(path_from_root('tests', 'meminit_pairs.c')) as f:
d += '"\n};\n' + f.read()
args = ["-O2", "--memory-init-file", "0", "-s", "MEM_INIT_METHOD=2", "-s", "ASSERTIONS=1", '-s', 'WASM=0']
self.btest(d, expected='0', args=args + ["--closure", "0"])
self.btest(d, expected='0', args=args + ["--closure", "0", "-g"])
self.btest(d, expected='0', args=args + ["--closure", "1"])
def test_meminit_big(self):
d = 'const char *data[] = {\n "'
d += '",\n "'.join([''.join('\\x{:02x}\\x{:02x}'.format(i, j)
for j in range(256)) for i in range(256)] * 256)
with open(path_from_root('tests', 'meminit_pairs.c')) as f:
d += '"\n};\n' + f.read()
assert len(d) > (1 << 27) # more than 32M memory initializer
args = ["-O2", "--memory-init-file", "0", "-s", "MEM_INIT_METHOD=2", "-s", "ASSERTIONS=1", '-s', 'WASM=0']
self.btest(d, expected='0', args=args + ["--closure", "0"])
self.btest(d, expected='0', args=args + ["--closure", "0", "-g"])
self.btest(d, expected='0', args=args + ["--closure", "1"])
def test_canvas_style_proxy(self):
self.btest('canvas_style_proxy.c', expected='1', args=['--proxy-to-worker', '--shell-file', path_from_root('tests/canvas_style_proxy_shell.html'), '--pre-js', path_from_root('tests/canvas_style_proxy_pre.js')])
def test_canvas_size_proxy(self):
self.btest(path_from_root('tests', 'canvas_size_proxy.c'), expected='0', args=['--proxy-to-worker'])
def test_custom_messages_proxy(self):
self.btest(path_from_root('tests', 'custom_messages_proxy.c'), expected='1', args=['--proxy-to-worker', '--shell-file', path_from_root('tests', 'custom_messages_proxy_shell.html'), '--post-js', path_from_root('tests', 'custom_messages_proxy_postjs.js')])
def test_separate_asm(self):
for opts in [['-O0'], ['-O1'], ['-O2'], ['-O2', '--closure', '1']]:
print(opts)
open('src.cpp', 'w').write(self.with_report_result(open(path_from_root('tests', 'browser_test_hello_world.c')).read()))
run_process([PYTHON, EMCC, 'src.cpp', '-o', 'test.html', '-s', 'WASM=0'] + opts)
self.run_browser('test.html', None, '/report_result?0')
print('run one')
open('one.html', 'w').write('<script src="test.js"></script>')
self.run_browser('one.html', None, '/report_result?0')
print('run two')
run_process([PYTHON, path_from_root('tools', 'separate_asm.py'), 'test.js', 'asm.js', 'rest.js'])
open('two.html', 'w').write('''
<script>
var Module = {};
</script>
<script src="asm.js"></script>
<script src="rest.js"></script>
''')
self.run_browser('two.html', None, '/report_result?0')
print('run hello world')
self.clear()
assert not os.path.exists('test.asm.js')
self.btest('browser_test_hello_world.c', expected='0', args=opts + ['-s', 'WASM=0', '--separate-asm'])
assert os.path.exists('test.asm.js')
os.unlink('test.asm.js')
print('see a fail')
self.run_browser('test.html', None, '[no http server activity]', timeout=5) # fail without the asm
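# In short, --separate-asm splits the output into test.asm.js (the asm.js module)
# and the rest of the runtime, and two.html above shows the manual equivalent via
# tools/separate_asm.py: the asm.js part is loaded before the remainder, with a
# Module object already defined:
#   <script> var Module = {}; </script>
#   <script src="asm.js"></script>
#   <script src="rest.js"></script>
# Deleting test.asm.js afterwards is expected to break the page, which the final
# run_browser() call (waiting for '[no http server activity]') checks.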
def test_emterpretify_file(self):
open('shell.html', 'w').write('''
<!--
{{{ SCRIPT }}} // ignore this, we do it ourselves
-->
<script>
var Module = {};
var xhr = new XMLHttpRequest();
xhr.open('GET', 'code.dat', true);
xhr.responseType = 'arraybuffer';
xhr.onload = function() {
Module.emterpreterFile = xhr.response;
var script = document.createElement('script');
script.src = "test.js";
document.body.appendChild(script);
};
xhr.send(null);
</script>
''')
try_delete('code.dat')
self.btest('browser_test_hello_world.c', expected='0', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_FILE="code.dat"', '-O2', '-g', '--shell-file', 'shell.html', '-s', 'ASSERTIONS=1'])
assert os.path.exists('code.dat')
try_delete('code.dat')
self.btest('browser_test_hello_world.c', expected='0', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_FILE="code.dat"', '-O2', '-g', '-s', 'ASSERTIONS=1'])
assert os.path.exists('code.dat')
def test_vanilla_html_when_proxying(self):
for opts in [0, 1, 2]:
print(opts)
open('src.cpp', 'w').write(self.with_report_result(open(path_from_root('tests', 'browser_test_hello_world.c')).read()))
run_process([PYTHON, EMCC, 'src.cpp', '-o', 'test.js', '-O' + str(opts), '--proxy-to-worker'])
open('test.html', 'w').write('<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0')
def test_in_flight_memfile_request(self):
# test the XHR for an asm.js mem init file being in flight already
for o in [0, 1, 2]:
print(o)
opts = ['-O' + str(o), '-s', 'WASM=0']
print('plain html')
open('src.cpp', 'w').write(self.with_report_result(open(path_from_root('tests', 'in_flight_memfile_request.c')).read()))
run_process([PYTHON, EMCC, 'src.cpp', '-o', 'test.js'] + opts)
open('test.html', 'w').write('<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0') # never when we provide our own HTML like this.
print('default html')
self.btest('in_flight_memfile_request.c', expected='0' if o < 2 else '1', args=opts) # should happen when there is a mem init file (-O2+)
def test_split_memory_large_file(self):
size = 2 * 1024 * 1024
open('huge.dat', 'wb').write(bytearray((x * x) & 255 for x in range(size * 2))) # larger than a memory chunk
self.btest('split_memory_large_file.cpp', expected='1', args=['-s', 'WASM=0', '-s', 'SPLIT_MEMORY=' + str(size), '-s', 'TOTAL_MEMORY=128MB', '-s', 'TOTAL_STACK=10240', '--preload-file', 'huge.dat'], timeout=60)
def test_binaryen_interpreter(self):
self.btest('browser_test_hello_world.c', expected='0', args=['-s', 'BINARYEN=1', '-s', 'BINARYEN_METHOD="interpret-binary"'])
self.btest('browser_test_hello_world.c', expected='0', args=['-s', 'BINARYEN=1', '-s', 'BINARYEN_METHOD="interpret-binary"', '-O2'])
@no_chrome("chrome doesn't support synchronous compilation over 4k")
def test_binaryen_async(self):
# notice when we use async compilation
script = '''
<script>
// note if we do async compilation
var real_wasm_instantiate = WebAssembly.instantiate;
var real_wasm_instantiateStreaming = WebAssembly.instantiateStreaming;
if (typeof real_wasm_instantiateStreaming === 'function') {
WebAssembly.instantiateStreaming = function(a, b) {
Module.sawAsyncCompilation = true;
return real_wasm_instantiateStreaming(a, b);
};
} else {
WebAssembly.instantiate = function(a, b) {
Module.sawAsyncCompilation = true;
return real_wasm_instantiate(a, b);
};
}
// show stderr for the viewer's fun
err = function(x) {
out('<<< ' + x + ' >>>');
console.log(x);
};
</script>
{{{ SCRIPT }}}
'''
shell_with_script('shell.html', 'shell.html', script)
common_args = ['--shell-file', 'shell.html']
for opts, expect in [
([], 1),
(['-O1'], 1),
(['-O2'], 1),
(['-O3'], 1),
(['-s', 'BINARYEN_ASYNC_COMPILATION=1'], 1), # force it on
(['-O1', '-s', 'BINARYEN_ASYNC_COMPILATION=0'], 0), # force it off
(['-s', 'BINARYEN_ASYNC_COMPILATION=1', '-s', 'BINARYEN_METHOD="interpret-binary"'], 0), # try to force it on, but have it disabled
]:
print(opts, expect)
self.btest('binaryen_async.c', expected=str(expect), args=common_args + opts)
# Ensure that compilation still works and is async without instantiateStreaming available
no_streaming = ' <script> WebAssembly.instantiateStreaming = undefined;</script>'
shell_with_script('shell.html', 'shell.html', no_streaming + script)
self.btest('binaryen_async.c', expected='1', args=common_args)
# Test that implementing Module.instantiateWasm() callback works.
def test_manual_wasm_instantiate(self):
src = os.path.join(self.get_dir(), 'src.cpp')
open(src, 'w').write(self.with_report_result(open(os.path.join(path_from_root('tests/manual_wasm_instantiate.cpp'))).read()))
run_process([PYTHON, EMCC, 'src.cpp', '-o', 'manual_wasm_instantiate.js', '-s', 'BINARYEN=1'])
shutil.copyfile(path_from_root('tests', 'manual_wasm_instantiate.html'), os.path.join(self.get_dir(), 'manual_wasm_instantiate.html'))
self.run_browser('manual_wasm_instantiate.html', 'wasm instantiation succeeded', '/report_result?1')
def test_binaryen_worker(self):
self.do_test_worker(['-s', 'WASM=1'])
def test_wasm_locate_file(self):
# Test that it is possible to define "Module.locateFile(foo)" function to locate where pthread-main.js will be loaded from.
self.clear()
os.makedirs(os.path.join(self.get_dir(), 'cdn'))
open('shell2.html', 'w').write(open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function(filename) { if (filename == "test.wasm") return "cdn/test.wasm"; else return filename; }, '))
open('src.cpp', 'w').write(self.with_report_result(open(path_from_root('tests', 'browser_test_hello_world.c')).read()))
subprocess.check_call([PYTHON, EMCC, 'src.cpp', '--shell-file', 'shell2.html', '-s', 'WASM=1', '-o', 'test.html'])
shutil.move('test.wasm', os.path.join('cdn', 'test.wasm'))
self.run_browser('test.html', '', '/report_result?0')
def test_utf8_textdecoder(self):
self.btest('benchmark_utf8.cpp', expected='0', args=['--embed-file', path_from_root('tests/utf8_corpus.txt') + '@/utf8_corpus.txt', '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["UTF8ToString"]'])
def test_utf16_textdecoder(self):
self.btest('benchmark_utf16.cpp', expected='0', args=['--embed-file', path_from_root('tests/utf16_corpus.txt') + '@/utf16_corpus.txt', '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["UTF16ToString","stringToUTF16","lengthBytesUTF16"]'])
def test_webgl_offscreen_canvas_in_pthread(self):
for args in [[], ['-DTEST_CHAINED_WEBGL_CONTEXT_PASSING']]:
self.btest('gl_in_pthread.cpp', expected='1', args=args + ['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'OFFSCREENCANVAS_SUPPORT=1', '-lGL'])
def test_webgl_offscreen_canvas_in_mainthread_after_pthread(self):
for args in [[], ['-DTEST_MAIN_THREAD_EXPLICIT_COMMIT']]:
self.btest('gl_in_mainthread_after_pthread.cpp', expected='0', args=args + ['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'OFFSCREENCANVAS_SUPPORT=1', '-lGL'])
def test_webgl_offscreen_canvas_only_in_pthread(self):
self.btest('gl_only_in_pthread.cpp', expected='0', args=['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1', '-s', 'OFFSCREENCANVAS_SUPPORT=1', '-lGL'])
# Tests that -s OFFSCREEN_FRAMEBUFFER=1 rendering works.
@requires_graphics_hardware
def test_webgl_offscreen_framebuffer(self):
self.btest('webgl_draw_triangle.c', '0', args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DEXPLICIT_SWAP=1'])
  # Tests the feature that the shell html page can preallocate the typed array and place it in Module.buffer before loading the script page.
  # In this build mode, the -s TOTAL_MEMORY=xxx option will be ignored.
  # Preallocating the buffer in this way is asm.js only (wasm needs a Memory).
def test_preallocated_heap(self):
self.btest('test_preallocated_heap.cpp', expected='1', args=['-s', 'WASM=0', '-s', 'TOTAL_MEMORY=16MB', '-s', 'ABORTING_MALLOC=0', '--shell-file', path_from_root('tests', 'test_preallocated_heap_shell.html')])
# Tests emscripten_fetch() usage to XHR data directly to memory without persisting results to IndexedDB.
def test_fetch_to_memory(self):
# Test error reporting in the negative case when the file URL doesn't exist. (http 404)
self.btest('fetch/to_memory.cpp',
expected='1',
args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-DFILE_DOES_NOT_EXIST'],
also_asmjs=True)
# Test the positive case when the file URL exists. (http 200)
shutil.copyfile(path_from_root('tests', 'gears.png'), os.path.join(self.get_dir(), 'gears.png'))
self.btest('fetch/to_memory.cpp',
expected='1',
args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1'],
also_asmjs=True)
def test_fetch_to_indexdb(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), os.path.join(self.get_dir(), 'gears.png'))
self.btest('fetch/to_indexeddb.cpp',
expected='1',
args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1'],
also_asmjs=True)
# Tests emscripten_fetch() usage to persist an XHR into IndexedDB and subsequently load up from there.
def test_fetch_cached_xhr(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), os.path.join(self.get_dir(), 'gears.png'))
self.btest('fetch/cached_xhr.cpp',
expected='1',
args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1'],
also_asmjs=True)
# Tests that response headers get set on emscripten_fetch_t values.
def test_fetch_response_headers(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), os.path.join(self.get_dir(), 'gears.png'))
self.btest('fetch/response_headers.cpp', expected='1', args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'WASM=0'])
# Test emscripten_fetch() usage to stream a XHR in to memory without storing the full file in memory
@no_chrome('depends on moz-chunked-arraybuffer')
def test_fetch_stream_file(self):
# Strategy: create a large 128MB file, and compile with a small 16MB Emscripten heap, so that the tested file
# won't fully fit in the heap. This verifies that streaming works properly.
s = '12345678'
for i in range(14):
s = s[::-1] + s # length of str will be 2^17=128KB
with open('largefile.txt', 'w') as f:
for i in range(1024):
f.write(s)
self.btest('fetch/stream_file.cpp',
expected='1',
args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'TOTAL_MEMORY=536870912'],
also_asmjs=True)
# Tests emscripten_fetch() usage in synchronous mode when used from the main
# thread proxied to a Worker with -s PROXY_TO_PTHREAD=1 option.
def test_fetch_sync_xhr(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), os.path.join(self.get_dir(), 'gears.png'))
self.btest('fetch/sync_xhr.cpp', expected='1', args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
# Tests that the Fetch API works for synchronous XHRs when used with --proxy-to-worker.
def test_fetch_sync_xhr_in_proxy_to_worker(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), os.path.join(self.get_dir(), 'gears.png'))
self.btest('fetch/sync_xhr.cpp',
expected='1',
args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '--proxy-to-worker'],
also_asmjs=True)
# Tests waiting on EMSCRIPTEN_FETCH_WAITABLE request from a worker thread
def test_fetch_sync_fetch_in_main_thread(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), os.path.join(self.get_dir(), 'gears.png'))
self.btest('fetch/sync_fetch_in_main_thread.cpp', expected='0', args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
def test_fetch_idb_store(self):
self.btest('fetch/idb_store.cpp', expected='0', args=['-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'WASM=0', '-s', 'PROXY_TO_PTHREAD=1'])
def test_fetch_idb_delete(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), os.path.join(self.get_dir(), 'gears.png'))
self.btest('fetch/idb_delete.cpp', expected='0', args=['-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'WASM=0', '-s', 'PROXY_TO_PTHREAD=1'])
def test_asmfs_hello_file(self):
# Test basic file loading and the valid character set for files.
os.mkdir(os.path.join(self.get_dir(), 'dirrey'))
shutil.copyfile(path_from_root('tests', 'asmfs', 'hello_file.txt'), os.path.join(self.get_dir(), 'dirrey', 'hello file !#$%&\'()+,-.;=@[]^_`{}~ %%.txt'))
self.btest('asmfs/hello_file.cpp', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-s', 'PROXY_TO_PTHREAD=1'])
def test_asmfs_read_file_twice(self):
shutil.copyfile(path_from_root('tests', 'asmfs', 'hello_file.txt'), os.path.join(self.get_dir(), 'hello_file.txt'))
self.btest('asmfs/read_file_twice.cpp', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-s', 'PROXY_TO_PTHREAD=1'])
def test_asmfs_fopen_write(self):
self.btest('asmfs/fopen_write.cpp', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
def test_asmfs_mkdir_create_unlink_rmdir(self):
self.btest('cstdio/test_remove.cpp', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
def test_asmfs_dirent_test_readdir(self):
self.btest('dirent/test_readdir.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
def test_asmfs_dirent_test_readdir_empty(self):
self.btest('dirent/test_readdir_empty.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
def test_asmfs_unistd_close(self):
self.btest('unistd/close.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
def test_asmfs_unistd_access(self):
self.btest('unistd/access.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
def test_asmfs_unistd_unlink(self):
# TODO: Once symlinks are supported, remove -DNO_SYMLINK=1
self.btest('unistd/unlink.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-DNO_SYMLINK=1'])
def test_asmfs_test_fcntl_open(self):
self.btest('fcntl-open/src.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-s', 'PROXY_TO_PTHREAD=1'])
def test_asmfs_relative_paths(self):
self.btest('asmfs/relative_paths.cpp', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
def test_pthread_locale(self):
for args in [
[],
['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2'],
['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2'],
]:
print("Testing with: ", args)
self.btest('pthread/test_pthread_locale.c', expected='1', args=args)
# Tests the Emscripten HTML5 API emscripten_set_canvas_element_size() and emscripten_get_canvas_element_size() functionality in singlethreaded programs.
def test_emscripten_set_canvas_element_size(self):
self.btest('emscripten_set_canvas_element_size.c', expected='1')
# Tests the absolute minimum pthread-enabled application.
def test_hello_thread(self):
self.btest(path_from_root('tests', 'pthread', 'hello_thread.c'), expected='1', args=['-s', 'USE_PTHREADS=1'])
# Tests that it is possible to load the main .js file of the application manually via a Blob URL, and still use pthreads.
def test_load_js_from_blob_with_pthreads(self):
# TODO: enable this with wasm, currently pthreads/atomics have limitations
src = os.path.join(self.get_dir(), 'src.c')
open(src, 'w').write(self.with_report_result(open(path_from_root('tests', 'pthread', 'hello_thread.c')).read()))
run_process([PYTHON, EMCC, 'src.c', '-s', 'USE_PTHREADS=1', '-o', 'hello_thread_with_blob_url.js', '-s', 'WASM=0'])
shutil.copyfile(path_from_root('tests', 'pthread', 'main_js_as_blob_loader.html'), os.path.join(self.get_dir(), 'hello_thread_with_blob_url.html'))
self.run_browser('hello_thread_with_blob_url.html', 'hello from thread!', '/report_result?1')
# Tests that base64 utils work in browser with no native atob function
def test_base64_atob_fallback(self):
opts = ['-s', 'SINGLE_FILE=1', '-s', 'WASM=1', '-s', "BINARYEN_METHOD='interpret-binary'"]
src = r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
REPORT_RESULT(0);
return 0;
}
'''
open('test.c', 'w').write(self.with_report_result(src))
# generate a dummy file
open('dummy_file', 'w').write('dummy')
# compile the code with the modularize feature and the preload-file option enabled
run_process([PYTHON, EMCC, 'test.c', '-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME="Foo"', '--preload-file', 'dummy_file'] + opts)
open('a.html', 'w').write('''
<script>
atob = undefined;
fetch = undefined;
</script>
<script src="a.out.js"></script>
<script>
var foo = Foo();
</script>
''')
self.run_browser('a.html', '...', '/report_result?0')
# Tests that SINGLE_FILE works as intended in generated HTML (with and without Worker)
def test_single_file_html(self):
self.btest('emscripten_main_loop_setimmediate.cpp', '1', args=['-s', 'SINGLE_FILE=1', '-s', 'WASM=1', '-s', "BINARYEN_METHOD='native-wasm'"], also_proxied=True)
assert os.path.exists('test.html') and not os.path.exists('test.js') and not os.path.exists('test.worker.js')
# Tests that SINGLE_FILE works as intended with locateFile
def test_single_file_locate_file(self):
open('src.cpp', 'w').write(self.with_report_result(open(path_from_root('tests', 'browser_test_hello_world.c')).read()))
for wasm_enabled in [True, False]:
args = [PYTHON, EMCC, 'src.cpp', '-o', 'test.js', '-s', 'SINGLE_FILE=1']
if wasm_enabled:
args += ['-s', 'WASM=1']
run_process(args)
open('test.html', 'w').write('''
<script>
var Module = {
locateFile: function (path) {
if (path.indexOf('data:') === 0) {
throw new Error('Unexpected data URI.');
}
return path;
}
};
</script>
<script src="test.js"></script>
''')
self.run_browser('test.html', None, '/report_result?0')
# Tests that SINGLE_FILE works as intended in a Worker in JS output
def test_single_file_worker_js(self):
open('src.cpp', 'w').write(self.with_report_result(open(path_from_root('tests', 'browser_test_hello_world.c')).read()))
run_process([PYTHON, EMCC, 'src.cpp', '-o', 'test.js', '--proxy-to-worker', '-s', 'SINGLE_FILE=1', '-s', 'WASM=1', '-s', "BINARYEN_METHOD='native-wasm'"])
open('test.html', 'w').write('<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0')
assert os.path.exists('test.js') and not os.path.exists('test.worker.js')
def test_access_file_after_heap_resize(self):
open('test.txt', 'w').write('hello from file')
open('page.c', 'w').write(self.with_report_result(open(path_from_root('tests', 'access_file_after_heap_resize.c'), 'r').read()))
run_process([PYTHON, EMCC, 'page.c', '-s', 'WASM=1', '-s', 'ALLOW_MEMORY_GROWTH=1', '--preload-file', 'test.txt', '-o', 'page.html'])
self.run_browser('page.html', 'hello from file', '/report_result?15')
# with separate file packager invocation, letting us affect heap copying
# or lack thereof
for file_packager_args in [[], ['--no-heap-copy']]:
print(file_packager_args)
run_process([PYTHON, FILE_PACKAGER, 'data.js', '--preload', 'test.txt', '--js-output=' + 'data.js'] + file_packager_args)
run_process([PYTHON, EMCC, 'page.c', '-s', 'WASM=1', '-s', 'ALLOW_MEMORY_GROWTH=1', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM=1'])
self.run_browser('page.html', 'hello from file', '/report_result?15')
def test_unicode_html_shell(self):
open(self.in_dir('main.cpp'), 'w').write(self.with_report_result(r'''
int main() {
REPORT_RESULT(0);
return 0;
}
'''))
open(self.in_dir('shell.html'), 'w').write(open(path_from_root('src', 'shell.html')).read().replace('Emscripten-Generated Code', 'Emscripten-Generated Emoji 😅'))
subprocess.check_output([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--shell-file', 'shell.html', '-o', 'test.html'])
self.run_browser('test.html', None, '/report_result?0')
# Tests that Emscripten-compiled applications can be run from a relative path in browser that is different than the address of the current page
def test_browser_run_from_different_directory(self):
src = open(path_from_root('tests', 'browser_test_hello_world.c')).read()
open('test.c', 'w').write(self.with_report_result(src))
run_process([PYTHON, EMCC, 'test.c', '-o', 'test.html', '-O3'])
if not os.path.exists('subdir'):
os.mkdir('subdir')
shutil.move('test.js', os.path.join('subdir', 'test.js'))
shutil.move('test.wasm', os.path.join('subdir', 'test.wasm'))
src = open('test.html').read()
# Make sure JS is loaded from subdirectory
open('test-subdir.html', 'w').write(src.replace('test.js', 'subdir/test.js'))
self.run_browser('test-subdir.html', None, '/report_result?0')
# Similar to `test_browser_run_from_different_directory`, but asynchronous because of `-s MODULARIZE=1`
def test_browser_run_from_different_directory_async(self):
src = open(path_from_root('tests', 'browser_test_hello_world.c')).read()
open('test.c', 'w').write(self.with_report_result(src))
for args, creations in [
(['-s', 'MODULARIZE=1'], [
'Module();', # documented way for using modularize
'new Module();' # not documented as working, but we support it
]),
(['-s', 'MODULARIZE_INSTANCE=1'], ['']) # instance: no need to create anything
]:
print(args)
# compile the code with the modularize feature and the preload-file option enabled
run_process([PYTHON, EMCC, 'test.c', '-o', 'test.js', '-O3'] + args)
if not os.path.exists('subdir'):
os.mkdir('subdir')
shutil.move('test.js', os.path.join('subdir', 'test.js'))
shutil.move('test.wasm', os.path.join('subdir', 'test.wasm'))
for creation in creations:
print(creation)
# Make sure JS is loaded from subdirectory
open('test-subdir.html', 'w').write('''
<script src="subdir/test.js"></script>
<script>
%s
</script>
''' % creation)
self.run_browser('test-subdir.html', None, '/report_result?0')
# Similar to `test_browser_run_from_different_directory`, but
  # also we eval the initial code, so currentScript is not present. That prevents us
# from finding the file in a subdir, but here we at least check we do not regress compared to the
# normal case of finding in the current dir.
def test_browser_modularize_no_current_script(self):
src = open(path_from_root('tests', 'browser_test_hello_world.c')).read()
open('test.c', 'w').write(self.with_report_result(src))
# test both modularize (and creating an instance) and modularize-instance
# (which creates by itself)
for path, args, creation in [
([], ['-s', 'MODULARIZE=1'], 'Module();'),
([], ['-s', 'MODULARIZE_INSTANCE=1'], ''),
(['subdir'], ['-s', 'MODULARIZE=1'], 'Module();'),
(['subdir'], ['-s', 'MODULARIZE_INSTANCE=1'], ''),
]:
print(path, args, creation)
filesystem_path = os.path.join('.', *path)
if not os.path.exists(filesystem_path):
os.makedirs(filesystem_path)
# compile the code with the modularize feature and the preload-file option enabled
run_process([PYTHON, EMCC, 'test.c', '-o', 'test.js'] + args)
shutil.move('test.js', os.path.join(filesystem_path, 'test.js'))
shutil.move('test.wasm', os.path.join(filesystem_path, 'test.wasm'))
open(os.path.join(filesystem_path, 'test.html'), 'w').write('''
<script>
setTimeout(function() {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'test.js', false);
xhr.send(null);
eval(xhr.responseText);
%s
}, 1);
</script>
''' % creation)
self.run_browser('/'.join(path + ['test.html']), None, '/report_result?0')
|
gist_historical_example.py
|
# Gist example of IB wrapper from here: https://gist.github.com/robcarver17/f50aeebc2ecd084f818706d9f05c1eb4
#
# Download API from http://interactivebrokers.github.io/#
# (must be at least version 9.73)
#
# Install python API code /IBJts/source/pythonclient $ python3 setup.py install
#
# Note: The test cases, and the documentation refer to a python package called IBApi,
# but the actual package is called ibapi. Go figure.
#
# Get the latest version of the gateway:
# https://www.interactivebrokers.com/en/?f=%2Fen%2Fcontrol%2Fsystemstandalone-ibGateway.php%3Fos%3Dunix
# (for unix: windows and mac users please find your own version)
#
# Run the gateway
#
# user: edemo
# pwd: demo123
#
# duration units and bar sizes:
# https://interactivebrokers.github.io/tws-api/historical_bars.html#hd_duration
# limitations:
# https://interactivebrokers.github.io/tws-api/historical_limitations.html
import os
import time
import pprint
import queue
import datetime
from pytz import timezone
import pandas as pd
import numpy as np
from tqdm import tqdm
import seaborn as sns
import matplotlib.pyplot as plt
from ibapi.wrapper import EWrapper
from ibapi.client import EClient
from ibapi.contract import Contract as IBcontract
from threading import Thread
DEFAULT_HISTORIC_DATA_ID = 50
DEFAULT_GET_CONTRACT_ID = 43
DEFAULT_GET_NP_ID = 42
DEFAULT_GET_EARLIEST_ID = 1
DEFAULT_HISTORIC_NEWS_ID = 1001
## marker for when queue is finished
FINISHED = object()
STARTED = object()
TIME_OUT = object()
class finishableQueue(object):
def __init__(self, queue_to_finish):
self._queue = queue_to_finish
self.status = STARTED
def get(self, timeout):
"""
Returns a list of queue elements once timeout is finished, or a FINISHED flag is received in the queue
:param timeout: how long to wait before giving up
:return: list of queue elements
"""
contents_of_queue = []
finished = False
while not finished:
try:
current_element = self._queue.get(timeout=timeout)
if current_element is FINISHED:
finished = True
self.status = FINISHED
else:
contents_of_queue.append(current_element)
## keep going and try and get more data
except queue.Empty:
## If we hit a time out it's most probable we're not getting a finished element any time soon
## give up and return what we have
finished = True
self.status = TIME_OUT
return contents_of_queue
def timed_out(self):
return self.status is TIME_OUT
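# Illustrative sketch, not part of the original gist: how finishableQueue and the FINISHED
# sentinel cooperate. A producer (normally an EWrapper callback) puts data followed by
# FINISHED; the consumer then drains everything with a single get() call.
def _example_finishable_queue():
    demo_queue = queue.Queue()
    demo_queue.put('bar 1')
    demo_queue.put('bar 2')
    demo_queue.put(FINISHED)      # what the *End callbacks push when a request completes
    fq = finishableQueue(demo_queue)
    contents = fq.get(timeout=1)  # ['bar 1', 'bar 2']
    assert not fq.timed_out()
    return contents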
class TestWrapper(EWrapper):
"""
The wrapper deals with the action coming back from the IB gateway or TWS instance
We override methods in EWrapper that will get called when this action happens, like currentTime
Extra methods are added as we need to store the results in this object
"""
def __init__(self):
self._my_contract_details = {}
self._my_historic_data_dict = {}
self._my_earliest_timestamp_dict = {}
self._my_np_dict = {}
self._my_hn_dict = {}
self._my_na_dict = {}
self._my_errors = queue.Queue()
## error handling code
def init_error(self):
error_queue = queue.Queue()
self._my_errors = error_queue
def get_error(self, timeout=5):
if self.is_error():
try:
return self._my_errors.get(timeout=timeout)
except queue.Empty:
return None
return None
def is_error(self):
an_error_if=not self._my_errors.empty()
return an_error_if
def error(self, id, errorCode, errorString):
        ## Overridden method
errormsg = "IB error id %d errorcode %d string %s" % (id, errorCode, errorString)
self._my_errors.put(errormsg)
## get contract details code
def init_contractdetails(self, reqId):
self._my_contract_details[reqId] = queue.Queue()
return self._my_contract_details[reqId]
def contractDetails(self, reqId, contractDetails):
## overridden method
if reqId not in self._my_contract_details.keys():
self.init_contractdetails(reqId)
self._my_contract_details[reqId].put(contractDetails)
def contractDetailsEnd(self, reqId):
        ## overridden method
if reqId not in self._my_contract_details.keys():
self.init_contractdetails(reqId)
self._my_contract_details[reqId].put(FINISHED)
def init_historicprices(self, tickerid):
self._my_historic_data_dict[tickerid] = queue.Queue()
return self._my_historic_data_dict[tickerid]
def init_earliest_timestamp(self, tickerid):
self._my_earliest_timestamp_dict[tickerid] = queue.Queue()
return self._my_earliest_timestamp_dict[tickerid]
def init_np(self, tickerid):
self._my_np_dict[tickerid] = queue.Queue()
return self._my_np_dict[tickerid]
def init_hn(self, requestId):
self._my_hn_dict[requestId] = queue.Queue()
return self._my_hn_dict[requestId]
def init_na(self, requestId):
self._my_na_dict[requestId] = queue.Queue()
return self._my_na_dict[requestId]
def historicalData(self, tickerid, bar):
        ## Overridden method
## Note I'm choosing to ignore barCount, WAP and hasGaps but you could use them if you like
# pprint.pprint(bar.__dict__)
bardata = (bar.date, bar.open, bar.high, bar.low, bar.close, bar.volume)
historic_data_dict = self._my_historic_data_dict
## Add on to the current data
if tickerid not in historic_data_dict.keys():
self.init_historicprices(tickerid)
historic_data_dict[tickerid].put(bardata)
def headTimestamp(self, tickerid, headTimestamp:str):
## overridden method
if tickerid not in self._my_earliest_timestamp_dict.keys():
self.init_earliest_timestamp(tickerid)
self._my_earliest_timestamp_dict[tickerid].put(headTimestamp)
self._my_earliest_timestamp_dict[tickerid].put(FINISHED)
def newsProviders(self, newsProviders):
## overridden method
tickerid = DEFAULT_GET_NP_ID
if tickerid not in self._my_np_dict.keys():
self.init_np(tickerid)
self._my_np_dict[tickerid].put(newsProviders)
self._my_np_dict[tickerid].put(FINISHED)
def historicalDataEnd(self, tickerid, start:str, end:str):
        ## overridden method
if tickerid not in self._my_historic_data_dict.keys():
self.init_historicprices(tickerid)
self._my_historic_data_dict[tickerid].put(FINISHED)
def historicalNews(self, requestId, time, providerCode, articleId, headline):
newsdata = (time, providerCode, articleId, headline)
newsdict = self._my_hn_dict
if requestId not in newsdict.keys():
self.init_hn(requestId)
newsdict[requestId].put(newsdata)
def historicalNewsEnd(self, requestId, hasMore):
if requestId not in self._my_hn_dict.keys():
self.init_hn(requestId)
if hasMore:
print('more results available')
self._my_hn_dict[requestId].put(FINISHED)
def newsArticle(self, requestId, articleType, articleText):
if requestId not in self._my_na_dict.keys():
self.init_na(requestId)
self._my_na_dict[requestId].put((articleType, articleText))
self._my_na_dict[requestId].put(FINISHED)
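# Illustrative sketch, not part of the original gist: the pattern every override above follows --
# lazily create a queue keyed by the request id, push data from the data callback, and push
# FINISHED from the matching *End callback. FakeBar is a made-up stand-in for the BarData
# object that the real API delivers.
def _example_wrapper_pattern(tickerid=999):
    from collections import namedtuple
    FakeBar = namedtuple('FakeBar', ['date', 'open', 'high', 'low', 'close', 'volume'])
    wrapper = TestWrapper()
    data_queue = wrapper.init_historicprices(tickerid)
    wrapper.historicalData(tickerid, FakeBar('20180711', 1.0, 1.1, 0.9, 1.05, 100))
    wrapper.historicalDataEnd(tickerid, '', '')
    return finishableQueue(data_queue).get(timeout=1)   # [('20180711', 1.0, 1.1, 0.9, 1.05, 100)]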
class TestClient(EClient):
"""
    The client class
We don't override native methods, but instead call them from our own wrappers
"""
def __init__(self, wrapper):
## Set up with a wrapper inside
EClient.__init__(self, wrapper)
def resolve_ib_contract(self, ibcontract, reqId=DEFAULT_GET_CONTRACT_ID):
"""
From a partially formed contract, returns a fully fledged version
:returns fully resolved IB contract
"""
## Make a place to store the data we're going to return
contract_details_queue = finishableQueue(self.init_contractdetails(reqId))
print("Getting full contract details from the server... ")
self.reqContractDetails(reqId, ibcontract)
## Run until we get a valid contract(s) or get bored waiting
MAX_WAIT_SECONDS = 3
new_contract_details = contract_details_queue.get(timeout = MAX_WAIT_SECONDS)
while self.wrapper.is_error():
print(self.get_error())
if contract_details_queue.timed_out():
print("Exceeded maximum wait for wrapper to confirm finished - seems to be normal behaviour")
if len(new_contract_details)==0:
print("Failed to get additional contract details: returning unresolved contract")
return ibcontract
if len(new_contract_details)>1:
print("got multiple contracts; using first one")
new_contract_details = new_contract_details[0]
resolved_ibcontract = new_contract_details.contract
return resolved_ibcontract, new_contract_details
def get_IB_historical_data(self,
ibcontract,
whatToShow="TRADES",
durationStr="1 Y",
barSizeSetting="1 day",
tickerid=DEFAULT_HISTORIC_DATA_ID,
latest_date=None):
"""
Returns historical prices for a contract, up to latest_date
if latest_date is none, uses todays date
latest_date should be of form %Y%m%d %H:%M:%S %Z
ibcontract is a Contract
:returns list of prices in 4 tuples: Open high low close volume
"""
# set latest_date to today and now if it is None
if latest_date is None:
latest_date = get_latest_date_local()
## Make a place to store the data we're going to return
historic_data_queue = finishableQueue(self.init_historicprices(tickerid))
# Request some historical data. Native method in EClient
self.reqHistoricalData(
tickerid, # tickerId,
ibcontract, # contract,
latest_date, # endDateTime,
durationStr, # durationStr,
barSizeSetting, # barSizeSetting,
whatToShow=whatToShow,
useRTH=1,
formatDate=1,
keepUpToDate=False, # <<==== added for api 9.73.2
chartOptions=[] ## chartOptions not used
)
## Wait until we get a completed data, an error, or get bored waiting
MAX_WAIT_SECONDS = 5
print("Getting historical data from the server... could take %d seconds to complete " % MAX_WAIT_SECONDS)
historic_data = historic_data_queue.get(timeout=MAX_WAIT_SECONDS)
while self.wrapper.is_error():
print(self.get_error())
if historic_data_queue.timed_out():
print("Exceeded maximum wait for wrapper to confirm finished - seems to be normal behaviour")
self.cancelHistoricalData(tickerid)
# convert to pandas dataframe
# date, open, high, low, close, vol
# already adjusted for splits
if len(historic_data) != 0:
df = pd.DataFrame.from_records(data=historic_data, index='datetime', columns=['datetime', 'open', 'high', 'low', 'close', 'volume'])
df.index = pd.to_datetime(df.index)
if whatToShow != 'TRADES':
# volume only available for trades
df.drop('volume', axis=1, inplace=True)
return df
else:
return historic_data
def getEarliestTimestamp(self, contract, whatToShow='TRADES', useRTH=1, formatDate=1, tickerid=DEFAULT_GET_EARLIEST_ID):
# parameters: https://interactivebrokers.github.io/tws-api/classIBApi_1_1EClient.html#a059b5072d1e8e8e96394e53366eb81f3
## Make a place to store the data we're going to return
earliest_timestamp_queue = finishableQueue(self.init_earliest_timestamp(tickerid))
self.reqHeadTimeStamp(tickerid, contract, whatToShow, useRTH, formatDate)
## Wait until we get a completed data, an error, or get bored waiting
MAX_WAIT_SECONDS = 2
print("Getting eariest timestamp from the server... could take %d seconds to complete " % MAX_WAIT_SECONDS)
earliest = earliest_timestamp_queue.get(timeout=MAX_WAIT_SECONDS)
while self.wrapper.is_error():
print(self.get_error())
if earliest_timestamp_queue.timed_out():
print("Exceeded maximum wait for wrapper to confirm finished - seems to be normal behaviour")
self.cancelHeadTimeStamp(tickerid)
return earliest[0] # first element in list
def getNewsProviders(self):
"""
available news providers by default are
[140007057343600: BRFG, Briefing.com General Market Columns,
140007057342704: BRFUPDN, Briefing.com Analyst Actions,
140007057343544: DJNL, Dow Jones Newsletters]
"""
## Make a place to store the data we're going to return
tickerid = DEFAULT_GET_NP_ID
np_queue = finishableQueue(self.init_np(tickerid))
# Request news providers. Native method in EClient
self.reqNewsProviders()
## Wait until we get a completed data, an error, or get bored waiting
MAX_WAIT_SECONDS = 2
print("Getting list of news providers from the server... could take %d seconds to complete " % MAX_WAIT_SECONDS)
nps = np_queue.get(timeout=MAX_WAIT_SECONDS)
while self.wrapper.is_error():
print(self.get_error())
if np_queue.timed_out():
print("Exceeded maximum wait for wrapper to confirm finished - seems to be normal behaviour")
return nps[0] # list within a list
def getHistoricalNews(self, reqId, conId, providerCodes, startDateTime, endDateTime, totalResults):
hn_queue = finishableQueue(self.init_hn(reqId))
self.reqHistoricalNews(reqId, conId, providerCodes, startDateTime, endDateTime, totalResults, historicalNewsOptions=[])
## Wait until we get a completed data, an error, or get bored waiting
MAX_WAIT_SECONDS = 15
print("Getting historical news from the server... could take %d seconds to complete " % MAX_WAIT_SECONDS)
hn = hn_queue.get(timeout=MAX_WAIT_SECONDS)
while self.wrapper.is_error():
print(self.get_error())
if hn_queue.timed_out():
print("Exceeded maximum wait for wrapper to confirm finished - seems to be normal behaviour")
return hn
def getNewsArticle(self, reqId, providerCode, articleId):
na_queue = finishableQueue(self.init_na(reqId))
self.reqNewsArticle(reqId, providerCode, articleId, [])
## Wait until we get a completed data, an error, or get bored waiting
MAX_WAIT_SECONDS = 5
print("Getting historical news from the server... could take %d seconds to complete " % MAX_WAIT_SECONDS)
na = na_queue.get(timeout=MAX_WAIT_SECONDS)
while self.wrapper.is_error():
print(self.get_error())
if na_queue.timed_out():
print("Exceeded maximum wait for wrapper to confirm finished - seems to be normal behaviour")
return na
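# Illustrative sketch, not part of the original gist: how the client calls above combine once a
# connected TestApp (defined below) exists. 'app' is assumed to be connected to a running
# TWS/gateway session, and the contract is assumed to resolve successfully.
def _example_pull_daily_bars(app, ticker='AAPL'):
    ibcontract = IBcontract()
    ibcontract.secType = 'STK'
    ibcontract.symbol = ticker
    ibcontract.exchange = 'ISLAND'
    resolved, details = app.resolve_ib_contract(ibcontract, reqId=301)
    # one year of daily TRADES bars ending now (see the duration/bar-size links in the header)
    return app.get_IB_historical_data(resolved, whatToShow='TRADES',
                                      durationStr='1 Y', barSizeSetting='1 day')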
class TestApp(TestWrapper, TestClient):
def __init__(self, ipaddress, portid, clientid):
TestWrapper.__init__(self)
TestClient.__init__(self, wrapper=self)
self.connect(ipaddress, portid, clientid)
thread = Thread(target = self.run)
thread.start()
setattr(self, "_thread", thread)
self.init_error()
def get_hist_data_date_range(self,
ibcontract,
whatToShow='TRADES',
barSizeSetting='3 mins',
start_date=None,
end_date=None,
tickerid=DEFAULT_HISTORIC_DATA_ID):
"""
gets historic data for date range
if start_date is None, then first finds earliest date available,
and gets all data to there
if end_date is None, will get data to latest possible time
start_date and end_date should be strings in format YYYYMMDD
useful options for whatToShow for stocks can be:
TRADES
BID
ASK
OPTION_IMPLIED_VOLATILITY
HISTORICAL_VOLATILITY
"""
smallbars = ['1 secs', '5 secs', '10 secs', '15 secs', '30 secs', '1 min']
max_step_sizes = {'1 secs': '1800 S', # 30 mins
'5 secs': '3600 S', # 1 hour
'10 secs': '14400 S', # 4 hours
'15 secs': '14400 S', # 4 hours
'30 secs': '28800 S', # 8 hours
'1 min': '1 D',
'2 mins': '2 D',
'3 mins': '1 W',
'5 mins': '1 W',
'10 mins': '1 W',
'15 mins': '1 W',
'20 mins': '1 W',
'30 mins': '1 M',
'1 hour': '1 M',
'2 hours': '1 M',
'3 hours': '1 M',
'4 hours': '1 M',
'8 hours': '1 M',
'1 day': '1 Y',
'1 week': '1 Y',
'1 month': '1 Y'}
# TODO: check if earliest timestamp is nothing or before/after end_date
earliest_timestamp = self.getEarliestTimestamp(ibcontract, whatToShow=whatToShow)
earliest_datestamp = earliest_timestamp[:8]
# if timeout, will return empty list
df = []
if end_date is None:
latest_date = None
else:
            # TODO: need to adapt this to timezones other than mountain time
latest_date = end_date + ' ' + get_close_hour_local() + ':00:00'
while type(df) is list:
df = self.get_IB_historical_data(ibcontract,
whatToShow=whatToShow,
durationStr=max_step_sizes[barSizeSetting],
barSizeSetting=barSizeSetting,
tickerid=tickerid,
latest_date=latest_date)
earliest_date = df.index[0]
full_df = df
self.df = full_df
# keep going until the same result is returned twice...not perfectly efficient but oh well
previous_earliest_date = None
i = 0
start_time = time.time()
is_list = 0
while previous_earliest_date != earliest_date:
i += 1
print(i)
print(previous_earliest_date)
print(earliest_date)
df = self.get_IB_historical_data(ibcontract,
whatToShow=whatToShow,
durationStr=max_step_sizes[barSizeSetting],
barSizeSetting=barSizeSetting,
tickerid=tickerid,
latest_date=earliest_date.strftime('%Y%m%d %H:%M:%S'))
if type(df) is list:
is_list += 1
# we've probably hit the earliest time we can get
if is_list >= 3 and earliest_date.date().strftime('%Y%m%d') == earliest_datestamp:
break
if is_list >= 10:
break
continue
previous_earliest_date = earliest_date
earliest_date = df.index[0]
full_df = pd.concat([df, full_df])
self.df = full_df
is_list = 0
# no more than 6 requests every 2s for bars under 30s
# https://interactivebrokers.github.io/tws-api/historical_limitations.html
# TODO: take care of 60 requests per 10 mins
            if barSizeSetting in smallbars and i >= 6:
                # see _example_pacing() below this class for the rule in isolation
                time_left = max(0, 2 - (time.time() - start_time))
                i = 0
                time.sleep(time_left)
                start_time = time.time()
return full_df
def get_stock_contract(self, ticker='SNAP', reqId=DEFAULT_HISTORIC_DATA_ID):
"""
gets resolved IB contract for stocks
assumes ISLAND exchange for now (NASDAQ and maybe others?)
"""
# available sec types: https://interactivebrokers.github.io/tws-api/classIBApi_1_1Contract.html#a4f83111c0ea37a19fe1dae98e3b67456
ibcontract = IBcontract()
ibcontract.secType = 'STK'
# get todays date, format as YYYYMMDD -- need to check this is correct
# today = datetime.datetime.today().strftime('%Y%m%d')
# ibcontract.lastTradeDateOrContractMonth = '20180711'#today
ibcontract.symbol = ticker
ibcontract.exchange = 'ISLAND'
resolved_ibcontract, contract_details = self.resolve_ib_contract(ibcontract=ibcontract, reqId=reqId)
return resolved_ibcontract, contract_details
def download_all_history_stock(self, ticker='SNAP', barSizeSetting='3 mins'):
"""
downloads all historical data for a stock including
TRADES
BID
ASK
OPTION_IMPLIED_VOLATILITY
if data already exists, updates and appends to it
"""
        contract, contract_details = self.get_stock_contract(ticker=ticker)
folder = 'data/'
start_date = None
mode = 'w'
        if os.path.exists(folder + ticker + '_trades.h5'):
            cur_trades = pd.read_hdf(folder + ticker + '_trades.h5')
latest_datetime = cur_trades.index[-1]
start_date = latest_datetime.strftime('%Y%m%d')
mode = 'r+' # append to existing files, should throw error if they don't exist
end_date = None#'20170401' # smaller amount of data for prototyping/testing
trades = self.get_hist_data_date_range(contract, barSizeSetting=barSizeSetting, end_date=end_date, start_date=start_date)
bid = self.get_hist_data_date_range(contract, barSizeSetting=barSizeSetting, whatToShow='BID', end_date=end_date, start_date=start_date)
ask = self.get_hist_data_date_range(contract, barSizeSetting=barSizeSetting, whatToShow='ASK', end_date=end_date, start_date=start_date)
opt_vol = self.get_hist_data_date_range(contract, barSizeSetting=barSizeSetting, whatToShow='OPTION_IMPLIED_VOLATILITY', end_date=end_date, start_date=start_date)
bss = barSizeSetting.replace(' ', '_')
trades.to_hdf(folder + ticker + '_trades_' + bss + '.h5', key='data', format='table', complevel=9, complib='blosc:lz4', mode=mode)
bid.to_hdf(folder + ticker + '_bid_' + bss + '.h5', key='data', format='table', complevel=9, complib='blosc:lz4', mode=mode)
ask.to_hdf(folder + ticker + '_ask_' + bss + '.h5', key='data', format='table', complevel=9, complib='blosc:lz4', mode=mode)
opt_vol.to_hdf(folder + ticker + '_opt_vol_' + bss + '.h5', key='data', format='table', complevel=9, complib='blosc:lz4', mode=mode)
def get_earliest_dates(self, ticker):
        contract, contract_details = self.get_stock_contract(ticker=ticker)
        for t in ['TRADES', 'BID', 'ASK', 'OPTION_IMPLIED_VOLATILITY']:
            earliest = self.getEarliestTimestamp(contract, whatToShow=t, tickerid=200)
            print(t)
            print(earliest)
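# Illustrative sketch, not part of the original gist: the pacing rule get_hist_data_date_range()
# enforces for small bars -- at most 6 historical data requests every 2 seconds (see the IB
# historical limitations link in the header). Returns how long to sleep before the next request.
def _example_pacing(request_times, window=2.0, max_requests=6):
    now = time.time()
    recent = [t for t in request_times if now - t < window]
    if len(recent) < max_requests:
        return 0.0
    return max(0.0, window - (now - min(recent)))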
def get_datetime_from_date(date='2018-06-30'):
"""
not sure if I need this anymore...
converts a date to a datetime (end-of-day) for historical data gathering
    date should be a string in format YYYY-MM-DD
uses eastern timezone (EDT or EST) by default
TODO: convert eastern to local timezone from machine
"""
tz='US/Eastern'
tz_obj = timezone(tz)
date = datetime.datetime.strptime(date, '%Y-%m-%d')
date = date.replace(hour = 16, minute = 0, second = 0)
date = tz_obj.localize(date)
return date.strftime('%Y%m%d %H:%M:%S %Z')
def get_latest_date_local():
"""
gets the latest date with the machine's local timezone
endDateTime and startDateTime "Uses TWS timezone specified at login."
at least for tick-by-tick data
"""
machines_tz = datetime.datetime.now(datetime.timezone.utc).astimezone().tzname()
latest_date = datetime.datetime.today()
# doesn't work with machines tz in there
latest_date = latest_date.strftime('%Y%m%d %H:%M:%S')# + machines_tz
return latest_date
def get_close_hour_local():
"""
gets closing hour in local machine time (4 pm Eastern)
"""
eastern_tz = timezone('US/Eastern')
eastern_close = datetime.datetime(year=2018, month=6, day=29, hour=16)
eastern_close = eastern_tz.localize(eastern_close)
return str(eastern_close.astimezone().hour)
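# Illustrative sketch, not part of the original gist: what the two helpers above produce. On a
# machine in US/Mountain, for example, the 16:00 US/Eastern close maps to local hour 14.
def _example_close_hour():
    eastern_tz = timezone('US/Eastern')
    eastern_close = eastern_tz.localize(datetime.datetime(2018, 6, 29, 16, 0))
    local_close = eastern_close.astimezone()           # convert to the machine's local timezone
    return local_close.hour, get_latest_date_local()   # e.g. (14, '20180629 09:35:12')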
def load_data(ticker='SNAP', barSizeSetting='3 mins'):
"""
loads historical tick data
"""
folder = 'data/'
bss = barSizeSetting.replace(' ', '_')
trades = pd.read_hdf(folder + ticker + '_trades_' + bss + '.h5')
# fill 0 volume with 1
trades.at[trades['volume'] == 0, 'volume'] = 1
bid = pd.read_hdf(folder + ticker + '_bid_' + bss + '.h5')
ask = pd.read_hdf(folder + ticker + '_ask_' + bss + '.h5')
opt_vol = pd.read_hdf(folder + ticker + '_opt_vol_' + bss + '.h5')
# drop duplicates just in case...dupes throw off concat
trades.drop_duplicates(inplace=True)
bid.drop_duplicates(inplace=True)
ask.drop_duplicates(inplace=True)
opt_vol.drop_duplicates(inplace=True)
# rename columns so can join to one big dataframe
bid.columns = ['bid_' + c for c in bid.columns]
ask.columns = ['ask_' + c for c in ask.columns]
opt_vol.columns = ['opt_vol_' + c for c in opt_vol.columns]
# inner join should drop na's but just to be safe
full_df = pd.concat([trades, bid, ask, opt_vol], axis=1, join='inner').dropna()
return full_df
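# Illustrative sketch, not part of the original gist: appending newly downloaded bars to an
# existing table-format HDF5 key instead of rewriting it. 'new_bars' and the path are
# hypothetical; pandas requires format='table' for append=True to work.
def _example_append_bars(new_bars, ticker='SNAP', bss='3_mins', folder='data/'):
    path = folder + ticker + '_trades_' + bss + '.h5'
    new_bars.to_hdf(path, key='data', format='table', append=True,
                    complevel=9, complib='blosc:lz4')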
def make_feats_targs_individ_df(df, future_gap_idx_steps, future_span_idx_steps, feature_span, feature_span_idx_steps):
"""
"""
targets = df['close'].pct_change(future_span_idx_steps).shift(-future_gap_idx_steps - future_span_idx_steps)
pct_change_features = df.copy().pct_change(feature_span_idx_steps, axis=0)
pct_change_features.columns = [c + '_' + str(feature_span) + '_min_pct_chg' for c in pct_change_features.columns]
df['targets'] = targets
# inner join should drop na's but just to be safe
feats_targs = pd.concat([df, pct_change_features], axis=1, join='inner').dropna()
feat_cols = [c for c in feats_targs.columns if c != 'targets']
return feats_targs[feat_cols], feats_targs['targets']
def make_features_targets(full_df, future_gap=0, future_span=15, feature_span=15, intraday=True):
"""
uses close price to make targets -- percent change over certain time in future
features are percent change of other columns as well as raw values
future_gap is number of minutes between current time and start of future pct_change
future_span is number of minutes to calculate price percent change
feature_span is number of minutes to calculate pct change of everything in df
intraday is boolean; if True, will only get features/targs within each day
and not extending over close/open times
"""
# copy full_df so we don't modify it
full_df_copy = full_df.copy()
# get number of minutes between timesteps -- won't work if not integer minutes
minute_gap = (full_df_copy.index[1] - full_df_copy.index[0]).seconds // 60
future_gap_idx_steps = future_gap // minute_gap
future_span_idx_steps = future_span // minute_gap
feature_span_idx_steps = feature_span // minute_gap
# TODO: use dask or multicore/multithread
if intraday:
# get dataframes for each day and make feats targs, then join
days = [idx.date() for idx in full_df_copy.index]
unique_days = np.unique(days)
all_feats, all_targs = [], []
for d in tqdm(unique_days):
df = full_df_copy[full_df_copy.index.date == d].copy()
d_feats, d_targs = make_feats_targs_individ_df(df,
future_gap_idx_steps=future_gap_idx_steps,
future_span_idx_steps=future_span_idx_steps,
feature_span=feature_span,
feature_span_idx_steps=feature_span_idx_steps)
all_feats.append(d_feats)
all_targs.append(d_targs)
return pd.concat(all_feats), pd.concat(all_targs)
else:
# get feats and targets in bulk
return make_feats_targs_individ_df(full_df,
future_gap_idx_steps=future_gap_idx_steps,
future_span_idx_steps=future_span_idx_steps,
feature_span=feature_span,
feature_span_idx_steps=feature_span_idx_steps)
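# Illustrative sketch, not part of the original gist: how the minute spans above turn into index
# steps. With 3-minute bars, future_span=15 and feature_span=15 both become 5 rows, so the target
# is the close-to-close percent change 5 rows ahead.
def _example_feats_targs():
    idx = pd.date_range('2018-07-11 09:30', periods=20, freq='3min')
    toy = pd.DataFrame({'close': np.linspace(10, 12, 20), 'volume': np.ones(20)}, index=idx)
    minute_gap = (toy.index[1] - toy.index[0]).seconds // 60   # 3
    steps = 15 // minute_gap                                   # 5
    feats, targs = make_feats_targs_individ_df(toy.copy(),
                                               future_gap_idx_steps=0,
                                               future_span_idx_steps=steps,
                                               feature_span=15,
                                               feature_span_idx_steps=steps)
    return feats.shape, targs.shape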
def check_autocorrelations():
"""
checks autocorrelations between previous price changes and future price changes
over a range of timesteps
"""
if __name__ == '__main__':
app = TestApp("127.0.0.1", 7496, 1)
# app.getNewsProviders()
aapl, aapl_details = app.get_stock_contract(ticker='AAPL', reqId=304)
# IBM conId = 8314
hn1 = app.getHistoricalNews(reqId=DEFAULT_HISTORIC_NEWS_ID, conId=aapl.conId, providerCodes='BRFG', startDateTime="", endDateTime="", totalResults=1000)
hn2 = app.getHistoricalNews(reqId=DEFAULT_HISTORIC_NEWS_ID, conId=aapl.conId, providerCodes='BRFUPDN', startDateTime="", endDateTime="", totalResults=1000)
hn3 = app.getHistoricalNews(reqId=DEFAULT_HISTORIC_NEWS_ID, conId=aapl.conId, providerCodes='DJNL', startDateTime="", endDateTime="", totalResults=1000)
na1 = app.getNewsArticle(1009, providerCode='BRFG', articleId=hn1[0][2])
na2 = app.getNewsArticle(1009, providerCode='BRFUPDN', articleId=hn2[0][2])
# aapl = app.get_stock_contract(ticker='AAPL')
# app.getEarliestTimestamp(contract=aapl, whatToShow='OPTION_IMPLIED_VOLATILITY')
# np.unique(df[df['opt_vol_high'] > 0.5].index.date)
# plt.scatter(df['opt_vol_high'], df['close'])
# # tickers = ['IQ', 'TSLA', 'AMD', 'AYX', 'LNG', 'MU', 'AAPL']
# tickers = ['AAPL']
# for t in tickers:
# app.download_all_history_stock(ticker=t)
def snap_analysis():
ticker = 'SNAP'
full_df = load_data(ticker=ticker)
feature_span = 30
feats, targs = make_features_targets(full_df, future_span=30, feature_span=feature_span)
feats_targs = feats.copy()
# make bid-ask close differences
# high and open seem to be most correlated
feats_targs['ask_bid_close'] = feats_targs['ask_close'] - feats_targs['bid_close']
feats_targs['ask_bid_open'] = feats_targs['ask_open'] - feats_targs['bid_open']
feats_targs['ask_bid_high'] = feats_targs['ask_high'] - feats_targs['bid_high']
feats_targs['ask_bid_low'] = feats_targs['ask_low'] - feats_targs['bid_low']
feats_targs['targets'] = targs
# with future_gap=0, future_span=15, feature_span=15, intraday=True
#
# with future_span=60, feature_span=60
#
# with future_span=30, feature_span=30
#
f = plt.figure(figsize=(12, 12))
sns.heatmap(feats_targs.corr())
plt.tight_layout()
# features that are highly correlated:
# OHLC with all bids and OHLC -- keep only close
# all bids with all bids
# same for OHLC and bid changes
# all opt_vol with each other, except high with others is a bit less correlated
# volume with itself and % change
# bid/ask with itself and each other
# gets more correlated with target as time shortens
f = plt.figure(figsize=(12, 12))
sns.heatmap(feats_targs.iloc[-10000:].corr())
plt.tight_layout()
plt.scatter(feats['close_15_min_pct_chg'], targs)
plt.scatter(feats['close'], targs)
# when opt_vol_high is very high, seems to be highly correlated with price change over 30 mins
plt.scatter(feats['opt_vol_high'], targs)
# all on one day -- 12/09/2017
feats_targs[['opt_vol_high', 'targets']][feats['opt_vol_high'] > 5]
# nothing for opt vol low for SNAP
plt.scatter(feats['opt_vol_low'], targs)
targs.plot.hist(bins=30)
from sklearn.ensemble import RandomForestRegressor
# trim features
feats_trimmed = feats.copy()
fs = str(feature_span)
droplist = ['open',
'high',
'low',
'close',
'bid_open',
'bid_high',
'bid_low',
'bid_close',
'ask_open',
'ask_high',
'ask_low',
'ask_close',
'opt_vol_open',
'opt_vol_low',
'open_' + fs + '_min_pct_chg',
'high_' + fs + '_min_pct_chg',
'low_' + fs + '_min_pct_chg',
'bid_open_' + fs + '_min_pct_chg',
'bid_high_' + fs + '_min_pct_chg',
'bid_low_' + fs + '_min_pct_chg',
'bid_close_' + fs + '_min_pct_chg',
'ask_open_' + fs + '_min_pct_chg',
'ask_high_' + fs + '_min_pct_chg',
'ask_low_' + fs + '_min_pct_chg',
'ask_close_' + fs + '_min_pct_chg']
feats_trimmed.drop(droplist, axis=1, inplace=True)
# take last 25% of data -- about 2 months for SNAP currently (7-2018)
trim_loc = int(0.75 * feats_trimmed.shape[0])
feats_trimmed_small = feats_trimmed.iloc[trim_loc:]
targs_trimmed_small = targs.iloc[trim_loc:]
train_size = 0.85
train_idx = int(train_size * feats_trimmed_small.shape[0])
train_feats = feats_trimmed_small.iloc[:train_idx]
train_targs = targs_trimmed_small.iloc[:train_idx]
test_feats = feats_trimmed_small.iloc[train_idx:]
test_targs = targs_trimmed_small.iloc[train_idx:]
    start = time.time()
    rfr = RandomForestRegressor(n_estimators=500, n_jobs=-1, min_samples_split=10, random_state=42)
    rfr.fit(train_feats, train_targs)
    end = time.time()
    print('training took:', int(end - start), 'seconds')
print(rfr.score(train_feats, train_targs))
print(rfr.score(test_feats, test_targs))
plt.scatter(train_targs, rfr.predict(train_feats))
plt.scatter(test_targs, rfr.predict(test_feats))
feature_importances = rfr.feature_importances_
fi_idx = np.argsort(feature_importances)[::-1]
x = np.arange(len(feature_importances))
f = plt.figure(figsize=(12, 12))
plt.bar(x, feature_importances[fi_idx])
plt.xticks(x, train_feats.columns[fi_idx], rotation=90)
plt.tight_layout()
plt.scatter(feats_trimmed_small['close'], targs_trimmed_small)
#snap_contract = app.get_stock_contract(ticker='SNAP')
# seems to be a bit more data for 3m 1W compared with 1m 1D (650 vs 390)
# historic_data = app.get_IB_historical_data(snap_contract, durationStr="1 D", barSizeSetting="1 min", latest_date='20170305 14:00:00')#'20180504 14:30:00')
# seems to have weird issues with short bars, and seems to be a long-term indicator
# snap_vol = app.get_hist_data_date_range(snap_contract, barSizeSetting='3 mins', whatToShow='HISTORICAL_VOLATILITY', end_date='20170425')
# get earliest timestamp
# earliest = app.getEarliestTimestamp(snap_contract, tickerid=200)
# get list of news providers
# nps = app.getNewsProviders()
# look for period with highest autocorrelation and use that as prediction period
# make a learning curve, look at scores with varying amount of data
#app.disconnect()
|
wrap_test.py
|
from functools import wraps
from threading import Thread, Lock
# module-level lock shared by the synchronized() decorator below
lock = Lock()
def synchronized(lock):
""" Synchronization decorator. """
def real_wrapper(function):
@wraps(function)
def wrapper(*args, **kwargs):
            # 'lock' is captured from the enclosing synchronized() scope
lock.acquire()
try:
return function(*args, **kwargs)
finally:
lock.release()
return wrapper
return real_wrapper
@synchronized(lock)
def worker():
    a.count += 1
class A():
def __init__(self):
self.count = 0
self.lock = Lock()
a = A()
threads = []
for i in range(10):
t = Thread(target=worker)
threads.append(t)
    t.start()
# wait for all worker threads to finish before reading the shared count
for t in threads:
    t.join()
print(a.count)
|
ews.py
|
# vim: sw=4:ts=4:et:cc=120
import collections
import importlib
import logging
import os, os.path
import sqlite3
import threading
from urllib.parse import urlparse
import saq
from saq.constants import *
from saq.collectors import Collector, Submission
from saq.error import report_exception
from saq.util import local_time
from exchangelib import DELEGATE, IMPERSONATION, Account, Credentials, OAuth2Credentials, \
FaultTolerance, Configuration, NTLM, GSSAPI, SSPI, OAUTH2, Build, Version
from exchangelib.errors import ResponseMessageError, ErrorTimeoutExpired
from exchangelib.protocol import BaseProtocol
from requests.adapters import HTTPAdapter
from requests.exceptions import ConnectionError, ReadTimeout
#
# EWS Collector
# collects emails from Exchange accounts using EWS
#
class RootCAAdapter(HTTPAdapter):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.cert_file_map = None
def cert_verify(self, conn, url, verify, cert):
if self.cert_file_map is None:
# load any SSL verification options
self.cert_file_map = collections.defaultdict(lambda: True)
for option, value in saq.CONFIG['ews'].items():
if option.startswith('ssl_ca_'):
fqdn, ssl_value = [_.strip() for _ in value.split(':', 1)]
# the value can optionally be a boolean which tells the requests library
# to verify or not
if ssl_value in [ 'yes', 'no' ]:
ssl_value = ssl_value == 'yes'
logging.debug(f"mapping ews ssl verification for {fqdn} to {ssl_value}")
self.cert_file_map[fqdn] = ssl_value
super().cert_verify(conn=conn, url=url, verify=self.cert_file_map[urlparse(url).hostname], cert=cert)
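# Illustrative sketch, not part of this module: the [ews] options that cert_verify() consumes look
# like "ssl_ca_<name> = <fqdn>:<yes|no|path-to-ca-bundle>". The sample values below are
# hypothetical and only demonstrate how the mapping is built.
def _example_cert_file_map():
    sample_options = {
        'ssl_ca_internal': 'mail.internal.example.com:no',
        'ssl_ca_corp': 'outlook.corp.example.com:yes',
    }
    cert_file_map = collections.defaultdict(lambda: True)
    for option, value in sample_options.items():
        if option.startswith('ssl_ca_'):
            fqdn, ssl_value = [_.strip() for _ in value.split(':', 1)]
            if ssl_value in ['yes', 'no']:
                ssl_value = ssl_value == 'yes'
            cert_file_map[fqdn] = ssl_value
    return cert_file_map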
class EWSCollectionBaseConfiguration(object):
def __init__(self, collector, *args, **kwargs):
super().__init__(*args, **kwargs)
self.collector = collector
self.username = None
self.password = None
self.server = None
self.target_mailbox = None
self.frequency = 60
self.delete_emails = False
self.always_alert = False
self.alert_prefix = None
self.folders = []
BaseProtocol.HTTP_ADAPTER_CLS = RootCAAdapter
# primary execution thread
self.execution_thread = None
def load_from_config(self, section):
self.username = saq.CONFIG[section]['username']
self.password = saq.CONFIG[section]['password']
self.server = saq.CONFIG[section]['server']
self.target_mailbox = saq.CONFIG[section]['target_mailbox']
self.frequency = saq.CONFIG[section].getint('frequency', fallback=60)
#self.folder = saq.CONFIG[section]['folder']
self.delete_emails = saq.CONFIG[section].getboolean('delete_emails', fallback=False)
self.always_alert = saq.CONFIG[section].getboolean('always_alert', fallback=False)
self.alert_prefix = saq.CONFIG[section]['alert_prefix']
self.section = section
for option, value in saq.CONFIG[section].items():
if not option.startswith('folder_'):
continue
self.folders.append(value)
if not self.folders:
logging.error(f"no folder configuration options found for {self.target_mailbox} "
f"in configuration section {section}")
@property
def tracking_db_path(self):
return os.path.join(self.collector.persistence_dir, f'{self.target_mailbox}@{self.server}.db')
def start(self):
self.execution_thread = threading.Thread(target=self.run, name=f'EWS Collection {type(self).__name__}')
self.execution_thread.start()
def debug(self):
self.execute()
def stop(self):
pass
def wait(self, *args, **kwargs):
return self.execution_thread.join(*args, **kwargs)
def run(self):
while not self.collector.is_service_shutdown:
try:
self.execute()
except ( ErrorTimeoutExpired, ConnectionError ) as e:
logging.warning(f"attempt to pull emails from {self.target_mailbox} failed: {e}")
except Exception as e:
logging.error(f"uncaught exception {e}")
report_exception()
# we only execute this every self.frequency seconds
if self.collector.service_shutdown_event.wait(self.frequency):
break
def execute(self, *args, **kwargs):
try:
self._execute(*args, **kwargs)
except ReadTimeout as e:
logging.error(f"read timed out for {self.target_mailbox}: {e}")
return
def _execute(self, *args, **kwargs):
if not self.password:
logging.error(f"no password given for {self.section}. authentication will not be attempted.")
return
if not self.delete_emails:
if not os.path.exists(self.tracking_db_path):
with sqlite3.connect(self.tracking_db_path) as db:
c = db.cursor()
c.execute("""
CREATE TABLE IF NOT EXISTS ews_tracking (
exchange_id TEXT NOT NULL,
message_id TEXT NOT NULL,
insert_date INT NOT NULL )""")
c.execute("""
CREATE INDEX IF NOT EXISTS idx_exchange_id ON ews_tracking(exchange_id)""")
c.execute("""
CREATE INDEX IF NOT EXISTS idx_insert_date ON ews_tracking(insert_date)""")
db.commit()
# get the next emails from this account
credentials = Credentials(self.username, self.password)
config = Configuration(server=self.server, credentials=credentials, auth_type=NTLM) # TODO auth_type should be configurable
_account_class = kwargs.get('account_class') or Account # Account class connects to exchange.
account = _account_class(self.target_mailbox, config=config, autodiscover=False, access_type=DELEGATE) # TODO autodiscover, access_type should be configurable
for folder in self.folders:
path_parts = [_.strip() for _ in folder.split('/')]
root = path_parts.pop(0)
_account = kwargs.get('account_object') or account
try:
target_folder = getattr(_account, root)
except AttributeError:
public_folders_root = _account.public_folders_root
target_folder = public_folders_root / root
#print(target_folder.tree())
for path_part in path_parts:
target_folder = target_folder / path_part
target_folder.refresh()
logging.info(f"checking for emails in {self.target_mailbox} target {folder}")
total_count = 0
already_processed_count = 0
error_count = 0
for message in target_folder.all().order_by('-datetime_received'):
if isinstance(message, ResponseMessageError):
logging.warning(f"error when iterating mailbox {self.target_mailbox} folder {folder}: {message} ({type(message)})")
continue
# XXX not sure why this is happening?
if message.id is None:
continue
total_count += 1
try:
# if we're not deleting emails then we need to make sure we keep track of which ones we've already processed
if not self.delete_emails:
with sqlite3.connect(self.tracking_db_path) as db:
c = db.cursor()
c.execute("SELECT message_id FROM ews_tracking WHERE exchange_id = ?", (message.id,))
result = c.fetchone()
if result is not None:
#logging.debug("already processed exchange message {} message id {} from {}@{}".format(
#message.id, message.message_id, self.target_mailbox, self.server))
already_processed_count += 1
continue
# otherwise process the email message (subclasses deal with the site logic)
self.email_received(message)
except Exception as e:
logging.error(f"unable to process email: {e}")
report_exception()
error_count += 1
if self.delete_emails:
try:
logging.debug(f"deleting message {message.id}")
message.delete()
except Exception as e:
logging.error(f"unable to delete message: {e}")
else:
# if we're not deleting the emails then we track which ones we've already processed
with sqlite3.connect(self.tracking_db_path) as db:
c = db.cursor()
c.execute("""
INSERT INTO ews_tracking (
exchange_id,
message_id,
insert_date ) VALUES ( ?, ?, ? )""",
(message.id, message.message_id, local_time().timestamp()))
# TODO delete anything older than X days
db.commit()
logging.info(f"finished checking for emails in {self.target_mailbox} target {folder}"
f" total {total_count} already_processed {already_processed_count} error {error_count}")
def email_received(self, email):
raise NotImplementedError()
class EWSCollector(Collector):
def __init__(self, *args, **kwargs):
super().__init__(service_config=saq.CONFIG['service_ews_collector'],
workload_type='ews',
delete_files=True,
*args, **kwargs)
# this is super important - this library will log an entire base64 email at warning level
# if there is a base64 error, which it looks like there often is
logging.getLogger('exchangelib').setLevel(logging.ERROR)
def initialize_collector(self):
# the list of EWSCollectionBaseConfiguration objects we're operating
self.account_configurations = []
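        # each enabled [ews_*] section is expected to name the module/class of the
        # EWSCollectionBaseConfiguration subclass to instantiate, plus the options read by
        # load_from_config (username, password, server, target_mailbox, alert_prefix,
        # folder_* entries, and optional frequency/delete_emails/always_alert)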
for section in saq.CONFIG.sections():
if section.startswith('ews_'):
if not saq.CONFIG[section].getboolean('enabled', fallback=False):
continue
module_name = saq.CONFIG[section]['module']
try:
_module = importlib.import_module(module_name)
except Exception as e:
logging.error(f"unable to import ews account config module {module_name}: {e}")
report_exception()
continue
class_name = saq.CONFIG[section]['class']
try:
module_class = getattr(_module, class_name)
except AttributeError as e:
logging.error("class {} does not exist in module {} in ews account config {}".format(
class_name, module_name, section))
report_exception()
continue
account_config = module_class(self)
account_config.load_from_config(section)
logging.info(f"loaded EWS account configuration {section}")
self.account_configurations.append(account_config)
def extended_collection(self):
# start a separate collection thread for each account we're collecting emails for
logging.debug("starting ews account collectors")
for account in self.account_configurations:
account.start()
logging.debug("waiting for ews account collectors to complete")
for account in self.account_configurations:
account.wait()
def debug_extended_collection(self):
logging.debug("debugging ews account collectors")
for account in self.account_configurations:
account.debug()
|
run_end_to_end_tests.py
|
#!/usr/bin/env python3
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import flask
import glob
import json
import logging
import os
import re
import shutil
import subprocess
import sys
import time
import threading
import traceback
import urllib.request
from mypy import api as mypy_api
from streamer import node_base
from streamer.controller_node import ControllerNode
from streamer.configuration import ConfigError
from werkzeug.utils import secure_filename
OUTPUT_DIR = 'output_files/'
TEST_DIR = 'test_assets/'
CLOUD_TEST_ASSETS = (
'https://storage.googleapis.com/shaka-streamer-assets/test-assets/')
# Turn down Flask's logging so that the console isn't flooded with messages
# about every request. Because flask is built on top of another tool called
# "werkzeug", this the name we use to retrieve the log instance.
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
# Changes relative path to where this file is.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
os.chdir(BASE_DIR)
controller = None
use_system_binaries = False
# Flask was unable to find the root_path correctly after an os.chdir() from another
# directory; the reason is unclear. See https://stackoverflow.com/questions/35864584/error-no-such-file-or-directory-when-using-os-chdir-in-flask
app = flask.Flask(__name__, root_path=BASE_DIR)
# Stops browser from caching files to prevent cross-test contamination.
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
def cleanup():
# If the controller is running, stop it.
global controller
if controller is not None:
controller.stop()
controller = None
# If the output directory exists, delete it and make a new one.
if os.path.exists(OUTPUT_DIR):
shutil.rmtree(OUTPUT_DIR)
os.mkdir(OUTPUT_DIR)
def createCrossOriginResponse(body=None, status=200, mimetype='text/plain'):
# Enable CORS because karma and flask are cross-origin.
resp = flask.Response(response=body, status=status)
resp.headers.add('Content-Type', mimetype)
resp.headers.add('Access-Control-Allow-Origin', '*')
resp.headers.add('Access-Control-Allow-Methods', 'GET,POST')
return resp
def dashStreamsReady(manifest_path):
"""Wait for DASH streams to be ready.
Return True if the DASH manifest exists and each Representation has at least
one segment in it.
"""
# Check to see if the DASH manifest exists yet.
if not os.path.exists(manifest_path):
return False
# Waiting until every Representation has a segment.
pattern = re.compile(r'<Representation.*?((\n).*?)*?Representation>')
with open(manifest_path) as manifest_file:
for representation in pattern.finditer(manifest_file.read()):
if controller.is_low_latency_dash_mode():
# LL-DASH manifests do not contain the segment reference tag <S>.
# Check for the availabilityTimeOffset attribute instead.
if not re.search(r'availabilityTimeOffset', representation.group()):
          # This Representation does not have an availabilityTimeOffset yet,
          # meaning the first chunk is not yet ready for playout.
return False
else:
if not re.search(r'<S t', representation.group()):
# This Representation has no segments.
return False
return True
def hlsStreamsReady(master_playlist_path):
"""Wait for HLS streams to be ready.
Return True if the HLS master playlist exists, and all of the media playlists
referenced by it exist, and each of those media playlists have at least one
segment in it.
"""
# Check to see if the HLS master playlist exists yet.
if not os.path.exists(master_playlist_path):
return False
# Parsing master playlist to see how many media playlists there are.
# Do this every time, since the master playlist contents may change.
with open(master_playlist_path) as hls_file:
contents = hls_file.read()
media_playlist_list = re.findall(r'^.*\.m3u8$', contents, re.MULTILINE)
media_playlist_count = len(media_playlist_list)
# See how many playlists exist so far.
playlist_list = glob.glob(OUTPUT_DIR + '*.m3u8')
# Return False if we don't have the right number. The +1 accounts for the
# master playlist.
if len(playlist_list) != media_playlist_count + 1:
return False
for playlist_path in playlist_list:
# Use os.path.samefile method instead of the == operator because
# this might be a windows machine.
if os.path.samefile(playlist_path, master_playlist_path):
# Skip the master playlist
continue
with open(playlist_path) as playlist_file:
if '#EXTINF' not in playlist_file.read():
# This doesn't have segments yet.
return False
return True
@app.route('/start', methods = ['POST'])
def start():
global controller
if controller is not None:
return createCrossOriginResponse(
status=403, body='Instance already running!')
cleanup()
# Receives configs from the tests to start Shaka Streamer.
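  # The request body is assumed to mirror the streamer config files, e.g. (hypothetical):
  #   {"output_location": "output_files/",
  #    "input_config": {...}, "pipeline_config": {...}, "bitrate_config": {...}}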
try:
configs = json.loads(flask.request.data)
except Exception as e:
return createCrossOriginResponse(status=400, body=str(e))
# Enforce quiet mode without needing it specified in every test.
configs['pipeline_config']['quiet'] = True
controller = ControllerNode()
try:
controller.start(configs['output_location'],
configs['input_config'],
configs['pipeline_config'],
configs['bitrate_config'],
check_deps=False,
use_hermetic=not use_system_binaries)
except Exception as e:
# If the controller throws an exception during startup, we want to call
# stop() to shut down any external processes that have already been started.
controller.stop()
controller = None
# Then, fail the request with a message that indicates what the error was.
if isinstance(e, ConfigError):
body = json.dumps({
'error_type': type(e).__name__,
'class_name': e.class_name,
'field_name': e.field_name,
'field_type': e.field.get_type_name(),
'message': str(e),
})
return createCrossOriginResponse(
status=418, mimetype='application/json', body=body)
elif isinstance(e, RuntimeError):
body = json.dumps({
'error_type': 'RuntimeError',
'message': str(e),
})
return createCrossOriginResponse(
status=418, mimetype='application/json', body=body)
else:
print('EXCEPTION', repr(e), traceback.format_exc(), flush=True)
return createCrossOriginResponse(status=500, body=str(e))
return createCrossOriginResponse()
@app.route('/stop')
def stop():
global controller
resp = createCrossOriginResponse()
if controller is not None:
# Check status to see if one of the processes exited.
if controller.check_status() == node_base.ProcessStatus.Errored:
resp = createCrossOriginResponse(
status=500, body='Some processes exited with non-zero exit codes')
cleanup()
return resp
@app.route('/output_files/<path:filename>', methods = ['GET', 'OPTIONS'])
def send_file(filename):
filename = secure_filename(filename)
if not controller:
return createCrossOriginResponse(
status=403, body='Instance already shut down!')
elif controller.is_vod():
# If streaming mode is vod, needs to wait until packager is completely
# done packaging contents.
while True:
status = controller.check_status()
if status == node_base.ProcessStatus.Finished:
break
elif status != node_base.ProcessStatus.Running:
return createCrossOriginResponse(
status=500, body='Some processes exited with non-zero exit codes')
time.sleep(1)
else:
# If streaming mode is live, needs to wait for specific content in
# manifest until it can be loaded by the player.
if filename.endswith('.mpd'):
while not dashStreamsReady(OUTPUT_DIR + filename):
time.sleep(1)
elif filename.endswith('.m3u8') and not filename.startswith('stream_'):
while not hlsStreamsReady(OUTPUT_DIR + filename):
time.sleep(1)
# Sending over requested files.
try:
response = flask.send_file(OUTPUT_DIR + filename)
except FileNotFoundError:
response = flask.Response(response='File not found', status=404)
response.headers.add('Access-Control-Allow-Origin', '*')
response.headers.add('Access-Control-Allow-Headers', 'RANGE')
return response
def fetch_cloud_assets():
file_list = [
'BigBuckBunny.1080p.mp4',
'Sintel.2010.720p.Small.mkv',
'Sintel.2010.Arabic.vtt',
'Sintel.2010.Chinese.vtt',
'Sintel.2010.English.vtt',
'Sintel.2010.Esperanto.vtt',
'Sintel.2010.French.vtt',
'Sintel.2010.Spanish.vtt',
'Sintel.with.subs.mkv',
]
  # Download all the assets needed for tests.
for file in file_list:
if not os.path.exists(TEST_DIR + file):
response = urllib.request.urlretrieve(CLOUD_TEST_ASSETS +
file,
TEST_DIR + file)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--runs', default=1, type=int,
help='Number of trials to run')
parser.add_argument('--reporters', nargs='+',
help='Enables specified reporters in karma')
parser.add_argument('--use-system-binaries',
action='store_true',
help='Use FFmpeg, FFprobe and Shaka Packager binaries ' +
'found in PATH instead of the ones offered by ' +
'Shaka Streamer.')
args = parser.parse_args()
global use_system_binaries
use_system_binaries = args.use_system_binaries
# Do static type checking on the project first.
type_check_result = mypy_api.run(['streamer/'])
if type_check_result[2] != 0:
print('The type checker found the following errors: ')
print(type_check_result[0])
return 1
# Install test dependencies.
install_deps_command = ['npm', 'install']
subprocess.check_call(install_deps_command)
# Fetch streams used in tests.
if not os.path.exists(TEST_DIR):
os.mkdir(TEST_DIR)
fetch_cloud_assets()
# Start up flask server on a thread.
# Daemon is set to True so that this thread automatically gets
# killed when exiting main. Flask does not have any clean alternatives
# to be killed.
threading.Thread(target=app.run, daemon=True).start()
fails = 0
trials = args.runs
print('Running', trials, 'trials')
for i in range(trials):
# Start up karma.
karma_args = [
'node',
'node_modules/karma/bin/karma',
'start',
'tests/karma.conf.js',
# DRM currently is not compatible with headless, so it's run in Chrome.
# Linux: If you want to run tests as "headless", wrap it with "xvfb-run -a".
'--browsers', 'Chrome',
'--single-run',
]
if args.reporters:
converted_string = ','.join(args.reporters)
karma_args += [
'--reporters',
converted_string,
]
# If the exit code was not 0, the tests in karma failed or crashed.
if subprocess.call(karma_args) != 0:
fails += 1
print('\n\nNumber of failures:', fails, '\nNumber of trials:', trials)
print('\nSuccess rate:', 100 * (trials - fails) / trials, '%')
cleanup()
return fails
if __name__ == '__main__':
# Exit code based on test results from subprocess call.
sys.exit(main())
|
server.py
|
import socket, threading, time
MAX_CONNECTION_NUMBER = 5
MAX_MESSAGE_LIMIT = 1024 # bytes
PORT = 9999
IP = '127.0.0.1'
def tcp_link(sock, addr):
    print(f'Accept new connection from {sock} {addr}...')
sock.send(b'Welcome!')
while True:
data = sock.recv(MAX_MESSAGE_LIMIT) # max recv bytes 1k
time.sleep(1)
if not data or data.decode('utf-8') == 'exit':
break
data = data.decode('utf-8')
msg = (f'Hello, {data}').encode('utf-8')
sock.send(msg)
sock.close() # close connection
    print('Connection closed:\nSocket:{}\nAddr:{}.'.format(sock, addr))
def main():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # create socket
s.bind((IP, PORT)) # bind port
s.listen(MAX_CONNECTION_NUMBER) # listening port
print('Waiting for connection')
while True:
sock, addr = s.accept() # accept new connection
t = threading.Thread(target=tcp_link, args=(sock, addr)) # create new thread for TCP connection
t.start()
if __name__ == "__main__":
main()
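# Minimal client sketch for manual testing, assuming the server above is running on 127.0.0.1:9999:
#   import socket
#   s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   s.connect(('127.0.0.1', 9999))
#   print(s.recv(1024).decode('utf-8'))   # -> 'Welcome!'
#   s.send(b'Alice')
#   print(s.recv(1024).decode('utf-8'))   # -> 'Hello, Alice'
#   s.send(b'exit')
#   s.close()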
|
core.py
|
"""
homeassistant
~~~~~~~~~~~~~
Home Assistant is a Home Automation framework for observing the state
of entities and react to changes.
"""
import os
import time
import logging
import threading
import enum
import re
import functools as ft
from collections import namedtuple
from homeassistant.const import (
EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP,
SERVICE_HOMEASSISTANT_STOP, EVENT_TIME_CHANGED, EVENT_STATE_CHANGED,
EVENT_CALL_SERVICE, ATTR_NOW, ATTR_DOMAIN, ATTR_SERVICE, MATCH_ALL,
EVENT_SERVICE_EXECUTED, ATTR_SERVICE_CALL_ID, EVENT_SERVICE_REGISTERED,
TEMP_CELCIUS, TEMP_FAHRENHEIT, ATTR_FRIENDLY_NAME)
import homeassistant.util as util
import homeassistant.util.dt as date_util
import homeassistant.helpers.temperature as temp_helper
DOMAIN = "homeassistant"
# How often time_changed event should fire
TIMER_INTERVAL = 1 # seconds
# How long we wait for the result of a service call
SERVICE_CALL_LIMIT = 10 # seconds
# Define number of MINIMUM worker threads.
# During bootstrap of HA (see bootstrap._setup_component()) worker threads
# will be added for each component that polls devices.
MIN_WORKER_THREAD = 2
# Pattern for validating entity IDs (format: <domain>.<entity>)
ENTITY_ID_PATTERN = re.compile(r"^(?P<domain>\w+)\.(?P<entity>\w+)$")
_LOGGER = logging.getLogger(__name__)
# Temporary to support deprecated methods
_MockHA = namedtuple("MockHomeAssistant", ['bus'])
class HomeAssistant(object):
""" Core class to route all communication to right components. """
def __init__(self):
self.pool = pool = create_worker_pool()
self.bus = EventBus(pool)
self.services = ServiceRegistry(self.bus, pool)
self.states = StateMachine(self.bus)
self.config = Config()
def start(self):
""" Start home assistant. """
_LOGGER.info(
"Starting Home Assistant (%d threads)", self.pool.worker_count)
create_timer(self)
self.bus.fire(EVENT_HOMEASSISTANT_START)
def block_till_stopped(self):
""" Will register service homeassistant/stop and
will block until called. """
request_shutdown = threading.Event()
def stop_homeassistant(service):
""" Stops Home Assistant. """
request_shutdown.set()
self.services.register(
DOMAIN, SERVICE_HOMEASSISTANT_STOP, stop_homeassistant)
while not request_shutdown.isSet():
try:
time.sleep(1)
except KeyboardInterrupt:
break
self.stop()
def stop(self):
""" Stops Home Assistant and shuts down all threads. """
_LOGGER.info("Stopping")
self.bus.fire(EVENT_HOMEASSISTANT_STOP)
# Wait till all responses to homeassistant_stop are done
self.pool.block_till_done()
self.pool.stop()
def track_point_in_time(self, action, point_in_time):
"""Deprecated method as of 8/4/2015 to track point in time."""
_LOGGER.warning(
'hass.track_point_in_time is deprecated. '
'Please use homeassistant.helpers.event.track_point_in_time')
import homeassistant.helpers.event as helper
helper.track_point_in_time(self, action, point_in_time)
def track_point_in_utc_time(self, action, point_in_time):
"""Deprecated method as of 8/4/2015 to track point in UTC time."""
_LOGGER.warning(
'hass.track_point_in_utc_time is deprecated. '
'Please use homeassistant.helpers.event.track_point_in_utc_time')
import homeassistant.helpers.event as helper
helper.track_point_in_utc_time(self, action, point_in_time)
def track_utc_time_change(self, action,
year=None, month=None, day=None,
hour=None, minute=None, second=None):
"""Deprecated method as of 8/4/2015 to track UTC time change."""
# pylint: disable=too-many-arguments
_LOGGER.warning(
'hass.track_utc_time_change is deprecated. '
'Please use homeassistant.helpers.event.track_utc_time_change')
import homeassistant.helpers.event as helper
helper.track_utc_time_change(self, action, year, month, day, hour,
minute, second)
def track_time_change(self, action,
year=None, month=None, day=None,
hour=None, minute=None, second=None, utc=False):
"""Deprecated method as of 8/4/2015 to track time change."""
# pylint: disable=too-many-arguments
_LOGGER.warning(
'hass.track_time_change is deprecated. '
'Please use homeassistant.helpers.event.track_time_change')
import homeassistant.helpers.event as helper
helper.track_time_change(self, action, year, month, day, hour,
minute, second)
class JobPriority(util.OrderedEnum):
""" Provides priorities for bus events. """
# pylint: disable=no-init,too-few-public-methods
EVENT_CALLBACK = 0
EVENT_SERVICE = 1
EVENT_STATE = 2
EVENT_TIME = 3
EVENT_DEFAULT = 4
@staticmethod
def from_event_type(event_type):
""" Returns a priority based on event type. """
if event_type == EVENT_TIME_CHANGED:
return JobPriority.EVENT_TIME
elif event_type == EVENT_STATE_CHANGED:
return JobPriority.EVENT_STATE
elif event_type == EVENT_CALL_SERVICE:
return JobPriority.EVENT_SERVICE
elif event_type == EVENT_SERVICE_EXECUTED:
return JobPriority.EVENT_CALLBACK
else:
return JobPriority.EVENT_DEFAULT
class EventOrigin(enum.Enum):
""" Distinguish between origin of event. """
# pylint: disable=no-init,too-few-public-methods
local = "LOCAL"
remote = "REMOTE"
def __str__(self):
return self.value
# pylint: disable=too-few-public-methods
class Event(object):
""" Represents an event within the Bus. """
__slots__ = ['event_type', 'data', 'origin', 'time_fired']
def __init__(self, event_type, data=None, origin=EventOrigin.local,
time_fired=None):
self.event_type = event_type
self.data = data or {}
self.origin = origin
self.time_fired = date_util.strip_microseconds(
time_fired or date_util.utcnow())
def as_dict(self):
""" Returns a dict representation of this Event. """
return {
'event_type': self.event_type,
'data': dict(self.data),
'origin': str(self.origin),
'time_fired': date_util.datetime_to_str(self.time_fired),
}
def __repr__(self):
# pylint: disable=maybe-no-member
if self.data:
return "<Event {}[{}]: {}>".format(
self.event_type, str(self.origin)[0],
util.repr_helper(self.data))
else:
return "<Event {}[{}]>".format(self.event_type,
str(self.origin)[0])
def __eq__(self, other):
return (self.__class__ == other.__class__ and
self.event_type == other.event_type and
self.data == other.data and
self.origin == other.origin and
self.time_fired == other.time_fired)
class EventBus(object):
""" Class that allows different components to communicate via services
and events.
"""
def __init__(self, pool=None):
self._listeners = {}
self._lock = threading.Lock()
self._pool = pool or create_worker_pool()
@property
def listeners(self):
""" Dict with events that is being listened for and the number
of listeners.
"""
with self._lock:
return {key: len(self._listeners[key])
for key in self._listeners}
def fire(self, event_type, event_data=None, origin=EventOrigin.local):
""" Fire an event. """
if not self._pool.running:
raise HomeAssistantError('Home Assistant has shut down.')
with self._lock:
# Copy the list of the current listeners because some listeners
# remove themselves as a listener while being executed which
# causes the iterator to be confused.
get = self._listeners.get
listeners = get(MATCH_ALL, []) + get(event_type, [])
event = Event(event_type, event_data, origin)
if event_type != EVENT_TIME_CHANGED:
_LOGGER.info("Bus:Handling %s", event)
if not listeners:
return
job_priority = JobPriority.from_event_type(event_type)
for func in listeners:
self._pool.add_job(job_priority, (func, event))
def listen(self, event_type, listener):
""" Listen for all events or events of a specific type.
To listen to all events specify the constant ``MATCH_ALL``
as event_type.
"""
with self._lock:
if event_type in self._listeners:
self._listeners[event_type].append(listener)
else:
self._listeners[event_type] = [listener]
def listen_once(self, event_type, listener):
""" Listen once for event of a specific type.
To listen to all events specify the constant ``MATCH_ALL``
as event_type.
Returns registered listener that can be used with remove_listener.
"""
@ft.wraps(listener)
def onetime_listener(event):
""" Removes listener from eventbus and then fires listener. """
if hasattr(onetime_listener, 'run'):
return
# Set variable so that we will never run twice.
# Because the event bus might have to wait till a thread comes
# available to execute this listener it might occur that the
# listener gets lined up twice to be executed.
# This will make sure the second time it does nothing.
onetime_listener.run = True
self.remove_listener(event_type, onetime_listener)
listener(event)
self.listen(event_type, onetime_listener)
return onetime_listener
def remove_listener(self, event_type, listener):
""" Removes a listener of a specific event_type. """
with self._lock:
try:
self._listeners[event_type].remove(listener)
# delete event_type list if empty
if not self._listeners[event_type]:
self._listeners.pop(event_type)
except (KeyError, ValueError):
                # KeyError if no listener list exists for event_type
# ValueError if listener did not exist within event_type
pass
class State(object):
"""
Object to represent a state within the state machine.
entity_id: the entity that is represented.
state: the state of the entity
attributes: extra information on entity and state
last_changed: last time the state was changed, not the attributes.
last_updated: last time this object was updated.
"""
__slots__ = ['entity_id', 'state', 'attributes',
'last_changed', 'last_updated']
# pylint: disable=too-many-arguments
def __init__(self, entity_id, state, attributes=None, last_changed=None,
last_updated=None):
if not ENTITY_ID_PATTERN.match(entity_id):
raise InvalidEntityFormatError((
"Invalid entity id encountered: {}. "
"Format should be <domain>.<object_id>").format(entity_id))
self.entity_id = entity_id.lower()
self.state = state
self.attributes = attributes or {}
self.last_updated = date_util.strip_microseconds(
last_updated or date_util.utcnow())
# Strip microsecond from last_changed else we cannot guarantee
# state == State.from_dict(state.as_dict())
        # This behavior occurs because as_dict uses datetime_to_str
# which does not preserve microseconds
self.last_changed = date_util.strip_microseconds(
last_changed or self.last_updated)
@property
def domain(self):
""" Returns domain of this state. """
return util.split_entity_id(self.entity_id)[0]
@property
def object_id(self):
""" Returns object_id of this state. """
return util.split_entity_id(self.entity_id)[1]
@property
def name(self):
""" Name to represent this state. """
return (
self.attributes.get(ATTR_FRIENDLY_NAME) or
self.object_id.replace('_', ' '))
def copy(self):
""" Creates a copy of itself. """
return State(self.entity_id, self.state,
dict(self.attributes), self.last_changed)
def as_dict(self):
""" Converts State to a dict to be used within JSON.
Ensures: state == State.from_dict(state.as_dict()) """
return {'entity_id': self.entity_id,
'state': self.state,
'attributes': self.attributes,
'last_changed': date_util.datetime_to_str(self.last_changed),
'last_updated': date_util.datetime_to_str(self.last_updated)}
@classmethod
def from_dict(cls, json_dict):
""" Static method to create a state from a dict.
        Ensures: state == State.from_dict(state.as_dict()) """
if not (json_dict and
'entity_id' in json_dict and
'state' in json_dict):
return None
last_changed = json_dict.get('last_changed')
if last_changed:
last_changed = date_util.str_to_datetime(last_changed)
last_updated = json_dict.get('last_updated')
if last_updated:
last_updated = date_util.str_to_datetime(last_updated)
return cls(json_dict['entity_id'], json_dict['state'],
json_dict.get('attributes'), last_changed, last_updated)
def __eq__(self, other):
return (self.__class__ == other.__class__ and
self.entity_id == other.entity_id and
self.state == other.state and
self.attributes == other.attributes)
def __repr__(self):
attr = "; {}".format(util.repr_helper(self.attributes)) \
if self.attributes else ""
return "<state {}={}{} @ {}>".format(
self.entity_id, self.state, attr,
date_util.datetime_to_local_str(self.last_changed))
class StateMachine(object):
""" Helper class that tracks the state of different entities. """
def __init__(self, bus):
self._states = {}
self._bus = bus
self._lock = threading.Lock()
def entity_ids(self, domain_filter=None):
""" List of entity ids that are being tracked. """
if domain_filter is None:
return list(self._states.keys())
domain_filter = domain_filter.lower()
return [state.entity_id for key, state
in self._states.items()
if util.split_entity_id(key)[0] == domain_filter]
def all(self):
""" Returns a list of all states. """
with self._lock:
return [state.copy() for state in self._states.values()]
def get(self, entity_id):
""" Returns the state of the specified entity. """
state = self._states.get(entity_id.lower())
# Make a copy so people won't mutate the state
return state.copy() if state else None
def is_state(self, entity_id, state):
""" Returns True if entity exists and is specified state. """
entity_id = entity_id.lower()
return (entity_id in self._states and
self._states[entity_id].state == state)
def remove(self, entity_id):
""" Removes an entity from the state machine.
Returns boolean to indicate if an entity was removed. """
entity_id = entity_id.lower()
with self._lock:
return self._states.pop(entity_id, None) is not None
def set(self, entity_id, new_state, attributes=None):
""" Set the state of an entity, add entity if it does not exist.
Attributes is an optional dict to specify attributes of this state.
If you just update the attributes and not the state, last changed will
not be affected.
"""
entity_id = entity_id.lower()
new_state = str(new_state)
attributes = attributes or {}
with self._lock:
old_state = self._states.get(entity_id)
is_existing = old_state is not None
same_state = is_existing and old_state.state == new_state
same_attr = is_existing and old_state.attributes == attributes
if same_state and same_attr:
return
# If state did not exist or is different, set it
last_changed = old_state.last_changed if same_state else None
state = State(entity_id, new_state, attributes, last_changed)
self._states[entity_id] = state
event_data = {'entity_id': entity_id, 'new_state': state}
if old_state:
event_data['old_state'] = old_state
self._bus.fire(EVENT_STATE_CHANGED, event_data)
def track_change(self, entity_ids, action, from_state=None, to_state=None):
"""
DEPRECATED AS OF 8/4/2015
"""
_LOGGER.warning(
'hass.states.track_change is deprecated. '
'Use homeassistant.helpers.event.track_state_change instead.')
import homeassistant.helpers.event as helper
helper.track_state_change(_MockHA(self._bus), entity_ids, action,
from_state, to_state)
# pylint: disable=too-few-public-methods
class ServiceCall(object):
""" Represents a call to a service. """
__slots__ = ['domain', 'service', 'data']
def __init__(self, domain, service, data=None):
self.domain = domain
self.service = service
self.data = data or {}
def __repr__(self):
if self.data:
return "<ServiceCall {}.{}: {}>".format(
self.domain, self.service, util.repr_helper(self.data))
else:
return "<ServiceCall {}.{}>".format(self.domain, self.service)
class ServiceRegistry(object):
""" Offers services over the eventbus. """
def __init__(self, bus, pool=None):
self._services = {}
self._lock = threading.Lock()
self._pool = pool or create_worker_pool()
self._bus = bus
self._cur_id = 0
bus.listen(EVENT_CALL_SERVICE, self._event_to_service_call)
@property
def services(self):
""" Dict with per domain a list of available services. """
with self._lock:
return {domain: list(self._services[domain].keys())
for domain in self._services}
def has_service(self, domain, service):
""" Returns True if specified service exists. """
return service in self._services.get(domain, [])
def register(self, domain, service, service_func):
""" Register a service. """
with self._lock:
if domain in self._services:
self._services[domain][service] = service_func
else:
self._services[domain] = {service: service_func}
self._bus.fire(
EVENT_SERVICE_REGISTERED,
{ATTR_DOMAIN: domain, ATTR_SERVICE: service})
def call(self, domain, service, service_data=None, blocking=False):
"""
Calls specified service.
Specify blocking=True to wait till service is executed.
Waits a maximum of SERVICE_CALL_LIMIT.
If blocking = True, will return boolean if service executed
        successfully within SERVICE_CALL_LIMIT.
This method will fire an event to call the service.
This event will be picked up by this ServiceRegistry and any
other ServiceRegistry that is listening on the EventBus.
Because the service is sent as an event you are not allowed to use
the keys ATTR_DOMAIN and ATTR_SERVICE in your service_data.
"""
call_id = self._generate_unique_id()
event_data = service_data or {}
event_data[ATTR_DOMAIN] = domain
event_data[ATTR_SERVICE] = service
event_data[ATTR_SERVICE_CALL_ID] = call_id
if blocking:
executed_event = threading.Event()
def service_executed(call):
"""
Called when a service is executed.
Will set the event if matches our service call.
"""
if call.data[ATTR_SERVICE_CALL_ID] == call_id:
executed_event.set()
self._bus.listen(EVENT_SERVICE_EXECUTED, service_executed)
self._bus.fire(EVENT_CALL_SERVICE, event_data)
if blocking:
success = executed_event.wait(SERVICE_CALL_LIMIT)
self._bus.remove_listener(
EVENT_SERVICE_EXECUTED, service_executed)
return success
def _event_to_service_call(self, event):
""" Calls a service from an event. """
service_data = dict(event.data)
domain = service_data.pop(ATTR_DOMAIN, None)
service = service_data.pop(ATTR_SERVICE, None)
if not self.has_service(domain, service):
return
service_handler = self._services[domain][service]
service_call = ServiceCall(domain, service, service_data)
# Add a job to the pool that calls _execute_service
self._pool.add_job(JobPriority.EVENT_SERVICE,
(self._execute_service,
(service_handler, service_call)))
def _execute_service(self, service_and_call):
""" Executes a service and fires a SERVICE_EXECUTED event. """
service, call = service_and_call
service(call)
if ATTR_SERVICE_CALL_ID in call.data:
self._bus.fire(
EVENT_SERVICE_EXECUTED,
{ATTR_SERVICE_CALL_ID: call.data[ATTR_SERVICE_CALL_ID]})
def _generate_unique_id(self):
""" Generates a unique service call id. """
self._cur_id += 1
return "{}-{}".format(id(self), self._cur_id)
class Config(object):
""" Configuration settings for Home Assistant. """
# pylint: disable=too-many-instance-attributes
def __init__(self):
self.latitude = None
self.longitude = None
self.temperature_unit = None
self.location_name = None
self.time_zone = None
# List of loaded components
self.components = []
# Remote.API object pointing at local API
self.api = None
# Directory that holds the configuration
self.config_dir = os.path.join(os.getcwd(), 'config')
def path(self, *path):
""" Returns path to the file within the config dir. """
return os.path.join(self.config_dir, *path)
def temperature(self, value, unit):
""" Converts temperature to user preferred unit if set. """
if not (unit in (TEMP_CELCIUS, TEMP_FAHRENHEIT) and
self.temperature_unit and unit != self.temperature_unit):
return value, unit
try:
temp = float(value)
except ValueError: # Could not convert value to float
return value, unit
return (
round(temp_helper.convert(temp, unit, self.temperature_unit), 1),
self.temperature_unit)
def as_dict(self):
""" Converts config to a dictionary. """
time_zone = self.time_zone or date_util.UTC
return {
'latitude': self.latitude,
'longitude': self.longitude,
'temperature_unit': self.temperature_unit,
'location_name': self.location_name,
'time_zone': time_zone.zone,
'components': self.components,
}
class HomeAssistantError(Exception):
""" General Home Assistant exception occured. """
pass
class InvalidEntityFormatError(HomeAssistantError):
""" When an invalid formatted entity is encountered. """
pass
class NoEntitySpecifiedError(HomeAssistantError):
""" When no entity is specified. """
pass
def create_timer(hass, interval=TIMER_INTERVAL):
""" Creates a timer. Timer will start on HOMEASSISTANT_START. """
# We want to be able to fire every time a minute starts (seconds=0).
# We want this so other modules can use that to make sure they fire
# every minute.
assert 60 % interval == 0, "60 % TIMER_INTERVAL should be 0!"
def timer():
"""Send an EVENT_TIME_CHANGED on interval."""
stop_event = threading.Event()
def stop_timer(event):
"""Stop the timer."""
stop_event.set()
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_timer)
_LOGGER.info("Timer:starting")
last_fired_on_second = -1
calc_now = date_util.utcnow
while not stop_event.isSet():
now = calc_now()
# First check checks if we are not on a second matching the
# timer interval. Second check checks if we did not already fire
# this interval.
if now.second % interval or \
now.second == last_fired_on_second:
# Sleep till it is the next time that we have to fire an event.
# Aim for halfway through the second that fits TIMER_INTERVAL.
# If TIMER_INTERVAL is 10 fire at .5, 10.5, 20.5, etc seconds.
# This will yield the best results because time.sleep() is not
# 100% accurate because of non-realtime OS's
slp_seconds = interval - now.second % interval + \
.5 - now.microsecond/1000000.0
time.sleep(slp_seconds)
now = calc_now()
last_fired_on_second = now.second
# Event might have been set while sleeping
if not stop_event.isSet():
try:
hass.bus.fire(EVENT_TIME_CHANGED, {ATTR_NOW: now})
except HomeAssistantError:
# HA raises error if firing event after it has shut down
break
def start_timer(event):
"""Start the timer."""
thread = threading.Thread(target=timer)
thread.daemon = True
thread.start()
hass.bus.listen_once(EVENT_HOMEASSISTANT_START, start_timer)
def create_worker_pool(worker_count=MIN_WORKER_THREAD):
""" Creates a worker pool to be used. """
def job_handler(job):
""" Called whenever a job is available to do. """
try:
func, arg = job
func(arg)
except Exception: # pylint: disable=broad-except
# Catch any exception our service/event_listener might throw
# We do not want to crash our ThreadPool
_LOGGER.exception("BusHandler:Exception doing job")
def busy_callback(worker_count, current_jobs, pending_jobs_count):
""" Callback to be called when the pool queue gets too big. """
_LOGGER.warning(
"WorkerPool:All %d threads are busy and %d jobs pending",
worker_count, pending_jobs_count)
for start, job in current_jobs:
_LOGGER.warning("WorkerPool:Current job from %s: %s",
date_util.datetime_to_local_str(start), job)
return util.ThreadPool(job_handler, worker_count, busy_callback)
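# Minimal bootstrap sketch using only this module (real installs go through
# homeassistant.bootstrap, which also loads configuration and components):
#   hass = HomeAssistant()
#   hass.states.set('switch.demo', 'on', {ATTR_FRIENDLY_NAME: 'Demo Switch'})
#   hass.start()
#   hass.block_till_stopped()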
|
client.py
|
import socket
from tkinter import *
from threading import Thread
import random
from PIL import ImageTk, Image
screen_width = None
screen_height = None
SERVER = None
PORT = None
IP_ADDRESS = None
playerName = None
canvas1 = None
canvas2 = None
nameEntry = None
nameWindow = None
gameWindow = None
leftBoxes = []
rightBoxes = []
finishingBox = None
playerType = None
playerTurn = None
player1Name = 'joining'
player2Name = 'joining'
player1Label = None
player2Label = None
player1Score = 0
player2Score = 0
player1ScoreLabel = None
player2ScoreLabel = None
dice = None
rollButton = None
resetButton = None
winingMessage = None
winingFunctionCall = 0
def checkColorPosition(boxes, color):
    # Return the index of the box currently coloured `color`, or None when that
    # colour is not on the board yet. Index 0 is a valid position, so callers
    # must test the result with `is not None`.
    for box in boxes:
        boxColor = box.cget("bg")
        if(boxColor == color):
            return boxes.index(box)
    return None
def movePlayer1(steps):
global leftBoxes
    boxPosition = checkColorPosition(leftBoxes[1:], "red")
    if(boxPosition is not None):
diceValue = steps
coloredBoxIndex = boxPosition
totalSteps = 10
remainingSteps = totalSteps - coloredBoxIndex
if(steps == remainingSteps):
for box in leftBoxes[1:]:
box.configure(bg='white')
global finishingBox
finishingBox.configure(bg='red')
global SERVER
global playerName
greetMessage = f'Red wins the game.'
SERVER.send(greetMessage.encode())
elif(steps < remainingSteps):
for box in leftBoxes[1:]:
box.configure(bg='white')
nextStep = (coloredBoxIndex + 1 ) + diceValue
leftBoxes[nextStep].configure(bg='red')
else:
print("Move False")
else:
# first step
leftBoxes[steps].configure(bg='red')
def movePlayer2(steps):
global rightBoxes
tempBoxes=rightBoxes[-2::-1]
    boxPosition = checkColorPosition(tempBoxes, "yellow")
    if(boxPosition is not None):
diceValue = steps
coloredBoxIndex = boxPosition
totalSteps = 10
remainingSteps = totalSteps - coloredBoxIndex
if(diceValue == remainingSteps):
for box in rightBoxes[-2::-1]:
box.configure(bg='white')
global finishingBox
finishingBox.configure(bg='yellow',fg="black")
global SERVER
global playerName
greetMessage = f'Yellow wins the game.'
SERVER.send(greetMessage.encode())
elif(diceValue < remainingSteps):
for box in rightBoxes[-2::-1]:
box.configure(bg='white')
            # index into the reversed view so yellow advances right-to-left
            nextStep = coloredBoxIndex + diceValue
            tempBoxes[nextStep].configure(bg='yellow')
else:
print("Move False")
else:
# first step
rightBoxes[len(rightBoxes)-(steps+1)].configure(bg='yellow')
def rollDice():
global SERVER
    # the six Unicode die-face characters (U+2680 .. U+2685),
    # written as \u escapes so the source file stays ASCII
diceChoices=['\u2680','\u2681','\u2682','\u2683','\u2684','\u2685']
#configure the label
value = random.choice(diceChoices)
global playerType
global rollButton
global playerTurn
rollButton.destroy()
playerTurn = False
if(playerType == 'player1'):
SERVER.send(f'{value}player2Turn'.encode())
if(playerType == 'player2'):
SERVER.send(f'{value}player1Turn'.encode())
def leftBoard():
global gameWindow
global leftBoxes
global screen_height
xPos = 30
for box in range(0,11):
if(box == 0):
boxLabel = Label(gameWindow, font=("Helvetica",30), width=2, height=1, relief='ridge', borderwidth=0, bg="red")
boxLabel.place(x=xPos, y=screen_height/2 - 88)
leftBoxes.append(boxLabel)
xPos +=50
else:
boxLabel = Label(gameWindow, font=("Helvetica",55), width=2, height=1, relief='ridge', borderwidth=0, bg="white")
boxLabel.place(x=xPos, y=screen_height/2- 100)
leftBoxes.append(boxLabel)
xPos +=75
def rightBoard():
global gameWindow
global rightBoxes
global screen_height
xPos = 988
for box in range(0,11):
if(box == 10):
boxLabel = Label(gameWindow, font=("Helvetica",30), width=2, height=1, relief='ridge', borderwidth=0, bg="yellow")
boxLabel.place(x=xPos, y=screen_height/2-88)
rightBoxes.append(boxLabel)
xPos +=50
else:
boxLabel = Label(gameWindow, font=("Helvetica",55), width=2, height=1, relief='ridge', borderwidth=0, bg="white")
boxLabel.place(x=xPos, y=screen_height/2 - 100)
rightBoxes.append(boxLabel)
xPos +=75
def finishingBox():
global gameWindow
global finishingBox
global screen_width
global screen_height
finishingBox = Label(gameWindow, text="Home", font=("Chalkboard SE", 32), width=8, height=4, borderwidth=0, bg="green", fg="white")
finishingBox.place(x=screen_width/2 - 68, y=screen_height/2 -160)
def gameWindow():
global gameWindow
global canvas2
global screen_width
global screen_height
global dice
global winingMessage
global resetButton
gameWindow = Tk()
gameWindow.title("Ludo Ladder")
gameWindow.attributes('-fullscreen',True)
screen_width = gameWindow.winfo_screenwidth()
screen_height = gameWindow.winfo_screenheight()
bg = ImageTk.PhotoImage(file = "./assets/background.png")
canvas2 = Canvas( gameWindow, width = 500,height = 500)
canvas2.pack(fill = "both", expand = True)
# Display image
canvas2.create_image( 0, 0, image = bg, anchor = "nw")
# Add Text
canvas2.create_text( screen_width/2, screen_height/5, text = "Ludo Ladder", font=("Chalkboard SE",100), fill="white")
# Declaring Wining Message
winingMessage = canvas2.create_text(screen_width/2 + 10, screen_height/2 + 250, text = "", font=("Chalkboard SE",100), fill='#fff176')
# Creating Reset Button
resetButton = Button(gameWindow,text="Reset Game", fg='black', font=("Chalkboard SE", 15), bg="grey",command=restGame, width=20, height=5)
leftBoard()
rightBoard()
finishingBox()
global rollButton
rollButton = Button(gameWindow,text="Roll Dice", fg='black', font=("Chalkboard SE", 15), bg="grey",command=rollDice, width=20, height=5)
global playerTurn
global playerType
global playerName
global player1Name
global player2Name
global player1Label
global player2Label
global player1Score
global player2Score
global player1ScoreLabel
global player2ScoreLabel
if(playerType == 'player1' and playerTurn):
rollButton.place(x=screen_width / 2 - 80, y=screen_height/2 + 250)
else:
rollButton.pack_forget()
# Creating Dice with value 1
dice = canvas2.create_text(screen_width/2 + 10, screen_height/2 + 100, text = "\u2680", font=("Chalkboard SE",250), fill="white")
# Creating name board
player1Label = canvas2.create_text(400, screen_height/2 + 100, text = player1Name, font=("Chalkboard SE",80), fill='#fff176' )
player2Label = canvas2.create_text(screen_width - 300, screen_height/2 + 100, text = player2Name, font=("Chalkboard SE",80), fill='#fff176' )
# Creating Score Board
player1ScoreLabel = canvas2.create_text(400, screen_height/2 - 160, text = player1Score, font=("Chalkboard SE",80), fill='#fff176' )
player2ScoreLabel = canvas2.create_text(screen_width - 300, screen_height/2 - 160, text = player2Score, font=("Chalkboard SE",80), fill='#fff176' )
gameWindow.resizable(True, True)
gameWindow.mainloop()
def saveName():
global SERVER
global playerName
global nameWindow
global nameEntry
playerName = nameEntry.get()
nameEntry.delete(0, END)
nameWindow.destroy()
SERVER.send(playerName.encode())
gameWindow()
def askPlayerName():
global playerName
global nameEntry
global nameWindow
global canvas1
nameWindow = Tk()
nameWindow.title("Ludo Ladder")
nameWindow.attributes('-fullscreen',True)
screen_width = nameWindow.winfo_screenwidth()
screen_height = nameWindow.winfo_screenheight()
bg = ImageTk.PhotoImage(file = "./assets/background.png")
canvas1 = Canvas( nameWindow, width = 500,height = 500)
canvas1.pack(fill = "both", expand = True)
# Display image
canvas1.create_image( 0, 0, image = bg, anchor = "nw")
canvas1.create_text( screen_width/2, screen_height/5, text = "Enter Name", font=("Chalkboard SE",100), fill="white")
nameEntry = Entry(nameWindow, width=15, justify='center', font=('Chalkboard SE', 50), bd=5, bg='white')
nameEntry.place(x = screen_width/2 - 220, y=screen_height/4 + 100)
button = Button(nameWindow, text="Save", font=("Chalkboard SE", 30),width=15, command=saveName, height=2, bg="#80deea", bd=3)
button.place(x = screen_width/2 - 130, y=screen_height/2 - 30)
nameWindow.resizable(True, True)
nameWindow.mainloop()
def restGame():
global SERVER
SERVER.send("reset game".encode())
def handleWin(message):
global playerType
global rollButton
global canvas2
global winingMessage
global screen_width
global screen_height
global resetButton
#destroying button
if('Red' in message):
if(playerType == 'player2'):
rollButton.destroy()
if('Yellow' in message):
if(playerType == 'player1'):
rollButton.destroy()
# Adding Wining Message
message = message.split(".")[0] + "."
canvas2.itemconfigure(winingMessage, text = message)
#Placing Reset Button
resetButton.place(x=screen_width / 2 - 80, y=screen_height - 220)
def updateScore(message):
global canvas2
global player1Score
global player2Score
global player1ScoreLabel
global player2ScoreLabel
if('Red' in message):
player1Score +=1
if('Yellow' in message):
player2Score +=1
canvas2.itemconfigure(player1ScoreLabel, text = player1Score)
canvas2.itemconfigure(player2ScoreLabel, text = player2Score)
def handleResetGame():
global canvas2
global playerType
global gameWindow
global rollButton
global dice
global screen_width
global screen_height
global playerTurn
global rightBoxes
global leftBoxes
global finishingBox
global resetButton
global winingMessage
global winingFunctionCall
canvas2.itemconfigure(dice, text='\u2680')
# Handling Reset Game
if(playerType == 'player1'):
# Creating roll dice button
rollButton = Button(gameWindow,text="Roll Dice", fg='black', font=("Chalkboard SE", 15), bg="grey",command=rollDice, width=20, height=5)
rollButton.place(x=screen_width / 2 - 80, y=screen_height/2 + 250)
playerTurn = True
if(playerType == 'player2'):
playerTurn = False
for rBox in rightBoxes[-2::-1]:
rBox.configure(bg='white')
for lBox in leftBoxes[1:]:
lBox.configure(bg='white')
finishingBox.configure(bg='green')
canvas2.itemconfigure(winingMessage, text="")
resetButton.destroy()
# Again Recreating Reset Button for next game
resetButton = Button(gameWindow,text="Reset Game", fg='black', font=("Chalkboard SE", 15), bg="grey",command=restGame, width=20, height=5)
winingFunctionCall = 0
def recivedMsg():
global SERVER
global playerType
global playerTurn
global rollButton
global screen_width
global screen_height
global canvas2
global dice
global gameWindow
global player1Name
global player2Name
global player1Label
global player2Label
global winingFunctionCall
while True:
message = SERVER.recv(2048).decode()
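        # Message formats handled below, inferred from this client (the server is not shown here):
        #   "{'player_type': 'player1', 'turn': True}"                    -> role assignment
        #   "{'player_names': [{'type': 'player1', 'name': '...'}, ...]}" -> name board update
        #   "<die face>player1Turn" / "<die face>player2Turn"             -> roll result + turn hand-off
        #   "Red wins the game." / "Yellow wins the game."                -> win notification
        #   "reset game"                                                  -> board reset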
if('player_type' in message):
recvMsg = eval(message)
playerType = recvMsg['player_type']
playerTurn = recvMsg['turn']
elif('player_names' in message):
players = eval(message)
players = players["player_names"]
for p in players:
if(p["type"] == 'player1'):
player1Name = p['name']
if(p['type'] == 'player2'):
player2Name = p['name']
elif('⚀' in message):
# Dice with value 1
canvas2.itemconfigure(dice, text='\u2680')
elif('⚁' in message):
# Dice with value 2
canvas2.itemconfigure(dice, text='\u2681')
elif('⚂' in message):
# Dice with value 3
canvas2.itemconfigure(dice, text='\u2682')
elif('⚃' in message):
# Dice with value 4
canvas2.itemconfigure(dice, text='\u2683')
elif('⚄' in message):
# Dice with value 5
canvas2.itemconfigure(dice, text='\u2684')
elif('⚅' in message):
# Dice with value 6
canvas2.itemconfigure(dice, text='\u2685')
elif('wins the game.' in message and winingFunctionCall == 0):
winingFunctionCall +=1
handleWin(message)
# Addition Activity
updateScore(message)
elif(message == 'reset game'):
handleResetGame()
        #creating rollbutton
        if('player1Turn' in message and playerType == 'player1'):
            playerTurn = True
            rollButton = Button(gameWindow,text="Roll Dice", fg='black', font=("Chalkboard SE", 15), bg="grey",command=rollDice, width=20, height=5)
            rollButton.place(x=screen_width / 2 - 80, y=screen_height/2 + 250)
        elif('player2Turn' in message and playerType == 'player2'):
            playerTurn = True
            rollButton = Button(gameWindow,text="Roll Dice", fg='black', font=("Chalkboard SE", 15), bg="grey",command=rollDice, width=20, height=5)
            rollButton.place(x=screen_width / 2 - 80, y=screen_height/2 + 260)
        # apply the rolled move locally: the turn hand-off names the next player,
        # so the piece that moves belongs to the player who just rolled
        if('player1Turn' in message or 'player2Turn' in message):
            diceChoices = ['⚀','⚁','⚂','⚃','⚄','⚅']
            diceValue = diceChoices.index(message[0]) + 1
            if('player2Turn' in message):
                movePlayer1(diceValue)
            if('player1Turn' in message):
                movePlayer2(diceValue)
# Creating Name Board
if(player1Name != 'joining' and canvas2):
canvas2.itemconfigure(player1Label, text=player1Name)
if(player2Name != 'joining' and canvas2):
canvas2.itemconfigure(player2Label, text=player2Name)
def setup():
global SERVER
global PORT
global IP_ADDRESS
PORT = 8000
IP_ADDRESS = '127.0.0.1'
SERVER = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
SERVER.connect((IP_ADDRESS, PORT))
thread = Thread(target=recivedMsg)
thread.start()
askPlayerName()
setup()
|
app.py
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
# AWS X-ray support
from aws_xray_sdk.core import xray_recorder
from aws_xray_sdk.ext.flask.middleware import XRayMiddleware
from aws_xray_sdk.core import patch_all
patch_all()
xray_recorder.begin_segment("Videos-init")
import logging
import json
import os
import pathlib
import pprint
import subprocess
import threading
import time
import boto3
import srt
from flask import Flask, jsonify, Response
from flask_cors import CORS
# -- Environment variables - defined by CloudFormation when deployed
VIDEO_BUCKET = os.environ.get('RESOURCE_BUCKET')
SSM_VIDEO_CHANNEL_MAP_PARAM = os.environ.get('PARAMETER_IVS_VIDEO_CHANNEL_MAP', 'retaildemostore-ivs-video-channel-map')
USE_DEFAULT_IVS_STREAMS = os.environ.get('USE_DEFAULT_IVS_STREAMS') == 'true'
DEFAULT_THUMB_FNAME = 'default_thumb.png'
STATIC_FOLDER = '/app/static'
STATIC_URL_PATH = '/static'
SUBTITLE_FORMAT = 'srt'
LOCAL_VIDEO_DIR = '/app/video-files/'
DEFAULT_STREAMS_CONFIG_S3_PATH = 'videos/default_streams/default_streams.json'
# -- Parameterised ffmpeg commands
FFMPEG_STREAM_CMD = """ffmpeg -loglevel panic -hide_banner -re -stream_loop -1 -i \"{video_filepath}\" \
-r 30 -c:v copy -f flv rtmps://{ingest_endpoint}:443/app/{stream_key} -map 0:s -f {subtitle_format} -"""
FFMPEG_SUBS_COMMAND = "ffmpeg -i \"{video_filepath}\" \"{subtitle_path}\""
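# FFMPEG_STREAM_CMD loops the local file indefinitely (-stream_loop -1) at native speed (-re),
# copies the video stream to the channel's RTMPS ingest endpoint, and writes the embedded
# subtitle track (-map 0:s) to stdout so it can be relayed to IVS as timed metadata.
# FFMPEG_SUBS_COMMAND extracts that same subtitle track to a standalone file.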
# Globally accessed variable to store stream metadata (URLs & associated product IDs). Returned via `stream_details`
# endpoint
stream_details = {}
ivs_client = boto3.client('ivs')
ssm_client = boto3.client('ssm')
s3_client = boto3.client('s3')
# -- Load default streams config
def load_default_streams_config():
    app.logger.info(f"Downloading default streams config from bucket {VIDEO_BUCKET} with key {DEFAULT_STREAMS_CONFIG_S3_PATH}.")
config_response = s3_client.get_object(Bucket=VIDEO_BUCKET, Key=DEFAULT_STREAMS_CONFIG_S3_PATH)
config = json.loads(config_response['Body'].read().decode('utf-8'))
for (key, entry) in config.items():
app.logger.info(f"{key}, {entry}")
config[key] = {**entry, 'thumb_url': STATIC_URL_PATH + '/' + entry['thumb_fname']}
config[key].pop('thumb_fname', None)
app.logger.info("Pulled config:")
app.logger.info(config)
return config
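# A default_streams.json entry is assumed to look roughly like this (hypothetical values):
#   {"0": {"playback_url": "https://.../playlist.m3u8", "thumb_fname": "thumb0.png", "products": ["1", "2"]}}
# where thumb_fname is swapped above for a served thumb_url.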
# -- Video streaming
def download_video_file(s3_key):
"""
Downloads a video file and associated thumbnail from S3. Thumbnails are identified by a .png file with the same
name and in the same location as the video.
"""
local_path = LOCAL_VIDEO_DIR + s3_key.split('/')[-1]
app.logger.info(f"Downloading file {s3_key} from bucket {VIDEO_BUCKET} to {local_path}.")
s3_client.download_file(Bucket=VIDEO_BUCKET, Key=s3_key, Filename=local_path)
app.logger.info(f"File {s3_key} downloaded from bucket {VIDEO_BUCKET} to {local_path}.")
thumbnail_path = None
thumbnail_key = '.'.join(s3_key.split('.')[:-1]) + '.png'
try:
local_thumbnail_fname = thumbnail_key.split('/')[-1]
local_thumbnail_path = app.static_folder + '/' + local_thumbnail_fname
s3_client.download_file(Bucket=VIDEO_BUCKET, Key=thumbnail_key, Filename=local_thumbnail_path)
app.logger.info(f"File {thumbnail_key} downloaded from bucket {VIDEO_BUCKET} to {local_thumbnail_path}.")
thumbnail_path = app.static_url_path + '/' + local_thumbnail_fname
except Exception as e:
app.logger.warning(f'No thumbnail available for {VIDEO_BUCKET}/{s3_key} as {VIDEO_BUCKET}/{thumbnail_key} - '
f'exception: {e}')
return local_path, thumbnail_path
def get_ffmpeg_stream_cmd(video_filepath, ingest_endpoint, stream_key, subtitle_format):
"""
Returns the command to start streaming a video using ffmpeg.
"""
return FFMPEG_STREAM_CMD.format(**locals())
def get_ffmpeg_subs_cmd(video_filepath, subtitle_path):
"""
Returns the ffmpeg command to rip subtitles (ie. metadata) from a video file.
"""
return FFMPEG_SUBS_COMMAND.format(**locals())
def get_featured_products(video_filepath, channel_id):
"""
Extracts a list of product IDs from the metadata attached to a video file. The values are saved in the global
`stream_details` dict.
"""
subtitle_path = pathlib.Path(video_filepath).with_suffix('.srt')
get_subs_command = get_ffmpeg_subs_cmd(video_filepath, subtitle_path)
process = subprocess.run(
get_subs_command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True, shell=True)
with open(subtitle_path) as f:
subtitle_content = srt.parse(f)
for line in subtitle_content:
product_id = json.loads(line.content)['productId']
if 'products' not in stream_details[channel_id]:
stream_details[channel_id]['products'] = [product_id]
else:
if product_id not in stream_details[channel_id]['products']:
stream_details[channel_id]['products'].append(product_id)
def is_ssm_parameter_set(parameter_name):
"""
Returns whether an SSM parameter with a given name has been set (ie. value is not 'NONE')
"""
try:
response = ssm_client.get_parameter(Name=parameter_name)
return response['Parameter']['Value'] != 'NONE'
except ssm_client.exceptions.ParameterNotFound:
return False
def put_ivs_metadata(channel_arn, line):
"""
Sends metadata to a given IVS stream. Metadata can be any string, but the AWS Retail Demo Store UI expects
metadata of the format {"productId":"<product-id>"}
"""
try:
app.logger.info(f'Sending metadata to stream: {line}')
ivs_client.put_metadata(
channelArn=channel_arn,
metadata=line
)
except ivs_client.exceptions.ChannelNotBroadcasting:
app.logger.warning(f'Channel not broadcasting. Waiting for 5 seconds.')
app.logger.info('Running ffmpeg processes:')
app.logger.info(os.system("ps aux|grep 'PID\|ffmpeg'"))
time.sleep(5)
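        # The exception is swallowed deliberately: the metadata line for this subtitle
        # block is dropped and the caller's streaming loop continues with the next one.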
def get_stream_state(channel_arn):
"""
    Returns the state of a stream given its ARN. One of 'LIVE', 'OFFLINE' (from API response)
or 'NOT_BROADCASTING' (inferred).
"""
try:
stream_response = ivs_client.get_stream(channelArn=channel_arn)['stream']
stream_state = stream_response['state']
except ivs_client.exceptions.ChannelNotBroadcasting:
stream_state = "NOT_BROADCASTING"
return stream_state
def start_streams():
"""
Initiates all IVS streams based on environment variables. If the SSM_VIDEO_CHANNEL_MAP_PARAM (map of videos in
S3 to IVS channels) is set and the user has not requested to use the default IVS streams
(USE_DEFAULT_IVS_STREAMS, defined by CloudFormation input) then one stream will be started per video described
in the video to IVS channel map. Each stream runs in a separate thread.
If streams are not started, then `stream_details` will be set to the details of a collection of existing streams
"""
if is_ssm_parameter_set(SSM_VIDEO_CHANNEL_MAP_PARAM) and not USE_DEFAULT_IVS_STREAMS:
video_channel_param_value = ssm_client.get_parameter(Name=SSM_VIDEO_CHANNEL_MAP_PARAM)['Parameter']['Value']
app.logger.info(f"Found IVS channel map: {video_channel_param_value}")
video_channel_map = json.loads(video_channel_param_value)
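        # Expected shape: {"<s3 video key>": "<IVS channel ARN>"}, e.g.
        # {"videos/demo.mp4": "arn:aws:ivs:us-east-1:123456789012:channel/abcd"} (illustrative values).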
for idx, (s3_video_key, ivs_channel_arn) in enumerate(video_channel_map.items()):
threading.Thread(target=stream, args=(s3_video_key, ivs_channel_arn, idx)).start()
else:
global stream_details
stream_details = load_default_streams_config()
def stream(s3_video_key, ivs_channel_arn, channel_id):
"""
Starts the stream for a given video file and IVS channel. The video file is streamed on a loop using ffmpeg, and
any attached metadata (from the subtitles embedded in the video file) is sent to the channel's `put_metadata`
endpoint.
"""
video_filepath, thumb_url = download_video_file(s3_video_key)
if thumb_url is None:
thumb_url = app.static_url_path + '/' + DEFAULT_THUMB_FNAME
channel_response = ivs_client.get_channel(arn=ivs_channel_arn)['channel']
ingest_endpoint = channel_response['ingestEndpoint']
playback_endpoint = channel_response['playbackUrl']
stream_details[channel_id] = {'playback_url': playback_endpoint,
'thumb_url': thumb_url}
get_featured_products(video_filepath, channel_id)
stream_state = get_stream_state(ivs_channel_arn)
stream_arn = ivs_client.list_stream_keys(channelArn=ivs_channel_arn)['streamKeys'][0]['arn']
stream_key = ivs_client.get_stream_key(arn=stream_arn)['streamKey']['value']
app.logger.info(f"Stream details:\nIngest endpoint: {ingest_endpoint}\nStream state: {stream_state}")
if SUBTITLE_FORMAT == 'srt':
while True:
if stream_state != "NOT_BROADCASTING":
app.logger.info(f"Stream {stream_arn} is currently in state {stream_state}. Waiting for state NOT_BROADCASTING")
sleep_time = 20
app.logger.info(f"Waiting for {sleep_time} seconds")
time.sleep(sleep_time)
stream_state = get_stream_state(ivs_channel_arn)
continue
app.logger.info('Starting video stream')
ffmpeg_stream_cmd = get_ffmpeg_stream_cmd(video_filepath, ingest_endpoint, stream_key, SUBTITLE_FORMAT)
app.logger.info(f'ffmpeg command: {ffmpeg_stream_cmd}')
process = subprocess.Popen(
ffmpeg_stream_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True, shell=True)
app.logger.info('Running ffmpeg processes:')
app.logger.info(os.system("ps aux|grep 'PID\|ffmpeg'"))
lines = iter(process.stdout)
app.logger.info('Starting event stream')
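            # The loop below reads ffmpeg's stdout as SRT blocks: an index line, a
            # 'start --> end' time-range line, then one or more text lines up to a blank
            # line. The accumulated text (the product metadata JSON) is forwarded to the
            # IVS channel as timed metadata.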
while True:
try:
int(next(lines).strip())
time_range = next(lines).strip()
if not '-->' in time_range:
raise ValueError(f'Expected a time range instead of {time_range}')
send_text = ''
while True:
text = next(lines).strip()
if len(text) == 0: break
if len(send_text)>0: send_text+='\n'
send_text += text
put_ivs_metadata(ivs_channel_arn, send_text)
except StopIteration:
app.logger.warning('Video iteration has stopped unexpectedly. Attempting restart in 10 seconds.')
time.sleep(10)
break
else:
raise NotImplementedError(f'{SUBTITLE_FORMAT} is not currently supported by this demo.')
# -- End Video streaming
# -- Logging
class LoggingMiddleware(object):
def __init__(self, app):
self._app = app
def __call__(self, environ, resp):
errorlog = environ['wsgi.errors']
pprint.pprint(('REQUEST', environ), stream=errorlog)
def log_response(status, headers, *args):
pprint.pprint(('RESPONSE', status, headers), stream=errorlog)
return resp(status, headers, *args)
return self._app(environ, log_response)
# -- End Logging
# -- Exceptions
class BadRequest(Exception):
status_code = 400
def __init__(self, message, status_code=None, payload=None):
Exception.__init__(self)
self.message = message
if status_code is not None:
self.status_code = status_code
self.payload = payload
def to_dict(self):
rv = dict(self.payload or ())
rv['message'] = self.message
return rv
# -- Handlers
app = Flask(__name__,
static_folder=STATIC_FOLDER,
static_url_path=STATIC_URL_PATH)
cors = CORS(app)
xray_recorder.configure(service='Videos Service')
XRayMiddleware(app, xray_recorder)
@app.errorhandler(BadRequest)
def handle_bad_request(error):
response = jsonify(error.to_dict())
response.status_code = error.status_code
return response
@app.route('/')
def index():
return 'Videos Service'
@app.route('/stream_details')
def streams():
response_data = []
for value in stream_details.values():
response_data.append(value)
response = {
"streams": response_data
}
return Response(json.dumps(response), content_type = 'application/json')
@app.route('/health')
def health():
return 'OK'
if __name__ == '__main__':
app.wsgi_app = LoggingMiddleware(app.wsgi_app)
app.logger.setLevel(level=logging.INFO)
app.logger.info(f"VIDEO_BUCKET: {VIDEO_BUCKET}")
app.logger.info(f"SSM_VIDEO_CHANNEL_MAP_PARAM: {SSM_VIDEO_CHANNEL_MAP_PARAM}")
app.logger.info(f"USE_DEFAULT_IVS_STREAMS: {USE_DEFAULT_IVS_STREAMS}")
app.logger.info("Starting video streams")
start_streams()
app.logger.info("Starting API")
app.run(debug=False, host='0.0.0.0', port=80)
|
cli.py
|
import http.server
import os
import socketserver
import threading
from pathlib import Path
from webbrowser import open as webbrowser_open
import click
import jinja2
from cli import echo
from engine.stack import CloudformationStack
HTTP_SERVE_ADDRESS = "127.0.0.1"
HTTP_SERVE_PORT = 1234
APP_MAIN_PATH = Path(__file__).parent
def create_demo_index_file(tracking_server_url):
tpl_loader = jinja2.FileSystemLoader(searchpath=APP_MAIN_PATH.absolute())
tpl_env = jinja2.Environment(loader=tpl_loader)
tpl = tpl_env.get_template("index.html.tpl")
html = tpl.render(tracking_server_url=tracking_server_url)
index_file = Path(APP_MAIN_PATH, "index.html")
with index_file.open("w") as fh:
fh.write(html)
return index_file
def serve_demo():
os.chdir(APP_MAIN_PATH)
handler = http.server.SimpleHTTPRequestHandler
httpd = socketserver.TCPServer((HTTP_SERVE_ADDRESS, HTTP_SERVE_PORT), handler)
threading.Thread(target=httpd.serve_forever).start()
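# The demo HTTP server runs serve_forever in a plain (non-daemon) thread, so the
# process keeps serving until it is interrupted from the terminal.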
def demo_tracking_web(cf_stack_name, cfg):
@click.command(name="demo-tracking-web")
def _demo_tracking_web():
echo.h1("Web Tracking Demo")
serve_url = f"http://{HTTP_SERVE_ADDRESS}:{HTTP_SERVE_PORT}/"
# create index.html
cf_stack = CloudformationStack(cf_stack_name, cfg)
create_demo_index_file(f'{cf_stack.get_output("APIGatewayEndpoint")}/matomo-event-receiver/')
# serve the demo
echo.enum_elm(f"serving demo at {serve_url}")
echo.enum_elm("quit the server with <strg|control>-c.")
echo.info("")
serve_demo()
# open browser
webbrowser_open(serve_url)
return _demo_tracking_web
|
test_postgresql.py
|
import datetime
import mock # for the mock.call method, importing it without a namespace breaks python3
import os
import psycopg2
import shutil
import subprocess
import unittest
from mock import Mock, MagicMock, PropertyMock, patch, mock_open
from patroni.async_executor import CriticalTask
from patroni.dcs import Cluster, Leader, Member, RemoteMember, SyncState
from patroni.exceptions import PostgresConnectionException, PostgresException
from patroni.postgresql import Postgresql, STATE_REJECT, STATE_NO_RESPONSE
from patroni.postmaster import PostmasterProcess
from patroni.utils import RetryFailedError
from six.moves import builtins
from threading import Thread, current_thread
class MockCursor(object):
def __init__(self, connection):
self.connection = connection
self.closed = False
self.rowcount = 0
self.results = []
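    # execute() dispatches canned result sets keyed on the SQL text prefix, so the
    # tests can drive specific Postgresql code paths without a real server.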
def execute(self, sql, *params):
if sql.startswith('blabla'):
raise psycopg2.ProgrammingError()
elif sql == 'CHECKPOINT':
raise psycopg2.OperationalError()
elif sql.startswith('RetryFailedError'):
raise RetryFailedError('retry')
elif sql.startswith('SELECT slot_name'):
self.results = [('blabla',), ('foobar',)]
elif sql.startswith('SELECT CASE WHEN pg_is_in_recovery()'):
self.results = [(1, 2)]
elif sql.startswith('SELECT pg_is_in_recovery()'):
self.results = [(False, 2)]
elif sql.startswith('WITH replication_info AS ('):
replication_info = '[{"application_name":"walreceiver","client_addr":"1.2.3.4",' +\
'"state":"streaming","sync_state":"async","sync_priority":0}]'
self.results = [('', 0, '', '', '', '', False, replication_info)]
elif sql.startswith('SELECT name, setting'):
self.results = [('wal_segment_size', '2048', '8kB', 'integer', 'internal'),
('search_path', 'public', None, 'string', 'user'),
('port', '5433', None, 'integer', 'postmaster'),
('listen_addresses', '*', None, 'string', 'postmaster'),
('autovacuum', 'on', None, 'bool', 'sighup'),
('unix_socket_directories', '/tmp', None, 'string', 'postmaster')]
elif sql.startswith('IDENTIFY_SYSTEM'):
self.results = [('1', 2, '0/402EEC0', '')]
elif sql.startswith('SELECT isdir, modification'):
self.results = [(False, datetime.datetime.now())]
elif sql.startswith('SELECT pg_read_file'):
self.results = [('1\t0/40159C0\tno recovery target specified\n\n' +
'2\t1/40159C0\tno recovery target specified\n',)]
elif sql.startswith('TIMELINE_HISTORY '):
self.results = [('', b'x\t0/40159C0\tno recovery target specified\n\n' +
b'1\t0/40159C0\tno recovery target specified\n\n' +
b'2\t0/402DD98\tno recovery target specified\n\n' +
b'3\t0/403DD98\tno recovery target specified\n')]
else:
self.results = [(None, None, None, None, None, None, None, None, None, None)]
def fetchone(self):
return self.results[0]
def fetchall(self):
return self.results
def __iter__(self):
for i in self.results:
yield i
def __enter__(self):
return self
def __exit__(self, *args):
pass
class MockConnect(object):
server_version = 99999
autocommit = False
closed = 0
def cursor(self):
return MockCursor(self)
def __enter__(self):
return self
def __exit__(self, *args):
pass
@staticmethod
def close():
pass
class MockPostmaster(object):
def __init__(self, is_running=True, is_single_master=False):
self.is_running = Mock(return_value=is_running)
self.is_single_master = Mock(return_value=is_single_master)
self.wait_for_user_backends_to_close = Mock()
self.signal_stop = Mock(return_value=None)
self.wait = Mock()
def pg_controldata_string(*args, **kwargs):
return b"""
pg_control version number: 942
Catalog version number: 201509161
Database system identifier: 6200971513092291716
Database cluster state: shut down in recovery
pg_control last modified: Fri Oct 2 10:57:06 2015
Latest checkpoint location: 0/30000C8
Prior checkpoint location: 0/2000060
Latest checkpoint's REDO location: 0/3000090
Latest checkpoint's REDO WAL file: 000000020000000000000003
Latest checkpoint's TimeLineID: 2
Latest checkpoint's PrevTimeLineID: 2
Latest checkpoint's full_page_writes: on
Latest checkpoint's NextXID: 0/943
Latest checkpoint's NextOID: 24576
Latest checkpoint's NextMultiXactId: 1
Latest checkpoint's NextMultiOffset: 0
Latest checkpoint's oldestXID: 931
Latest checkpoint's oldestXID's DB: 1
Latest checkpoint's oldestActiveXID: 943
Latest checkpoint's oldestMultiXid: 1
Latest checkpoint's oldestMulti's DB: 1
Latest checkpoint's oldestCommitTs: 0
Latest checkpoint's newestCommitTs: 0
Time of latest checkpoint: Fri Oct 2 10:56:54 2015
Fake LSN counter for unlogged rels: 0/1
Minimum recovery ending location: 0/30241F8
Min recovery ending loc's timeline: 2
Backup start location: 0/0
Backup end location: 0/0
End-of-backup record required: no
wal_level setting: hot_standby
Current wal_log_hints setting: on
Current max_connections setting: 100
Current max_worker_processes setting: 8
Current max_prepared_xacts setting: 0
Current max_locks_per_xact setting: 64
Current track_commit_timestamp setting: off
Maximum data alignment: 8
Database block size: 8192
Blocks per segment of large relation: 131072
WAL block size: 8192
Bytes per WAL segment: 16777216
Maximum length of identifiers: 64
Maximum columns in an index: 32
Maximum size of a TOAST chunk: 1996
Size of a large-object chunk: 2048
Date/time type storage: 64-bit integers
Float4 argument passing: by value
Float8 argument passing: by value
Data page checksum version: 0
"""
def psycopg2_connect(*args, **kwargs):
return MockConnect()
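# The class-level patches below replace subprocess.call and psycopg2.connect for the
# whole TestPostgresql class, so every query goes through MockConnect/MockCursor.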
@patch('subprocess.call', Mock(return_value=0))
@patch('psycopg2.connect', psycopg2_connect)
class TestPostgresql(unittest.TestCase):
_PARAMETERS = {'wal_level': 'hot_standby', 'max_replication_slots': 5, 'f.oo': 'bar',
'search_path': 'public', 'hot_standby': 'on', 'max_wal_senders': 5,
'wal_keep_segments': 8, 'wal_log_hints': 'on', 'max_locks_per_transaction': 64,
'max_worker_processes': 8, 'max_connections': 100, 'max_prepared_transactions': 0,
'track_commit_timestamp': 'off', 'unix_socket_directories': '/tmp'}
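    # 'f.oo' is an extension-style (dotted) parameter; test_start checks that it is
    # still written out to postgresql.conf as f.oo = 'bar'.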
@patch('subprocess.call', Mock(return_value=0))
@patch('psycopg2.connect', psycopg2_connect)
@patch('os.rename', Mock())
@patch.object(Postgresql, 'get_major_version', Mock(return_value=90600))
@patch.object(Postgresql, 'is_running', Mock(return_value=True))
def setUp(self):
self.data_dir = 'data/test0'
self.config_dir = self.data_dir
if not os.path.exists(self.data_dir):
os.makedirs(self.data_dir)
self.p = Postgresql({'name': 'test0', 'scope': 'batman', 'data_dir': self.data_dir,
'config_dir': self.config_dir, 'retry_timeout': 10, 'pgpass': '/tmp/pgpass0',
'listen': '127.0.0.2, 127.0.0.3:5432', 'connect_address': '127.0.0.2:5432',
'authentication': {'superuser': {'username': 'test', 'password': 'test'},
'replication': {'username': 'replicator', 'password': 'rep-pass'}},
'remove_data_directory_on_rewind_failure': True,
'use_pg_rewind': True, 'pg_ctl_timeout': 'bla',
'parameters': self._PARAMETERS,
'recovery_conf': {'foo': 'bar'},
'pg_hba': ['host all all 0.0.0.0/0 md5'],
'callbacks': {'on_start': 'true', 'on_stop': 'true', 'on_reload': 'true',
'on_restart': 'true', 'on_role_change': 'true'}})
self.p._callback_executor = Mock()
self.leadermem = Member(0, 'leader', 28, {'conn_url': 'postgres://replicator:rep-pass@127.0.0.1:5435/postgres'})
self.leader = Leader(-1, 28, self.leadermem)
self.other = Member(0, 'test-1', 28, {'conn_url': 'postgres://replicator:rep-pass@127.0.0.1:5433/postgres',
'tags': {'replicatefrom': 'leader'}})
self.me = Member(0, 'test0', 28, {'conn_url': 'postgres://replicator:rep-pass@127.0.0.1:5434/postgres'})
def tearDown(self):
shutil.rmtree('data')
def test__initdb(self):
self.assertRaises(Exception, self.p.bootstrap, {'initdb': [{'pgdata': 'bar'}]})
self.assertRaises(Exception, self.p.bootstrap, {'initdb': [{'foo': 'bar', 1: 2}]})
self.assertRaises(Exception, self.p.bootstrap, {'initdb': [1]})
self.assertRaises(Exception, self.p.bootstrap, {'initdb': 1})
@patch('os.path.exists', Mock(return_value=True))
@patch('os.unlink', Mock())
def test_delete_trigger_file(self):
self.p.delete_trigger_file()
@patch('subprocess.Popen')
@patch.object(Postgresql, 'wait_for_startup')
@patch.object(Postgresql, 'wait_for_port_open')
@patch.object(Postgresql, 'is_running')
def test_start(self, mock_is_running, mock_wait_for_port_open, mock_wait_for_startup, mock_popen):
mock_is_running.return_value = MockPostmaster()
mock_wait_for_port_open.return_value = True
mock_wait_for_startup.return_value = False
mock_popen.return_value.stdout.readline.return_value = '123'
self.assertTrue(self.p.start())
mock_is_running.return_value = None
mock_postmaster = MockPostmaster()
with patch.object(PostmasterProcess, 'start', return_value=mock_postmaster):
pg_conf = os.path.join(self.data_dir, 'postgresql.conf')
open(pg_conf, 'w').close()
self.assertFalse(self.p.start(task=CriticalTask()))
with open(pg_conf) as f:
lines = f.readlines()
self.assertTrue("f.oo = 'bar'\n" in lines)
mock_wait_for_startup.return_value = None
self.assertFalse(self.p.start(10))
self.assertIsNone(self.p.start())
mock_wait_for_port_open.return_value = False
self.assertFalse(self.p.start())
task = CriticalTask()
task.cancel()
self.assertFalse(self.p.start(task=task))
self.p.cancel()
self.assertFalse(self.p.start())
@patch.object(Postgresql, 'pg_isready')
@patch('patroni.postgresql.polling_loop', Mock(return_value=range(1)))
def test_wait_for_port_open(self, mock_pg_isready):
mock_pg_isready.return_value = STATE_NO_RESPONSE
mock_postmaster = MockPostmaster(is_running=False)
# No pid file and postmaster death
self.assertFalse(self.p.wait_for_port_open(mock_postmaster, 1))
mock_postmaster.is_running.return_value = True
# timeout
self.assertFalse(self.p.wait_for_port_open(mock_postmaster, 1))
# pg_isready failure
mock_pg_isready.return_value = 'garbage'
self.assertTrue(self.p.wait_for_port_open(mock_postmaster, 1))
# cancelled
self.p.cancel()
self.assertFalse(self.p.wait_for_port_open(mock_postmaster, 1))
@patch('time.sleep', Mock())
@patch.object(Postgresql, 'is_running')
@patch.object(Postgresql, '_wait_for_connection_close', Mock())
def test_stop(self, mock_is_running):
# Postmaster is not running
mock_callback = Mock()
mock_is_running.return_value = None
self.assertTrue(self.p.stop(on_safepoint=mock_callback))
mock_callback.assert_called()
# Is running, stopped successfully
mock_is_running.return_value = mock_postmaster = MockPostmaster()
mock_callback.reset_mock()
self.assertTrue(self.p.stop(on_safepoint=mock_callback))
mock_callback.assert_called()
mock_postmaster.signal_stop.assert_called()
# Stop signal failed
mock_postmaster.signal_stop.return_value = False
self.assertFalse(self.p.stop())
# Stop signal failed to find process
mock_postmaster.signal_stop.return_value = True
mock_callback.reset_mock()
self.assertTrue(self.p.stop(on_safepoint=mock_callback))
mock_callback.assert_called()
def test_restart(self):
self.p.start = Mock(return_value=False)
self.assertFalse(self.p.restart())
self.assertEqual(self.p.state, 'restart failed (restarting)')
@patch.object(builtins, 'open', MagicMock())
def test_write_pgpass(self):
self.p.write_pgpass({'host': 'localhost', 'port': '5432', 'user': 'foo'})
self.p.write_pgpass({'host': 'localhost', 'port': '5432', 'user': 'foo', 'password': 'bar'})
def test_checkpoint(self):
with patch.object(MockCursor, 'fetchone', Mock(return_value=(True, ))):
self.assertEqual(self.p.checkpoint({'user': 'postgres'}), 'is_in_recovery=true')
with patch.object(MockCursor, 'execute', Mock(return_value=None)):
self.assertIsNone(self.p.checkpoint())
self.assertEqual(self.p.checkpoint(), 'not accessible or not healty')
@patch.object(Postgresql, 'cancellable_subprocess_call')
@patch('patroni.postgresql.Postgresql.write_pgpass', MagicMock(return_value=dict()))
def test_pg_rewind(self, mock_cancellable_subprocess_call):
r = {'user': '', 'host': '', 'port': '', 'database': '', 'password': ''}
mock_cancellable_subprocess_call.return_value = 0
self.assertTrue(self.p.pg_rewind(r))
mock_cancellable_subprocess_call.side_effect = OSError
self.assertFalse(self.p.pg_rewind(r))
def test_check_recovery_conf(self):
self.p.write_recovery_conf({'primary_conninfo': 'foo'})
self.assertFalse(self.p.check_recovery_conf(None))
self.p.write_recovery_conf({})
self.assertTrue(self.p.check_recovery_conf(None))
@patch.object(Postgresql, 'start', Mock())
@patch.object(Postgresql, 'can_rewind', PropertyMock(return_value=True))
def test__get_local_timeline_lsn(self):
self.p.trigger_check_diverged_lsn()
with patch.object(Postgresql, 'controldata',
Mock(return_value={'Database cluster state': 'shut down in recovery',
'Minimum recovery ending location': '0/0',
"Min recovery ending loc's timeline": '0'})):
self.p.rewind_needed_and_possible(self.leader)
with patch.object(Postgresql, 'is_running', Mock(return_value=True)):
with patch.object(MockCursor, 'fetchone', Mock(side_effect=[(False, ), Exception])):
self.p.rewind_needed_and_possible(self.leader)
@patch.object(Postgresql, 'start', Mock())
@patch.object(Postgresql, 'can_rewind', PropertyMock(return_value=True))
@patch.object(Postgresql, '_get_local_timeline_lsn', Mock(return_value=(2, '40159C1')))
@patch.object(Postgresql, 'check_leader_is_not_in_recovery')
def test__check_timeline_and_lsn(self, mock_check_leader_is_not_in_recovery):
mock_check_leader_is_not_in_recovery.return_value = False
self.p.trigger_check_diverged_lsn()
self.assertFalse(self.p.rewind_needed_and_possible(self.leader))
mock_check_leader_is_not_in_recovery.return_value = True
self.assertFalse(self.p.rewind_needed_and_possible(self.leader))
self.p.trigger_check_diverged_lsn()
with patch('psycopg2.connect', Mock(side_effect=Exception)):
self.assertFalse(self.p.rewind_needed_and_possible(self.leader))
self.p.trigger_check_diverged_lsn()
with patch.object(MockCursor, 'fetchone', Mock(side_effect=[('', 2, '0/0'), ('', b'3\t0/40159C0\tn\n')])):
self.assertFalse(self.p.rewind_needed_and_possible(self.leader))
self.p.trigger_check_diverged_lsn()
with patch.object(MockCursor, 'fetchone', Mock(return_value=('', 1, '0/0'))):
with patch.object(Postgresql, '_get_local_timeline_lsn', Mock(return_value=(1, '0/0'))):
self.assertFalse(self.p.rewind_needed_and_possible(self.leader))
self.p.trigger_check_diverged_lsn()
self.assertTrue(self.p.rewind_needed_and_possible(self.leader))
@patch.object(MockCursor, 'fetchone', Mock(side_effect=[(True,), Exception]))
def test_check_leader_is_not_in_recovery(self):
self.p.check_leader_is_not_in_recovery()
self.p.check_leader_is_not_in_recovery()
@patch.object(Postgresql, 'cancellable_subprocess_call', Mock(return_value=0))
@patch.object(Postgresql, 'checkpoint', side_effect=['', '1'])
@patch.object(Postgresql, 'stop', Mock(return_value=False))
@patch.object(Postgresql, 'start', Mock())
def test_rewind(self, mock_checkpoint):
self.p.rewind(self.leader)
with patch.object(Postgresql, 'pg_rewind', Mock(return_value=False)):
mock_checkpoint.side_effect = ['1', '', '', '']
self.p.rewind(self.leader)
self.p.rewind(self.leader)
with patch.object(Postgresql, 'check_leader_is_not_in_recovery', Mock(return_value=False)):
self.p.rewind(self.leader)
self.p.config['remove_data_directory_on_rewind_failure'] = False
self.p.trigger_check_diverged_lsn()
self.p.rewind(self.leader)
with patch.object(Postgresql, 'is_running', Mock(return_value=True)):
self.p.rewind(self.leader)
self.p.is_leader = Mock(return_value=False)
self.p.rewind(self.leader)
@patch.object(Postgresql, 'is_running', Mock(return_value=False))
@patch.object(Postgresql, 'start', Mock())
def test_follow(self):
m = RemoteMember('1', {'restore_command': '2', 'recovery_min_apply_delay': 3, 'archive_cleanup_command': '4'})
self.p.follow(m)
@patch('subprocess.check_output', Mock(return_value=0, side_effect=pg_controldata_string))
def test_can_rewind(self):
with patch('subprocess.call', MagicMock(return_value=1)):
self.assertFalse(self.p.can_rewind)
with patch('subprocess.call', side_effect=OSError):
self.assertFalse(self.p.can_rewind)
with patch.object(Postgresql, 'controldata', Mock(return_value={'wal_log_hints setting': 'on'})):
self.assertTrue(self.p.can_rewind)
self.p.config['use_pg_rewind'] = False
self.assertFalse(self.p.can_rewind)
@patch('time.sleep', Mock())
@patch.object(Postgresql, 'cancellable_subprocess_call')
@patch.object(Postgresql, 'remove_data_directory', Mock(return_value=True))
def test_create_replica(self, mock_cancellable_subprocess_call):
self.p.delete_trigger_file = Mock(side_effect=OSError)
self.p.config['create_replica_methods'] = ['wale', 'basebackup']
self.p.config['wale'] = {'command': 'foo'}
mock_cancellable_subprocess_call.return_value = 0
self.assertEqual(self.p.create_replica(self.leader), 0)
del self.p.config['wale']
self.assertEqual(self.p.create_replica(self.leader), 0)
self.p.config['create_replica_methods'] = ['basebackup']
self.p.config['basebackup'] = [{'max_rate': '100M'}, 'no-sync']
self.assertEqual(self.p.create_replica(self.leader), 0)
self.p.config['basebackup'] = [{'max_rate': '100M', 'compress': '9'}]
with mock.patch('patroni.postgresql.logger.error', new_callable=Mock()) as mock_logger:
self.p.create_replica(self.leader)
mock_logger.assert_called_once()
self.assertTrue("only one key-value is allowed and value should be a string" in mock_logger.call_args[0][0],
"not matching {0}".format(mock_logger.call_args[0][0]))
self.p.config['basebackup'] = [42]
with mock.patch('patroni.postgresql.logger.error', new_callable=Mock()) as mock_logger:
self.p.create_replica(self.leader)
mock_logger.assert_called_once()
self.assertTrue("value should be string value or a single key-value pair" in mock_logger.call_args[0][0],
"not matching {0}".format(mock_logger.call_args[0][0]))
self.p.config['basebackup'] = {"foo": "bar"}
self.assertEqual(self.p.create_replica(self.leader), 0)
self.p.config['create_replica_methods'] = ['wale', 'basebackup']
del self.p.config['basebackup']
mock_cancellable_subprocess_call.return_value = 1
self.assertEqual(self.p.create_replica(self.leader), 1)
mock_cancellable_subprocess_call.side_effect = Exception('foo')
self.assertEqual(self.p.create_replica(self.leader), 1)
mock_cancellable_subprocess_call.side_effect = [1, 0]
self.assertEqual(self.p.create_replica(self.leader), 0)
mock_cancellable_subprocess_call.side_effect = [Exception(), 0]
self.assertEqual(self.p.create_replica(self.leader), 0)
self.p.cancel()
self.assertEqual(self.p.create_replica(self.leader), 1)
@patch('time.sleep', Mock())
@patch.object(Postgresql, 'cancellable_subprocess_call')
@patch.object(Postgresql, 'remove_data_directory', Mock(return_value=True))
def test_create_replica_old_format(self, mock_cancellable_subprocess_call):
""" The same test as before but with old 'create_replica_method'
to test backward compatibility
"""
self.p.delete_trigger_file = Mock(side_effect=OSError)
self.p.config['create_replica_method'] = ['wale', 'basebackup']
self.p.config['wale'] = {'command': 'foo'}
mock_cancellable_subprocess_call.return_value = 0
self.assertEqual(self.p.create_replica(self.leader), 0)
del self.p.config['wale']
self.assertEqual(self.p.create_replica(self.leader), 0)
self.p.config['create_replica_method'] = ['basebackup']
self.p.config['basebackup'] = [{'max_rate': '100M'}, 'no-sync']
self.assertEqual(self.p.create_replica(self.leader), 0)
self.p.config['create_replica_method'] = ['wale', 'basebackup']
del self.p.config['basebackup']
mock_cancellable_subprocess_call.return_value = 1
self.assertEqual(self.p.create_replica(self.leader), 1)
def test_basebackup(self):
self.p.cancel()
self.p.basebackup(None, None, {'foo': 'bar'})
@patch.object(Postgresql, 'is_running', Mock(return_value=True))
def test_sync_replication_slots(self):
self.p.start()
cluster = Cluster(True, None, self.leader, 0, [self.me, self.other, self.leadermem], None, None, None)
with mock.patch('patroni.postgresql.Postgresql._query', Mock(side_effect=psycopg2.OperationalError)):
self.p.sync_replication_slots(cluster)
self.p.sync_replication_slots(cluster)
with mock.patch('patroni.postgresql.Postgresql.role', new_callable=PropertyMock(return_value='replica')):
self.p.sync_replication_slots(cluster)
with mock.patch('patroni.postgresql.logger.error', new_callable=Mock()) as errorlog_mock:
self.p.query = Mock()
alias1 = Member(0, 'test-3', 28, {'conn_url': 'postgres://replicator:rep-pass@127.0.0.1:5436/postgres'})
alias2 = Member(0, 'test.3', 28, {'conn_url': 'postgres://replicator:rep-pass@127.0.0.1:5436/postgres'})
cluster.members.extend([alias1, alias2])
self.p.sync_replication_slots(cluster)
errorlog_mock.assert_called_once()
self.assertTrue("test-3" in errorlog_mock.call_args[0][1],
"non matching {0}".format(errorlog_mock.call_args[0][1]))
self.assertTrue("test.3" in errorlog_mock.call_args[0][1],
"non matching {0}".format(errorlog_mock.call_args[0][1]))
@patch.object(MockCursor, 'execute', Mock(side_effect=psycopg2.OperationalError))
def test__query(self):
self.assertRaises(PostgresConnectionException, self.p._query, 'blabla')
self.p._state = 'restarting'
self.assertRaises(RetryFailedError, self.p._query, 'blabla')
def test_query(self):
self.p.query('select 1')
self.assertRaises(PostgresConnectionException, self.p.query, 'RetryFailedError')
self.assertRaises(psycopg2.ProgrammingError, self.p.query, 'blabla')
@patch.object(Postgresql, 'pg_isready', Mock(return_value=STATE_REJECT))
def test_is_leader(self):
self.assertTrue(self.p.is_leader())
self.p.reset_cluster_info_state()
with patch.object(Postgresql, '_query', Mock(side_effect=RetryFailedError(''))):
self.assertRaises(PostgresConnectionException, self.p.is_leader)
def test_reload(self):
self.assertTrue(self.p.reload())
@patch.object(Postgresql, 'is_running')
def test_is_healthy(self, mock_is_running):
mock_is_running.return_value = True
self.assertTrue(self.p.is_healthy())
mock_is_running.return_value = False
self.assertFalse(self.p.is_healthy())
def test_promote(self):
self.p.set_role('replica')
self.assertIsNone(self.p.promote(0))
self.assertTrue(self.p.promote(0))
def test_timeline_wal_position(self):
self.assertEqual(self.p.timeline_wal_position(), (1, 2))
Thread(target=self.p.timeline_wal_position).start()
@patch.object(PostmasterProcess, 'from_pidfile')
def test_is_running(self, mock_frompidfile):
# Cached postmaster running
mock_postmaster = self.p._postmaster_proc = MockPostmaster()
self.assertEqual(self.p.is_running(), mock_postmaster)
# Cached postmaster not running, no postmaster running
mock_postmaster.is_running.return_value = False
mock_frompidfile.return_value = None
self.assertEqual(self.p.is_running(), None)
self.assertEqual(self.p._postmaster_proc, None)
# No cached postmaster, postmaster running
mock_frompidfile.return_value = mock_postmaster2 = MockPostmaster()
self.assertEqual(self.p.is_running(), mock_postmaster2)
self.assertEqual(self.p._postmaster_proc, mock_postmaster2)
@patch('shlex.split', Mock(side_effect=OSError))
def test_call_nowait(self):
self.p.set_role('replica')
self.assertIsNone(self.p.call_nowait('on_start'))
self.p.bootstrapping = True
self.assertIsNone(self.p.call_nowait('on_start'))
def test_non_existing_callback(self):
self.assertFalse(self.p.call_nowait('foobar'))
@patch.object(Postgresql, 'is_running', Mock(return_value=MockPostmaster()))
def test_is_leader_exception(self):
self.p.start()
self.p.query = Mock(side_effect=psycopg2.OperationalError("not supported"))
self.assertTrue(self.p.stop())
@patch('os.rename', Mock())
@patch('os.path.isdir', Mock(return_value=True))
def test_move_data_directory(self):
self.p.move_data_directory()
with patch('os.rename', Mock(side_effect=OSError)):
self.p.move_data_directory()
@patch.object(Postgresql, 'is_running', Mock(return_value=True))
def test_bootstrap(self):
with patch('subprocess.call', Mock(return_value=1)):
self.assertFalse(self.p.bootstrap({}))
config = {'users': {'replicator': {'password': 'rep-pass', 'options': ['replication']}}}
self.p.bootstrap(config)
with open(os.path.join(self.config_dir, 'pg_hba.conf')) as f:
lines = f.readlines()
self.assertTrue('host all all 0.0.0.0/0 md5\n' in lines)
self.p.config.pop('pg_hba')
config.update({'post_init': '/bin/false',
'pg_hba': ['host replication replicator 127.0.0.1/32 md5',
'hostssl all all 0.0.0.0/0 md5',
'host all all 0.0.0.0/0 md5']})
self.p.bootstrap(config)
with open(os.path.join(self.data_dir, 'pg_hba.conf')) as f:
lines = f.readlines()
self.assertTrue('host replication replicator 127.0.0.1/32 md5\n' in lines)
@patch.object(Postgresql, 'cancellable_subprocess_call')
def test_custom_bootstrap(self, mock_cancellable_subprocess_call):
self.p.config.pop('pg_hba')
config = {'method': 'foo', 'foo': {'command': 'bar'}}
mock_cancellable_subprocess_call.return_value = 1
self.assertFalse(self.p.bootstrap(config))
mock_cancellable_subprocess_call.return_value = 0
with patch('subprocess.Popen', Mock(side_effect=Exception("42"))),\
patch('os.path.isfile', Mock(return_value=True)),\
patch('os.unlink', Mock()),\
patch.object(Postgresql, 'save_configuration_files', Mock()),\
patch.object(Postgresql, 'restore_configuration_files', Mock()),\
patch.object(Postgresql, 'write_recovery_conf', Mock()):
with self.assertRaises(Exception) as e:
self.p.bootstrap(config)
self.assertEqual(str(e.exception), '42')
config['foo']['recovery_conf'] = {'foo': 'bar'}
with self.assertRaises(Exception) as e:
self.p.bootstrap(config)
self.assertEqual(str(e.exception), '42')
mock_cancellable_subprocess_call.side_effect = Exception
self.assertFalse(self.p.bootstrap(config))
@patch('time.sleep', Mock())
@patch('os.unlink', Mock())
@patch.object(Postgresql, 'run_bootstrap_post_init', Mock(return_value=True))
@patch.object(Postgresql, '_custom_bootstrap', Mock(return_value=True))
@patch.object(Postgresql, 'start', Mock(return_value=True))
def test_post_bootstrap(self):
config = {'method': 'foo', 'foo': {'command': 'bar'}}
self.p.bootstrap(config)
task = CriticalTask()
with patch.object(Postgresql, 'create_or_update_role', Mock(side_effect=Exception)):
self.p.post_bootstrap({}, task)
self.assertFalse(task.result)
self.p.config.pop('pg_hba')
self.p.post_bootstrap({}, task)
self.assertTrue(task.result)
self.p.bootstrap(config)
with patch.object(Postgresql, 'pending_restart', PropertyMock(return_value=True)), \
patch.object(Postgresql, 'restart', Mock()) as mock_restart:
self.p.post_bootstrap({}, task)
mock_restart.assert_called_once()
self.p.bootstrap(config)
self.p.set_state('stopped')
self.p.reload_config({'authentication': {'superuser': {'username': 'p', 'password': 'p'},
'replication': {'username': 'r', 'password': 'r'}},
'listen': '*', 'retry_timeout': 10, 'parameters': {'hba_file': 'foo'}})
with patch.object(Postgresql, 'restart', Mock()) as mock_restart:
self.p.post_bootstrap({}, task)
mock_restart.assert_called_once()
@patch.object(Postgresql, 'cancellable_subprocess_call')
def test_run_bootstrap_post_init(self, mock_cancellable_subprocess_call):
mock_cancellable_subprocess_call.return_value = 1
self.assertFalse(self.p.run_bootstrap_post_init({'post_init': '/bin/false'}))
mock_cancellable_subprocess_call.return_value = 0
self.p._superuser.pop('username')
self.assertTrue(self.p.run_bootstrap_post_init({'post_init': '/bin/false'}))
mock_cancellable_subprocess_call.assert_called()
args, kwargs = mock_cancellable_subprocess_call.call_args
self.assertTrue('PGPASSFILE' in kwargs['env'])
self.assertEqual(args[0], ['/bin/false', 'postgres://127.0.0.2:5432/postgres'])
mock_cancellable_subprocess_call.reset_mock()
self.p._local_address.pop('host')
self.assertTrue(self.p.run_bootstrap_post_init({'post_init': '/bin/false'}))
mock_cancellable_subprocess_call.assert_called()
self.assertEqual(mock_cancellable_subprocess_call.call_args[0][0], ['/bin/false', 'postgres://:5432/postgres'])
mock_cancellable_subprocess_call.side_effect = OSError
self.assertFalse(self.p.run_bootstrap_post_init({'post_init': '/bin/false'}))
@patch('patroni.postgresql.Postgresql.create_replica', Mock(return_value=0))
def test_clone(self):
self.p.clone(self.leader)
@patch('os.listdir', Mock(return_value=['recovery.conf']))
@patch('os.path.exists', Mock(return_value=True))
def test_get_postgres_role_from_data_directory(self):
self.assertEqual(self.p.get_postgres_role_from_data_directory(), 'replica')
def test_remove_data_directory(self):
self.p.remove_data_directory()
open(self.data_dir, 'w').close()
self.p.remove_data_directory()
os.symlink('unexisting', self.data_dir)
with patch('os.unlink', Mock(side_effect=OSError)):
self.p.remove_data_directory()
self.p.remove_data_directory()
@patch('patroni.postgresql.Postgresql._version_file_exists', Mock(return_value=True))
def test_controldata(self):
with patch('subprocess.check_output', Mock(return_value=0, side_effect=pg_controldata_string)):
data = self.p.controldata()
self.assertEqual(len(data), 50)
self.assertEqual(data['Database cluster state'], 'shut down in recovery')
self.assertEqual(data['wal_log_hints setting'], 'on')
self.assertEqual(int(data['Database block size']), 8192)
with patch('subprocess.check_output', Mock(side_effect=subprocess.CalledProcessError(1, ''))):
self.assertEqual(self.p.controldata(), {})
@patch('patroni.postgresql.Postgresql._version_file_exists', Mock(return_value=True))
@patch('subprocess.check_output', MagicMock(return_value=0, side_effect=pg_controldata_string))
def test_sysid(self):
self.assertEqual(self.p.sysid, "6200971513092291716")
@patch('os.path.isfile', Mock(return_value=True))
@patch('shutil.copy', Mock(side_effect=IOError))
def test_save_configuration_files(self):
self.p.save_configuration_files()
@patch('os.path.isfile', Mock(side_effect=[False, True]))
@patch('shutil.copy', Mock(side_effect=IOError))
def test_restore_configuration_files(self):
self.p.restore_configuration_files()
def test_can_create_replica_without_replication_connection(self):
self.p.config['create_replica_method'] = []
self.assertFalse(self.p.can_create_replica_without_replication_connection())
self.p.config['create_replica_method'] = ['wale', 'basebackup']
self.p.config['wale'] = {'command': 'foo', 'no_master': 1}
self.assertTrue(self.p.can_create_replica_without_replication_connection())
def test_replica_method_can_work_without_replication_connection(self):
self.assertFalse(self.p.replica_method_can_work_without_replication_connection('basebackup'))
self.assertFalse(self.p.replica_method_can_work_without_replication_connection('foobar'))
self.p.config['foo'] = {'command': 'bar', 'no_master': 1}
self.assertTrue(self.p.replica_method_can_work_without_replication_connection('foo'))
self.p.config['foo'] = {'command': 'bar'}
self.assertFalse(self.p.replica_method_can_work_without_replication_connection('foo'))
@patch.object(Postgresql, 'is_running', Mock(return_value=True))
def test_reload_config(self):
parameters = self._PARAMETERS.copy()
parameters.pop('f.oo')
config = {'pg_hba': [''], 'use_unix_socket': True, 'authentication': {},
'retry_timeout': 10, 'listen': '*', 'parameters': parameters}
self.p.reload_config(config)
parameters['b.ar'] = 'bar'
self.p.reload_config(config)
parameters['autovacuum'] = 'on'
self.p.reload_config(config)
parameters['autovacuum'] = 'off'
parameters.pop('search_path')
config['listen'] = '*:5433'
self.p.reload_config(config)
parameters['unix_socket_directories'] = '.'
self.p.reload_config(config)
self.p.resolve_connection_addresses()
@patch.object(Postgresql, '_version_file_exists', Mock(return_value=True))
def test_get_major_version(self):
with patch.object(builtins, 'open', mock_open(read_data='9.4')):
self.assertEqual(self.p.get_major_version(), 90400)
with patch.object(builtins, 'open', Mock(side_effect=Exception)):
self.assertEqual(self.p.get_major_version(), 0)
def test_postmaster_start_time(self):
with patch.object(MockCursor, "fetchone", Mock(return_value=('foo', True, '', '', '', '', False))):
self.assertEqual(self.p.postmaster_start_time(), 'foo')
with patch.object(MockCursor, "execute", side_effect=psycopg2.Error):
self.assertIsNone(self.p.postmaster_start_time())
def test_check_for_startup(self):
with patch('subprocess.call', return_value=0):
self.p._state = 'starting'
self.assertFalse(self.p.check_for_startup())
self.assertEqual(self.p.state, 'running')
with patch('subprocess.call', return_value=1):
self.p._state = 'starting'
self.assertTrue(self.p.check_for_startup())
self.assertEqual(self.p.state, 'starting')
with patch('subprocess.call', return_value=2):
self.p._state = 'starting'
self.assertFalse(self.p.check_for_startup())
self.assertEqual(self.p.state, 'start failed')
with patch('subprocess.call', return_value=0):
self.p._state = 'running'
self.assertFalse(self.p.check_for_startup())
self.assertEqual(self.p.state, 'running')
with patch('subprocess.call', return_value=127):
self.p._state = 'running'
self.assertFalse(self.p.check_for_startup())
self.assertEqual(self.p.state, 'running')
self.p._state = 'starting'
self.assertFalse(self.p.check_for_startup())
self.assertEqual(self.p.state, 'running')
def test_wait_for_startup(self):
state = {'sleeps': 0, 'num_rejects': 0, 'final_return': 0}
self.__thread_ident = current_thread().ident
def increment_sleeps(*args):
if current_thread().ident == self.__thread_ident:
print("Sleep")
state['sleeps'] += 1
def isready_return(*args):
ret = 1 if state['sleeps'] < state['num_rejects'] else state['final_return']
print("Isready {0} {1}".format(ret, state))
return ret
def time_in_state(*args):
return state['sleeps']
with patch('subprocess.call', side_effect=isready_return):
with patch('time.sleep', side_effect=increment_sleeps):
self.p.time_in_state = Mock(side_effect=time_in_state)
self.p._state = 'stopped'
self.assertTrue(self.p.wait_for_startup())
self.assertEqual(state['sleeps'], 0)
self.p._state = 'starting'
state['num_rejects'] = 5
self.assertTrue(self.p.wait_for_startup())
self.assertEqual(state['sleeps'], 5)
self.p._state = 'starting'
state['sleeps'] = 0
state['final_return'] = 2
self.assertFalse(self.p.wait_for_startup())
self.p._state = 'starting'
state['sleeps'] = 0
state['final_return'] = 0
self.assertFalse(self.p.wait_for_startup(timeout=2))
self.assertEqual(state['sleeps'], 3)
with patch.object(Postgresql, 'check_startup_state_changed', Mock(return_value=False)):
self.p.cancel()
self.p._state = 'starting'
self.assertIsNone(self.p.wait_for_startup())
def test_pick_sync_standby(self):
cluster = Cluster(True, None, self.leader, 0, [self.me, self.other, self.leadermem], None,
SyncState(0, self.me.name, self.leadermem.name), None)
with patch.object(Postgresql, "query", return_value=[
(self.leadermem.name, 'streaming', 'sync'),
(self.me.name, 'streaming', 'async'),
(self.other.name, 'streaming', 'async'),
]):
self.assertEqual(self.p.pick_synchronous_standby(cluster), (self.leadermem.name, True))
with patch.object(Postgresql, "query", return_value=[
(self.me.name, 'streaming', 'async'),
(self.leadermem.name, 'streaming', 'potential'),
(self.other.name, 'streaming', 'async'),
]):
self.assertEqual(self.p.pick_synchronous_standby(cluster), (self.leadermem.name, False))
with patch.object(Postgresql, "query", return_value=[
(self.me.name, 'streaming', 'async'),
(self.other.name, 'streaming', 'async'),
]):
self.assertEqual(self.p.pick_synchronous_standby(cluster), (self.me.name, False))
with patch.object(Postgresql, "query", return_value=[
('missing', 'streaming', 'sync'),
(self.me.name, 'streaming', 'async'),
(self.other.name, 'streaming', 'async'),
]):
self.assertEqual(self.p.pick_synchronous_standby(cluster), (self.me.name, False))
with patch.object(Postgresql, "query", return_value=[]):
self.assertEqual(self.p.pick_synchronous_standby(cluster), (None, False))
def test_set_sync_standby(self):
def value_in_conf():
with open(os.path.join(self.data_dir, 'postgresql.conf')) as f:
for line in f:
if line.startswith('synchronous_standby_names'):
return line.strip()
mock_reload = self.p.reload = Mock()
self.p.set_synchronous_standby('n1')
self.assertEqual(value_in_conf(), "synchronous_standby_names = 'n1'")
mock_reload.assert_called()
mock_reload.reset_mock()
self.p.set_synchronous_standby('n1')
mock_reload.assert_not_called()
self.assertEqual(value_in_conf(), "synchronous_standby_names = 'n1'")
self.p.set_synchronous_standby('n2')
mock_reload.assert_called()
self.assertEqual(value_in_conf(), "synchronous_standby_names = 'n2'")
mock_reload.reset_mock()
self.p.set_synchronous_standby(None)
mock_reload.assert_called()
self.assertEqual(value_in_conf(), None)
def test_get_server_parameters(self):
config = {'synchronous_mode': True, 'parameters': {'wal_level': 'hot_standby'}, 'listen': '0'}
self.p.get_server_parameters(config)
config['synchronous_mode_strict'] = True
self.p.get_server_parameters(config)
self.p.set_synchronous_standby('foo')
self.p.get_server_parameters(config)
@patch('time.sleep', Mock())
def test__wait_for_connection_close(self):
mock_postmaster = MockPostmaster()
with patch.object(Postgresql, 'is_running', Mock(return_value=mock_postmaster)):
mock_postmaster.is_running.side_effect = [True, False, False]
mock_callback = Mock()
self.p.stop(on_safepoint=mock_callback)
mock_postmaster.is_running.side_effect = [True, False, False]
with patch.object(MockCursor, "execute", Mock(side_effect=psycopg2.Error)):
self.p.stop(on_safepoint=mock_callback)
def test_terminate_starting_postmaster(self):
mock_postmaster = MockPostmaster()
self.p.terminate_starting_postmaster(mock_postmaster)
mock_postmaster.signal_stop.assert_called()
mock_postmaster.wait.assert_called()
def test_read_postmaster_opts(self):
m = mock_open(read_data='/usr/lib/postgres/9.6/bin/postgres "-D" "data/postgresql0" \
"--listen_addresses=127.0.0.1" "--port=5432" "--hot_standby=on" "--wal_level=hot_standby" \
"--wal_log_hints=on" "--max_wal_senders=5" "--max_replication_slots=5"\n')
with patch.object(builtins, 'open', m):
data = self.p.read_postmaster_opts()
self.assertEqual(data['wal_level'], 'hot_standby')
self.assertEqual(int(data['max_replication_slots']), 5)
self.assertEqual(data.get('D'), None)
m.side_effect = IOError
data = self.p.read_postmaster_opts()
self.assertEqual(data, dict())
@patch('subprocess.Popen')
def test_single_user_mode(self, subprocess_popen_mock):
subprocess_popen_mock.return_value.wait.return_value = 0
self.assertEqual(self.p.single_user_mode('CHECKPOINT', {'archive_mode': 'on'}), 0)
@patch('os.listdir', Mock(side_effect=[OSError, ['a', 'b']]))
@patch('os.unlink', Mock(side_effect=OSError))
@patch('os.remove', Mock())
@patch('os.path.islink', Mock(side_effect=[True, False]))
@patch('os.path.isfile', Mock(return_value=True))
def test_cleanup_archive_status(self):
self.p.cleanup_archive_status()
self.p.cleanup_archive_status()
@patch('os.unlink', Mock())
@patch('os.path.isfile', Mock(return_value=True))
@patch.object(Postgresql, 'single_user_mode', Mock(return_value=0))
def test_fix_cluster_state(self):
self.assertTrue(self.p.fix_cluster_state())
def test_replica_cached_timeline(self):
self.assertEqual(self.p.replica_cached_timeline(1), 2)
def test_get_master_timeline(self):
self.assertEqual(self.p.get_master_timeline(), 1)
def test_cancellable_subprocess_call(self):
self.p.cancel()
self.assertRaises(PostgresException, self.p.cancellable_subprocess_call, communicate_input=None)
@patch('patroni.postgresql.polling_loop', Mock(return_value=[0, 0]))
def test_cancel(self):
self.p._cancellable = Mock()
self.p._cancellable.returncode = None
self.p.cancel()
type(self.p._cancellable).returncode = PropertyMock(side_effect=[None, -15])
self.p.cancel()
@patch.object(Postgresql, 'get_postgres_role_from_data_directory', Mock(return_value='replica'))
def test__build_effective_configuration(self):
with patch.object(Postgresql, 'controldata',
Mock(return_value={'max_connections setting': '200',
'max_worker_processes setting': '20',
'max_prepared_xacts setting': '100',
'max_locks_per_xact setting': '100'})):
self.p.cancel()
self.assertFalse(self.p.start())
self.assertTrue(self.p.pending_restart)
|
Base.py
|
# Trading algorithm structure and initialization file.
import gdax, pymongo, collections, threading, sys, os, subprocess
import WStoMongo, Level2Data, HistData, DataFunc, AuthClient
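# NOTE: this script uses Python 2 syntax (print statements, raw_input, sys.exc_clear).
# Each menu option below spawns a worker thread for the selected task.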
db = pymongo.MongoClient().algodb_test
quitCall = False
print "Welcome to the Trahan Autonomous Trading Program!"
# Main program loop to start threads for various information gathering and testing purposes.
while True:
# Main input instructions
print ('''Choose File: (h = history, uh = update history, w = websocket, l2 = level2 feed,
l2g = graph level 2 data, t = ticker feed, d = delete dbs, c = calc history,
cd = delete calculations, at == test algorithm, End threads (q**): ie. ql2 quit l2 thread''')
selFile = raw_input(">>> ")
# Import historical data
if selFile == 'h' or selFile == 'H':
# State format for data draw
print ('Format for initTickerDataDraw(products, tRange, tInterval): \n prod: BTC-USD')
hProd = raw_input('Product (ex. BTC-USD): ')
tRange = int(raw_input('Time range (in days): '))
tInterval = int(raw_input('Data point interval in mins (1, 5, 15, 60, 360, 1440): '))
try:
h = threading.Thread(target = HistData.popHistory,args=(hProd, tRange, tInterval))
h.start()
except:
print(sys.exc_info())
print("Error: unable to start thread")
finally:
sys.exc_clear()
# Import historical data
    elif selFile == 'uh' or selFile == 'UH':
print ('Historical data will be updated')
try:
uh = threading.Thread(target=HistData.updateHistory,args=())
uh.start()
except:
print(sys.exc_info())
print("Error: unable to start thread")
finally:
sys.exc_clear()
# Run algorithm over historical data
elif selFile == 'at' or selFile == 'AT':
try:
            at = threading.Thread(target = AuthClient.testAlgorithm, args=())
at.start()
except:
print(sys.exc_info())
print("Error: unable to start thread")
finally:
sys.exc_clear()
# Calculate indicators and push to db
elif selFile == 'c' or selFile == 'C':
try:
c = threading.Thread(target = DataFunc.calcPopulateBulk, args=())
c.start()
except:
print(sys.exc_info())
print("Error: unable to start thread")
finally:
sys.exc_clear()
# Delete indicator calculations from historical data
elif selFile == 'cd' or selFile == 'CD':
try:
cd = threading.Thread(target = DataFunc.deleteCalcs, args=())
cd.start()
except:
print(sys.exc_info())
print("Error: unable to start thread")
finally:
sys.exc_clear()
# Clear database
elif selFile == 'd' or selFile == 'D':
db.algoHistTable.drop()
db.algoWebsocketTable.drop()
db.algoWStest.drop()
db.level2col.drop()
db.tickercol.drop()
db.level2current.drop()
print ('Collections cleared: \n algoHistTable \n'
' algoWebsocketTable \n algoWStest \n level2col \n'
' level2current \n tickercol ')
# Start generic feed data draw
elif selFile == 'w' or selFile == 'W':
try:
w = threading.Thread(target = WStoMongo.initDataDraw,args=())
w.setDaemon(True)
w.start()
except:
print(sys.exc_info())
print("Error: unable to start thread")
finally:
sys.exc_clear()
# Start Level 2 feed data draw
    elif selFile == 'l2' or selFile == 'L2':
# State format for data draw
#print ('Format for initLevel2DataDraw(products): \n prod: BTC-USD')
#l2Prod = raw_input('Product: ')
try:
ll = threading.Thread(target = WStoMongo.initLevel2DataDraw,args=('BTC-USD',))
ll.start()
except EOFError:
print(sys.exc_info())
print('End of file')
except:
print(sys.exc_info())
print("Error: unable to start thread")
finally:
sys.exc_clear()
# Start ticker feed data draw
elif selFile == 't' or selFile == 'T':
# State format for data draw
print ('Format for initTickerDataDraw(products, limiter): \n prod: BTC-USD')
# Define products
tProd = [raw_input('Product: ')]
n = int(raw_input('Limiter qty: '))
try:
t = threading.Thread(target = WStoMongo.initTickerDataDraw,args=(tProd, n))
t.start()
except EOFError:
sys.exc_info()
print('End of file')
except:
print(sys.exc_info())
print("Error: unable to start thread")
finally:
sys.exc_clear()
# Graph current level2 data
elif selFile == 'l2g' or selFile == 'L2G':
prange = float(raw_input('Price range ($): '))
try:
g = threading.Thread(target = Level2Data.lGraph,args=(prange,))
g.start()
except:
print(sys.exc_info())
print("Error: unable to start thread")
finally:
sys.exc_clear()
elif selFile == 'ql2':
        print ('# of Threads: %d' % threading.activeCount())
quitCall = True
# Handler for no match
else:
print 'Selection not valid (CTRL-C to quit!)'
#break
|
monitor.py
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Standard library
from __future__ import division
import time
import uuid
import logging
import sys
import json
import threading
import os
# ActiveMQ access
import stomp
# ZooKeeper access
from kazoo.client import KazooClient
# Database access
import psycopg2
# rpyc module for RPC communication
import rpyc
from rpyc.utils.server import ThreadedServer
# System / platform information
import platform
import subprocess
# Project modules and configuration
from config import *
from libraries import *
# ID of the current worker
workerid=''
if STATIC_CONFIGS.has_key('WORK_ID') == True:
workerid = STATIC_CONFIGS['WORK_ID']
if workerid == '':
workerid = uuid.uuid4()
# Initialize variables
logLevel=logging.INFO
if STATIC_CONFIGS.has_key('LOGS') == True:
if STATIC_CONFIGS['LOGS']['LEVEL'] == 'DEBUG':
logLevel=logging.DEBUG
elif STATIC_CONFIGS['LOGS']['LEVEL'] == 'INFO':
logLevel=logging.INFO
elif STATIC_CONFIGS['LOGS']['LEVEL'] == 'WARN':
logLevel=logging.WARN
else:
logLevel=logging.ERROR
else:
logLevel=logging.ERROR
RPYC_SECRET_KEY=STATIC_CONFIGS['RPYCS']['SECRET_KEY']
RPYC_HOST = ''
RPYC_PORT = ''
if STATIC_CONFIGS['RPYCS'].has_key('HOST') == True:
RPYC_HOST=STATIC_CONFIGS['RPYCS']['HOST']
if STATIC_CONFIGS['RPYCS'].has_key('PORT') == True:
RPYC_PORT=STATIC_CONFIGS['RPYCS']['PORT']
# If the host and port are not specified, generate them automatically
if RPYC_HOST == '':
RPYC_HOST = socket.gethostbyname(socket.gethostname())
if RPYC_PORT == '':
RPYC_PORT = random_port(16000,17000, 10)
ZOOKEEPER_HOSTS='127.0.0.1:2181'
ZOOKEEPER_PARENT_PATH='/test'
if STATIC_CONFIGS.has_key('ZOOKEEPERS') == True:
if STATIC_CONFIGS['ZOOKEEPERS'].has_key('HOSTS') == True:
ZOOKEEPER_HOSTS = STATIC_CONFIGS['ZOOKEEPERS']['HOSTS']
if STATIC_CONFIGS['ZOOKEEPERS'].has_key('START_PATH') == True:
ZOOKEEPER_PARENT_PATH = STATIC_CONFIGS['ZOOKEEPERS']['START_PATH']
WATCHER_SLEEP_INTEVAL=60
if STATIC_CONFIGS.has_key('WATCHER') == True:
if STATIC_CONFIGS['WATCHER'].has_key('SLEEP_INTEVAL') == True:
WATCHER_SLEEP_INTEVAL=STATIC_CONFIGS['WATCHER']['SLEEP_INTEVAL']
# Current process id
pid= os.getpid()
# Initialize logging
logging.basicConfig(level=logLevel, format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s', filename=sys.path[0]+'/logs/server_' + str(workerid) + '.log', filemode='a')
logger = logging.getLogger()
# ZooKeeper connection
zkClient = None
UnhandledInterruptHappend=False
# default-encoding
reload(sys)
sys.setdefaultencoding('utf-8')
sysdir=os.path.abspath(os.path.dirname(__file__))
def none2str(value):
if value is not None:
return str(value)
return ''
# Get the string value of a ZooKeeper node
def get_zknode_str_value(zkClient, vpath):
try:
if zkClient.exists(vpath):
data, stat = zkClient.get(str(vpath),watch=None)
if data is not None and data <> '':
return data.decode('utf-8')
except Exception, e:
        vprint('[%s]received an exception %s %s', ('get_zknode_str_value',vpath, str(e),), logger, logging.ERROR)
return None
# rpyc service
class RpycManagerService(rpyc.Service):
    # Login
def exposed_login(self,user,passwd):
if user=="OMuser" and passwd=="KJS23o4ij09gHF734iuhsdfhkGYSihoiwhj38u4h":
self.Checkout_pass=True
else:
self.Checkout_pass=False
    # Query worker node information
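    # The leader and worker znodes under ZOOKEEPER_PARENT_PATH store their rpyc endpoint
    # as a "host_port" string, which is split and queried below to build the report.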
def exposed_ServerInfos(self):
try:
if self.Checkout_pass!=True:
return self.response("C_9010","User verify failed!")
except:
return self.response("C_9001", "Invalid Login!")
global zkClient
infos={}
lhostport = get_zknode_str_value(zkClient, str(ZOOKEEPER_PARENT_PATH + "/leader/leader"))
if lhostport is not None and lhostport <> '':
(lhost, lport) = lhostport.split("_")
response = self.request_leader(lhost,lport)
if response is not None:
jresponse = json.loads(response)
infos['leader'] = jresponse['msg']
whostport = get_zknode_str_value(zkClient, str(ZOOKEEPER_PARENT_PATH + "/worker/leader"))
if whostport is not None and whostport <> '':
(whost, wport) = whostport.split("_")
response = self.request_worker(whost,wport)
if response is not None:
jresponse = json.loads(response)
infos['worker'] = jresponse['msg']
infos['monitor']={}
infos['monitor']['host'] = str(RPYC_HOST)
infos['monitor']['port'] = str(RPYC_PORT)
infos['monitor']['path'] = get_zknode_str_value(zkClient, str(ZOOKEEPER_PARENT_PATH + "/worker/leader/path"))
        # Environment information
try:
            # Gather hardware / resource information via psutil
import psutil
global pid
cprocess=psutil.Process(pid)
infos['monitor']['memory']=str("%.2f M" % (psutil.virtual_memory().total/(1024*1024)))
infos['monitor']['memory_percent']=str(psutil.virtual_memory().percent) + '%'
infos['monitor']['pid_memory_percent']="%.2f%%" % (cprocess.memory_percent())
infos['monitor']['cpu_percent']=str(psutil.cpu_percent(0.5)) + '%'
infos['monitor']['pid_cpu_percent']=str(cprocess.cpu_percent()) + '%'
infos['monitor']['pid']=str(os.getpid())
except Exception, e:
vprint('QueryWorks exception: %s' ,(str(e), ), logger, logging.DEBUG)
return self.response("C_0000", json.dumps(infos))
def request_leader(self, host, port):
try:
conn=rpyc.connect(host,int(port))
conn.root.login('OMuser','KJS23o4ij09gHF734iuhsdfhkGYSihoiwhj38u4h')
except Exception,e:
return None
result = tdecode(conn.root.QuerySchedulers(),RPYC_SECRET_KEY)
return result
def request_worker(self, host, port):
try:
conn=rpyc.connect(host,int(port))
conn.root.login('OMuser','KJS23o4ij09gHF734iuhsdfhkGYSihoiwhj38u4h')
except Exception,e:
return None
result = tdecode(conn.root.QueryWorks(),RPYC_SECRET_KEY)
return result
    # build an encrypted JSON payload of the form {"code": ..., "msg": ...}
    def response(self, code, message):
        resp = {}
        resp['code'] = str(code)
        resp['msg'] = str(message)
        respstr = json.dumps(resp)
        vprint('response: %s', (respstr,), logger, logging.DEBUG)
        return tencode(respstr, RPYC_SECRET_KEY)
def ping_host(host, port):
try:
conn=rpyc.connect(host,int(port))
result = tdecode(conn.root.pingit(),RPYC_SECRET_KEY)
        jresult = json.loads(result)
        if jresult['code'] == 'C_0000':
return True
except Exception,e:
pass
return False
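# Illustrative note (hypothetical sample values): get_static_hosts_info("1") returns a list of dicts
# such as [{'host': '10.0.0.1', 'port': '22', 'username': 'aom', 'password': '<encrypted>'}],
# or None if the database lookup fails.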
def get_static_hosts_info(htype):
dbconn = None
dbcur = None
jstrObj=[]
try:
dbconn = psycopg2.connect(database=STATIC_CONFIGS['DATABASES']['NAME'], user=STATIC_CONFIGS['DATABASES']['USER'], password=STATIC_CONFIGS['DATABASES']['PASSWORD'], host=STATIC_CONFIGS['DATABASES']['HOST'], port=STATIC_CONFIGS['DATABASES']['PORT'])
dbcur = dbconn.cursor()
dbcur.execute("SELECT host,port,username,password FROM aom_task_hosts_static where htype=" + htype)
rows = dbcur.fetchall()
for row in rows:
jstrDict = {}
jstrDict['host'] = none2str(row[0])
jstrDict['port'] = none2str(row[1])
jstrDict['username'] = none2str(row[2])
jstrDict['password'] = none2str(row[3])
jstrObj.append(jstrDict)
return jstrObj
except Exception, e:
vprint('[%s]received a exception %s' , ('get_static_hosts_info',str(e),), logger,logging.ERROR)
finally:
if dbcur is not None:
dbcur.close()
if dbconn is not None:
dbconn.close()
return None
def run_local_executable_file_as_daemon(cmd):
try:
#p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
#p.wait()
os.system(cmd)
except Exception,e:
vprint('[%s]received a exception %s',('run_local_executable_file_as_daemon',str(e),), logger,logging.ERROR)
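# Note: ssh_remote_and_restart() below restarts the leader script either locally (empty or '-' port:
# spawn startLeader.bat/startLeader.sh in a daemon thread) or remotely over SSH via paramiko.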
def ssh_remote_and_restart(hostname, port, username, password, lpath):
try:
strcommand = ''
if port == '' or port == '-':
            # no port configured: default to restarting on the local machine
            if platform.system() == "Windows":
                strcommand = 'cmd.exe /c ' + lpath + '\\startLeader.bat'
                strcommand = strcommand.decode('utf-8').encode('gbk')
else:
strcommand='/bin/sh -c ' + lpath + '/startLeader.sh'
            # run the script in a daemon thread so it is stopped when the monitor itself shuts down
            t = threading.Thread(target=run_local_executable_file_as_daemon, args=(strcommand,))
            t.daemon = True
            t.start()
vprint('[ssh_remote_and_restart]start local script as daemon thread! %s' , (str(strcommand),), logger,logging.INFO)
else:
if password <> '':
import paramiko
npassword = tdecode(password,RPYC_SECRET_KEY)
ssh = paramiko.SSHClient()
ssh.load_system_host_keys()
ssh.set_missing_host_key_policy(paramiko.MissingHostKeyPolicy())
                # use the password decrypted above
                ssh.connect(hostname, port=int(port), username=username, password=npassword)
                # the remote machine is assumed to run Linux
strcommand='/bin/sh -c ' + lpath + '/startLeader.sh'
stdin, stdout, stderr = ssh.exec_command(strcommand)
ssh.close()
vprint('[ssh_remote_and_restart]start remote script by ssh! %s' ,(str(strcommand),), logger,logging.INFO)
else:
pass
except Exception,e:
vprint('[%s]received a exception %s',('ssh_remote_and_restart',str(e),), logger,logging.ERROR)
def judge_thread_alive_or_not(zk_hp_path, zk_wk_host, htype):
global zkClient
lhostport = get_zknode_str_value(zkClient, zk_hp_path)
thread_name = 'schedulers leader thread'
if htype == "1":
thread_name = 'task leader thread'
else:
pass
if lhostport is not None and lhostport <> '':
(lhost, lport) = lhostport.split('_')
if ping_host(lhost, lport) == False:
vprint('%s is not start or all dead !' ,(thread_name,), logger, logging.INFO)
lpath = get_zknode_str_value(zkClient, zk_wk_host)
if lpath is not None and lpath <> '':
shosts = get_static_hosts_info(htype)
if shosts is not None:
bfound = False
for i in range(0, len(shosts)):
if shosts[i]['host'] == lhost:
                            # try to restart the service
bfound = True
vprint('location of %s has been found !',(thread_name,), logger, logging.INFO)
ssh_remote_and_restart(shosts[i]['host'], shosts[i]['port'], shosts[i]['username'], shosts[i]['password'], lpath)
break
if bfound == False:
vprint('skip to restart %s, cause cant find connection infos !',(thread_name,), logger, logging.INFO)
else:
vprint('cant find static host for %s !', (thread_name,), logger, logging.INFO)
else:
pass
else:
pass
# monitoring thread: periodically checks the leader and worker threads and restarts them if they are dead
def worker_monitor(args):
while UnhandledInterruptHappend <> True:
try:
judge_thread_alive_or_not(str(ZOOKEEPER_PARENT_PATH + "/leader/leader"),str(ZOOKEEPER_PARENT_PATH + "/leader/leader/path"), "0")
judge_thread_alive_or_not(str(ZOOKEEPER_PARENT_PATH + "/worker/leader"),str(ZOOKEEPER_PARENT_PATH + "/worker/leader/path"), "1")
time.sleep(WATCHER_SLEEP_INTEVAL)
except Exception, e:
vprint('[%s]received a exception %s', ('worker_monitor',str(e),), logger, logging.ERROR)
break
vprint('worker_monitor thread is stop!', None, logger, logging.INFO)
# callback executed after this node is elected leader
def taskdispacher_leader_func():
    global rpycserver
    vprint('task %s became leader!', (str(workerid),), logger, logging.INFO)
try:
vprint('start observation thread for followed worker!', None, logger, logging.INFO)
watherthread = threading.Thread(target=worker_monitor,args=(None,))
watherthread.start()
vprint('start rpyc connection thread! %s %s' ,(str(RPYC_HOST),str(RPYC_PORT),), logger, logging.INFO)
rpycserver=ThreadedServer(RpycManagerService,port=int(RPYC_PORT),auto_register=False)
rpycserver.start()
except Exception, e:
vprint('[%s]received a exception %s', ('taskdispacher_leader_func',str(e),), logger, logging.ERROR)
def set_interrupt_happend():
global UnhandledInterruptHappend
UnhandledInterruptHappend = True
if __name__ == '__main__':
try:
vprint('task %s is start!', (str(workerid),), logger, logging.INFO)
vprint('connect to zookeeper hosts:=%s!' , (ZOOKEEPER_HOSTS,), logger, logging.INFO)
        # connect to zookeeper
zkClient = KazooClient(hosts=ZOOKEEPER_HOSTS)
zkClient.start()
if not zkClient.exists(ZOOKEEPER_PARENT_PATH + "/monitor/electionpath"):
            # ensure the path exists, creating it if necessary
zkClient.ensure_path(ZOOKEEPER_PARENT_PATH + "/monitor/electionpath")
vprint('waiting for the election!', None, logger, logging.INFO)
        # block until the election is won, then call taskdispacher_leader_func
election = zkClient.Election(ZOOKEEPER_PARENT_PATH + "/monitor/electionpath")
election.run(taskdispacher_leader_func)
except Exception, e:
vprint('[%s]received a exception %s',('main',str(e),), logger, logging.ERROR)
finally:
set_interrupt_happend()
try:
if zkClient is not None:
zkClient.stop()
except:
pass
try:
if rpycserver is not None:
rpycserver.close()
except:
pass
vprint('task %s is stop!', (str(workerid),), logger, logging.INFO)
|
__init__.py
|
#!/usr/bin/env python3
#
# Copyright 2017-2018 Amazon.com, Inc. and its affiliates. All Rights Reserved.
#
# Licensed under the MIT License. See the LICENSE accompanying this file
# for the specific language governing permissions and limitations under
# the License.
#
#
# Copy this script to /sbin/mount.efs and make sure it is executable.
#
# You will be able to mount an EFS file system by its short name, by adding it
# to /etc/fstab. The syntax of an fstab entry is:
#
# [Device] [Mount Point] [File System Type] [Options] [Dump] [Pass]
#
# Add an entry like this:
#
# fs-deadbeef /mount_point efs _netdev 0 0
#
# Using the 'efs' type will cause '/sbin/mount.efs' to be called by 'mount -a'
# for this file system. The '_netdev' option tells the init system that the
# 'efs' type is a networked file system type. This has been tested with systemd
# (Amazon Linux 2, CentOS 7, RHEL 7, Debian 9, and Ubuntu 16.04), and upstart
# (Amazon Linux 2017.09).
#
# Once there is an entry in fstab, the file system can be mounted with:
#
# sudo mount /mount_point
#
# The script will add recommended mount options, if not provided in fstab.
import base64
import errno
import hashlib
import hmac
import json
import logging
import os
import platform
import pwd
import random
import re
import socket
import subprocess
import sys
import threading
import time
from contextlib import contextmanager
from datetime import datetime, timedelta
from logging.handlers import RotatingFileHandler
try:
from configparser import ConfigParser, NoOptionError, NoSectionError
except ImportError:
import ConfigParser
from ConfigParser import NoOptionError, NoSectionError
try:
from urllib.parse import quote_plus
except ImportError:
from urllib import quote_plus
try:
from urllib.request import urlopen, Request
from urllib.error import URLError, HTTPError
from urllib.parse import urlencode
except ImportError:
from urllib2 import URLError, HTTPError, build_opener, urlopen, Request, HTTPHandler
from urllib import urlencode
try:
import botocore.session
from botocore.exceptions import (
ClientError,
NoCredentialsError,
EndpointConnectionError,
ProfileNotFound,
)
BOTOCORE_PRESENT = True
except ImportError:
BOTOCORE_PRESENT = False
VERSION = "1.31.3"
SERVICE = "elasticfilesystem"
CLONE_NEWNET = 0x40000000
CONFIG_FILE = "/etc/amazon/efs/efs-utils.conf"
CONFIG_SECTION = "mount"
CLIENT_INFO_SECTION = "client-info"
CLIENT_SOURCE_STR_LEN_LIMIT = 100
# Cloudwatchlog agent dict includes cloudwatchlog botocore client, cloudwatchlog group name, cloudwatchlog stream name
CLOUDWATCHLOG_AGENT = None
CLOUDWATCH_LOG_SECTION = "cloudwatch-log"
DEFAULT_CLOUDWATCH_LOG_GROUP = "/aws/efs/utils"
DEFAULT_FALLBACK_ENABLED = True
DEFAULT_RETENTION_DAYS = 14
DEFAULT_UNKNOWN_VALUE = "unknown"
# 50ms
DEFAULT_TIMEOUT = 0.05
DEFAULT_MACOS_VALUE = "macos"
DISABLE_FETCH_EC2_METADATA_TOKEN_ITEM = "disable_fetch_ec2_metadata_token"
FALLBACK_TO_MOUNT_TARGET_IP_ADDRESS_ITEM = (
"fall_back_to_mount_target_ip_address_enabled"
)
INSTANCE_IDENTITY = None
OPTIMIZE_READAHEAD_ITEM = "optimize_readahead"
LOG_DIR = "/var/log/amazon/efs"
LOG_FILE = "mount.log"
STATE_FILE_DIR = "/var/run/efs"
PRIVATE_KEY_FILE = "/etc/amazon/efs/privateKey.pem"
DATE_ONLY_FORMAT = "%Y%m%d"
SIGV4_DATETIME_FORMAT = "%Y%m%dT%H%M%SZ"
CERT_DATETIME_FORMAT = "%y%m%d%H%M%SZ"
AWS_CREDENTIALS_FILE = os.path.expanduser(
os.path.join("~" + pwd.getpwuid(os.getuid()).pw_name, ".aws", "credentials")
)
AWS_CONFIG_FILE = os.path.expanduser(
os.path.join("~" + pwd.getpwuid(os.getuid()).pw_name, ".aws", "config")
)
CA_CONFIG_BODY = """dir = %s
RANDFILE = $dir/database/.rand
[ ca ]
default_ca = local_ca
[ local_ca ]
database = $dir/database/index.txt
serial = $dir/database/serial
private_key = %s
cert = $dir/certificate.pem
new_certs_dir = $dir/certs
default_md = sha256
preserve = no
policy = efsPolicy
x509_extensions = v3_ca
[ efsPolicy ]
CN = supplied
[ req ]
prompt = no
distinguished_name = req_distinguished_name
[ req_distinguished_name ]
CN = %s
%s
%s
%s
"""
# SigV4 Auth
ALGORITHM = "AWS4-HMAC-SHA256"
AWS4_REQUEST = "aws4_request"
HTTP_REQUEST_METHOD = "GET"
CANONICAL_URI = "/"
CANONICAL_HEADERS_DICT = {"host": "%s"}
CANONICAL_HEADERS = "\n".join(
["%s:%s" % (k, v) for k, v in sorted(CANONICAL_HEADERS_DICT.items())]
)
SIGNED_HEADERS = ";".join(CANONICAL_HEADERS_DICT.keys())
REQUEST_PAYLOAD = ""
FS_ID_RE = re.compile("^(?P<fs_id>fs-[0-9a-f]+)$")
EFS_FQDN_RE = re.compile(
r"^((?P<az>[a-z0-9-]+)\.)?(?P<fs_id>fs-[0-9a-f]+)\.efs\."
r"(?P<region>[a-z0-9-]+)\.(?P<dns_name_suffix>[a-z0-9.]+)$"
)
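# Illustrative examples (hypothetical IDs): EFS_FQDN_RE matches "fs-deadbeef.efs.us-east-1.amazonaws.com"
# and, with an AZ prefix, "us-east-1a.fs-deadbeef.efs.us-east-1.amazonaws.com".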
AP_ID_RE = re.compile("^fsap-[0-9a-f]{17}$")
CREDENTIALS_KEYS = ["AccessKeyId", "SecretAccessKey", "Token"]
ECS_URI_ENV = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"
ECS_TASK_METADATA_API = "http://169.254.170.2"
WEB_IDENTITY_ROLE_ARN_ENV = "AWS_ROLE_ARN"
WEB_IDENTITY_TOKEN_FILE_ENV = "AWS_WEB_IDENTITY_TOKEN_FILE"
STS_ENDPOINT_URL_FORMAT = "https://sts.{}.amazonaws.com/"
INSTANCE_METADATA_TOKEN_URL = "http://169.254.169.254/latest/api/token"
INSTANCE_METADATA_SERVICE_URL = (
"http://169.254.169.254/latest/dynamic/instance-identity/document/"
)
INSTANCE_IAM_URL = "http://169.254.169.254/latest/meta-data/iam/security-credentials/"
NAMED_PROFILE_HELP_URL = (
"https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-profiles.html"
)
CONFIG_FILE_SETTINGS_HELP_URL = (
"https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html"
"#cli-configure-files-settings"
)
SECURITY_CREDS_ECS_URI_HELP_URL = (
"https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html"
)
SECURITY_CREDS_WEBIDENTITY_HELP_URL = "https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html"
SECURITY_CREDS_IAM_ROLE_HELP_URL = (
"https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html"
)
DEFAULT_STUNNEL_VERIFY_LEVEL = 2
DEFAULT_STUNNEL_CAFILE = "/etc/amazon/efs/efs-utils.crt"
NOT_BEFORE_MINS = 15
NOT_AFTER_HOURS = 3
EFS_ONLY_OPTIONS = [
"accesspoint",
"awscredsuri",
"awsprofile",
"az",
"cafile",
"iam",
"mounttargetip",
"netns",
"noocsp",
"notls",
"ocsp",
"tls",
"tlsport",
"verify",
]
UNSUPPORTED_OPTIONS = ["capath"]
STUNNEL_GLOBAL_CONFIG = {
"fips": "no",
"foreground": "yes",
"socket": [
"l:SO_REUSEADDR=yes",
"a:SO_BINDTODEVICE=lo",
],
}
STUNNEL_EFS_CONFIG = {
"client": "yes",
"accept": "127.0.0.1:%s",
"connect": "%s:2049",
"sslVersion": "TLSv1.2",
"renegotiation": "no",
"TIMEOUTbusy": "20",
"TIMEOUTclose": "0",
"TIMEOUTidle": "70",
"delay": "yes",
}
WATCHDOG_SERVICE = "amazon-efs-mount-watchdog"
# MacOS instances use plist files. This file needs to be loaded into launchctl (the init system of MacOS).
WATCHDOG_SERVICE_PLIST_PATH = "/Library/LaunchAgents/amazon-efs-mount-watchdog.plist"
SYSTEM_RELEASE_PATH = "/etc/system-release"
OS_RELEASE_PATH = "/etc/os-release"
RHEL8_RELEASE_NAME = "Red Hat Enterprise Linux release 8"
CENTOS8_RELEASE_NAME = "CentOS Linux release 8"
ORACLE_RELEASE_NAME = "Oracle Linux Server release 8"
FEDORA_RELEASE_NAME = "Fedora release"
OPEN_SUSE_LEAP_RELEASE_NAME = "openSUSE Leap"
SUSE_RELEASE_NAME = "SUSE Linux Enterprise Server"
MACOS_BIG_SUR_RELEASE = "macOS-11"
ROCKY8_RELEASE_NAME = "Rocky Linux release 8"
ALMALINUX8_RELEASE_NAME = "AlmaLinux release 8"
AMAZON_LINUX_2022_RELEASE_NAME = "Amazon Linux release 2022"
SKIP_NO_LIBWRAP_RELEASES = [
RHEL8_RELEASE_NAME,
CENTOS8_RELEASE_NAME,
FEDORA_RELEASE_NAME,
OPEN_SUSE_LEAP_RELEASE_NAME,
SUSE_RELEASE_NAME,
MACOS_BIG_SUR_RELEASE,
ORACLE_RELEASE_NAME,
ROCKY8_RELEASE_NAME,
AMAZON_LINUX_2022_RELEASE_NAME,
ALMALINUX8_RELEASE_NAME,
]
# Multiplier for max read ahead buffer size
# Set default as 15 aligning with prior linux kernel 5.4
DEFAULT_NFS_MAX_READAHEAD_MULTIPLIER = 15
NFS_READAHEAD_CONFIG_PATH_FORMAT = "/sys/class/bdi/0:%s/read_ahead_kb"
NFS_READAHEAD_OPTIMIZE_LINUX_KERNEL_MIN_VERSION = [5, 4]
# MacOS does not support the property of Socket SO_BINDTODEVICE in stunnel configuration
SKIP_NO_SO_BINDTODEVICE_RELEASES = [MACOS_BIG_SUR_RELEASE]
MAC_OS_PLATFORM_LIST = ["darwin"]
# MacOS Versions : Big Sur - 20.*, Catalina - 19.*, Mojave - 18.*. Catalina and Mojave are not supported for now
MAC_OS_SUPPORTED_VERSION_LIST = ["20"]
def errcheck(ret, func, args):
from ctypes import get_errno
if ret == -1:
e = get_errno()
raise OSError(e, os.strerror(e))
def setns(fd, nstype):
from ctypes import CDLL
libc = CDLL("libc.so.6", use_errno=True)
libc.setns.errcheck = errcheck
if hasattr(fd, "fileno"):
fd = fd.fileno()
return libc.setns(fd, nstype)
class NetNS(object):
# Open sockets from given network namespace: stackoverflow.com/questions/28846059
def __init__(self, nspath):
self.original_nspath = "/proc/%d/ns/net" % os.getpid()
self.target_nspath = nspath
def __enter__(self):
self.original_namespace = open(self.original_nspath)
with open(self.target_nspath) as fd:
setns(fd, CLONE_NEWNET)
def __exit__(self, *args):
setns(self.original_namespace, CLONE_NEWNET)
self.original_namespace.close()
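# Minimal usage sketch (as done in choose_tls_port below): run code inside another network namespace,
# given a namespace path such as the value of the "netns" mount option:
#
#   with NetNS(nspath=options["netns"]):
#       tls_port = find_tls_port_in_range(ports_to_try)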
class FallbackException(Exception):
"""Exception raised for errors happens when dns resolve and fallback to mount target ip address attempt both fail
Attributes:
message -- explanation of the error
"""
def __init__(self, message):
self.message = message
super().__init__(self.message)
def fatal_error(user_message, log_message=None, exit_code=1):
if log_message is None:
log_message = user_message
sys.stderr.write("%s\n" % user_message)
logging.error(log_message)
publish_cloudwatch_log(CLOUDWATCHLOG_AGENT, "Mount failed, %s" % log_message)
sys.exit(exit_code)
def get_target_region(config):
def _fatal_error(message):
fatal_error(
'Error retrieving region. Please set the "region" parameter '
"in the efs-utils configuration file.",
message,
)
try:
return config.get(CONFIG_SECTION, "region")
except NoOptionError:
pass
try:
return get_region_from_instance_metadata(config)
except Exception as e:
metadata_exception = e
logging.warning(
"Region not found in config file and metadata service call failed, falling back "
'to legacy "dns_name_format" check'
)
try:
region = get_region_from_legacy_dns_format(config)
sys.stdout.write(
'Warning: region obtained from "dns_name_format" field. Please set the "region" '
"parameter in the efs-utils configuration file."
)
return region
except Exception:
logging.warning('Legacy check for region in "dns_name_format" failed')
_fatal_error(metadata_exception)
def get_target_az(config, options):
if "az" in options:
return options.get("az")
try:
return get_az_from_instance_metadata(config)
except Exception as e:
logging.warning("Get AZ via metadata service call failed, %s", e)
return None
def get_region_from_instance_metadata(config):
instance_identity = get_instance_identity_info_from_instance_metadata(
config, "region"
)
if not instance_identity:
raise Exception(
"Cannot retrieve region from instance_metadata. "
"Please set the 'region' parameter in the efs-utils configuration file."
)
return instance_identity
def get_az_from_instance_metadata(config):
instance_identity = get_instance_identity_info_from_instance_metadata(
config, "availabilityZone"
)
if not instance_identity:
raise Exception("Cannot retrieve az from instance_metadata")
return instance_identity
def get_instance_identity_info_from_instance_metadata(config, property):
logging.debug("Retrieve property %s from instance metadata", property)
ec2_metadata_unsuccessful_resp = (
"Unsuccessful retrieval of EC2 metadata at %s." % INSTANCE_METADATA_SERVICE_URL
)
ec2_metadata_url_error_msg = (
"Unable to reach %s to retrieve EC2 instance metadata."
% INSTANCE_METADATA_SERVICE_URL
)
global INSTANCE_IDENTITY
if INSTANCE_IDENTITY:
logging.debug(
"Instance metadata already retrieved in previous call, use the cached values."
)
instance_identity = INSTANCE_IDENTITY
else:
instance_identity = url_request_helper(
config,
INSTANCE_METADATA_SERVICE_URL,
ec2_metadata_unsuccessful_resp,
ec2_metadata_url_error_msg,
)
INSTANCE_IDENTITY = instance_identity
if instance_identity:
try:
return instance_identity[property]
except KeyError as e:
logging.warning(
"%s not present in %s: %s" % (property, instance_identity, e)
)
except TypeError as e:
logging.warning(
"response %s is not a json object: %s" % (instance_identity, e)
)
return None
def get_region_from_legacy_dns_format(config):
"""
For backwards compatibility check dns_name_format to obtain the target region. This functionality
should only be used if region is not present in the config file and metadata calls fail.
"""
dns_name_format = config.get(CONFIG_SECTION, "dns_name_format")
if "{region}" not in dns_name_format:
split_dns_name_format = dns_name_format.split(".")
if "{dns_name_suffix}" in dns_name_format:
return split_dns_name_format[-2]
elif "amazonaws.com" in dns_name_format:
return split_dns_name_format[-3]
raise Exception("Region not found in dns_name_format")
def get_boolean_config_item_value(
config, config_section, config_item, default_value, emit_warning_message=True
):
warning_message = None
if not config.has_section(config_section):
warning_message = (
"Warning: config file does not have section %s." % config_section
)
elif not config.has_option(config_section, config_item):
warning_message = (
"Warning: config file does not have %s item in section %s."
% (config_item, config_section)
)
if warning_message:
if emit_warning_message:
sys.stdout.write(
"%s. You should be able to find a new config file in the same folder as current config file %s. "
"Consider update the new config file to latest config file. Use the default value [%s = %s]."
% (warning_message, CONFIG_FILE, config_item, default_value)
)
return default_value
return config.getboolean(config_section, config_item)
def fetch_ec2_metadata_token_disabled(config):
return get_boolean_config_item_value(
config,
CONFIG_SECTION,
DISABLE_FETCH_EC2_METADATA_TOKEN_ITEM,
default_value=False,
)
def get_aws_ec2_metadata_token(timeout=DEFAULT_TIMEOUT):
    # Normally the session token is fetched within 10ms; set a timeout of 50ms here to abort the request
    # and return None if the token has not been returned within 50ms.
try:
opener = build_opener(HTTPHandler)
request = Request(INSTANCE_METADATA_TOKEN_URL)
request.add_header("X-aws-ec2-metadata-token-ttl-seconds", "21600")
request.get_method = lambda: "PUT"
try:
res = opener.open(request, timeout=timeout)
return res.read()
except socket.timeout:
exception_message = "Timeout when getting the aws ec2 metadata token"
except HTTPError as e:
exception_message = "Failed to fetch token due to %s" % e
except Exception as e:
exception_message = (
"Unknown error when fetching aws ec2 metadata token, %s" % e
)
logging.debug(exception_message)
return None
except NameError:
headers = {"X-aws-ec2-metadata-token-ttl-seconds": "21600"}
req = Request(INSTANCE_METADATA_TOKEN_URL, headers=headers, method="PUT")
try:
res = urlopen(req, timeout=timeout)
return res.read()
except socket.timeout:
exception_message = "Timeout when getting the aws ec2 metadata token"
except HTTPError as e:
exception_message = "Failed to fetch token due to %s" % e
except Exception as e:
exception_message = (
"Unknown error when fetching aws ec2 metadata token, %s" % e
)
logging.debug(exception_message)
return None
def get_aws_security_credentials(
config, use_iam, region, awsprofile=None, aws_creds_uri=None
):
"""
Lookup AWS security credentials (access key ID and secret access key). Adapted credentials provider chain from:
https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html and
https://docs.aws.amazon.com/sdk-for-java/v1/developer-guide/credentials.html
"""
if not use_iam:
return None, None
# attempt to lookup AWS security credentials through the credentials URI the ECS agent generated
if aws_creds_uri:
return get_aws_security_credentials_from_ecs(config, aws_creds_uri, True)
# attempt to lookup AWS security credentials in AWS credentials file (~/.aws/credentials)
# and configs file (~/.aws/config) with given awsprofile
    # if the credentials are not present in the above file paths and botocore is present, attempt to assume the given awsprofile
if awsprofile:
return get_aws_security_credentials_from_awsprofile(awsprofile, True)
# attempt to lookup AWS security credentials through AWS_CONTAINER_CREDENTIALS_RELATIVE_URI environment variable
if ECS_URI_ENV in os.environ:
credentials, credentials_source = get_aws_security_credentials_from_ecs(
config, os.environ[ECS_URI_ENV], False
)
if credentials and credentials_source:
return credentials, credentials_source
# attempt to lookup AWS security credentials through AssumeRoleWithWebIdentity
# (e.g. for IAM Role for Service Accounts (IRSA) approach on EKS)
if (
WEB_IDENTITY_ROLE_ARN_ENV in os.environ
and WEB_IDENTITY_TOKEN_FILE_ENV in os.environ
):
credentials, credentials_source = get_aws_security_credentials_from_webidentity(
config,
os.environ[WEB_IDENTITY_ROLE_ARN_ENV],
os.environ[WEB_IDENTITY_TOKEN_FILE_ENV],
region,
False,
)
if credentials and credentials_source:
return credentials, credentials_source
# attempt to lookup AWS security credentials with IAM role name attached to instance
# through IAM role name security credentials lookup uri
iam_role_name = get_iam_role_name(config)
if iam_role_name:
(
credentials,
credentials_source,
) = get_aws_security_credentials_from_instance_metadata(config, iam_role_name)
if credentials and credentials_source:
return credentials, credentials_source
error_msg = (
"AWS Access Key ID and Secret Access Key are not found in AWS credentials file (%s), config file (%s), "
"from ECS credentials relative uri, or from the instance security credentials service"
% (AWS_CREDENTIALS_FILE, AWS_CONFIG_FILE)
)
fatal_error(error_msg, error_msg)
def get_aws_security_credentials_from_awsprofile(awsprofile, is_fatal=False):
for file_path in [AWS_CREDENTIALS_FILE, AWS_CONFIG_FILE]:
if os.path.exists(file_path):
credentials = credentials_file_helper(file_path, awsprofile)
if credentials["AccessKeyId"]:
logging.debug("Retrieved credentials from %s" % file_path)
return credentials, os.path.basename(file_path) + ":" + awsprofile
# If credentials are not defined in the aws credentials and config file, attempt to assume the named profile
credentials = botocore_credentials_helper(awsprofile)
if credentials["AccessKeyId"]:
logging.debug("Retrieved credentials from assumed profile %s" % awsprofile)
return credentials, "named_profile:" + awsprofile
# Fail if credentials cannot be fetched from the given awsprofile
if is_fatal:
log_message = (
"AWS security credentials not found in %s or %s under named profile [%s]"
% (AWS_CREDENTIALS_FILE, AWS_CONFIG_FILE, awsprofile)
)
fatal_error(log_message)
else:
return None, None
def get_aws_security_credentials_from_ecs(config, aws_creds_uri, is_fatal=False):
ecs_uri = ECS_TASK_METADATA_API + aws_creds_uri
ecs_unsuccessful_resp = (
"Unsuccessful retrieval of AWS security credentials at %s." % ecs_uri
)
ecs_url_error_msg = (
"Unable to reach %s to retrieve AWS security credentials. See %s for more info."
% (ecs_uri, SECURITY_CREDS_ECS_URI_HELP_URL)
)
ecs_security_dict = url_request_helper(
config, ecs_uri, ecs_unsuccessful_resp, ecs_url_error_msg
)
if ecs_security_dict and all(k in ecs_security_dict for k in CREDENTIALS_KEYS):
return ecs_security_dict, "ecs:" + aws_creds_uri
# Fail if credentials cannot be fetched from the given aws_creds_uri
if is_fatal:
fatal_error(ecs_unsuccessful_resp, ecs_unsuccessful_resp)
else:
return None, None
def get_aws_security_credentials_from_webidentity(
config, role_arn, token_file, region, is_fatal=False
):
try:
with open(token_file, "r") as f:
token = f.read()
except Exception as e:
if is_fatal:
unsuccessful_resp = "Error reading token file %s: %s" % (token_file, e)
fatal_error(unsuccessful_resp, unsuccessful_resp)
else:
return None, None
STS_ENDPOINT_URL = STS_ENDPOINT_URL_FORMAT.format(region)
webidentity_url = (
STS_ENDPOINT_URL
+ "?"
+ urlencode(
{
"Version": "2011-06-15",
"Action": "AssumeRoleWithWebIdentity",
"RoleArn": role_arn,
"RoleSessionName": "efs-mount-helper",
"WebIdentityToken": token,
}
)
)
unsuccessful_resp = (
"Unsuccessful retrieval of AWS security credentials at %s." % STS_ENDPOINT_URL
)
url_error_msg = (
"Unable to reach %s to retrieve AWS security credentials. See %s for more info."
% (STS_ENDPOINT_URL, SECURITY_CREDS_WEBIDENTITY_HELP_URL)
)
resp = url_request_helper(
config,
webidentity_url,
unsuccessful_resp,
url_error_msg,
headers={"Accept": "application/json"},
)
if resp:
creds = (
resp.get("AssumeRoleWithWebIdentityResponse", {})
.get("AssumeRoleWithWebIdentityResult", {})
.get("Credentials", {})
)
if all(k in creds for k in ["AccessKeyId", "SecretAccessKey", "SessionToken"]):
return {
"AccessKeyId": creds["AccessKeyId"],
"SecretAccessKey": creds["SecretAccessKey"],
"Token": creds["SessionToken"],
}, "webidentity:" + ",".join([role_arn, token_file])
# Fail if credentials cannot be fetched from the given aws_creds_uri
if is_fatal:
fatal_error(unsuccessful_resp, unsuccessful_resp)
else:
return None, None
def get_aws_security_credentials_from_instance_metadata(config, iam_role_name):
security_creds_lookup_url = INSTANCE_IAM_URL + iam_role_name
unsuccessful_resp = (
"Unsuccessful retrieval of AWS security credentials at %s."
% security_creds_lookup_url
)
url_error_msg = (
"Unable to reach %s to retrieve AWS security credentials. See %s for more info."
% (security_creds_lookup_url, SECURITY_CREDS_IAM_ROLE_HELP_URL)
)
iam_security_dict = url_request_helper(
config, security_creds_lookup_url, unsuccessful_resp, url_error_msg
)
if iam_security_dict and all(k in iam_security_dict for k in CREDENTIALS_KEYS):
return iam_security_dict, "metadata:"
else:
return None, None
def get_iam_role_name(config):
iam_role_unsuccessful_resp = (
"Unsuccessful retrieval of IAM role name at %s." % INSTANCE_IAM_URL
)
iam_role_url_error_msg = (
"Unable to reach %s to retrieve IAM role name. See %s for more info."
% (INSTANCE_IAM_URL, SECURITY_CREDS_IAM_ROLE_HELP_URL)
)
iam_role_name = url_request_helper(
config, INSTANCE_IAM_URL, iam_role_unsuccessful_resp, iam_role_url_error_msg
)
return iam_role_name
def credentials_file_helper(file_path, awsprofile):
aws_credentials_configs = read_config(file_path)
credentials = {"AccessKeyId": None, "SecretAccessKey": None, "Token": None}
try:
access_key = aws_credentials_configs.get(awsprofile, "aws_access_key_id")
secret_key = aws_credentials_configs.get(awsprofile, "aws_secret_access_key")
session_token = aws_credentials_configs.get(awsprofile, "aws_session_token")
credentials["AccessKeyId"] = access_key
credentials["SecretAccessKey"] = secret_key
credentials["Token"] = session_token
except NoOptionError as e:
if "aws_access_key_id" in str(e) or "aws_secret_access_key" in str(e):
logging.debug(
"aws_access_key_id or aws_secret_access_key not found in %s under named profile [%s]",
file_path,
awsprofile,
)
if "aws_session_token" in str(e):
logging.debug("aws_session_token not found in %s", file_path)
credentials["AccessKeyId"] = aws_credentials_configs.get(
awsprofile, "aws_access_key_id"
)
credentials["SecretAccessKey"] = aws_credentials_configs.get(
awsprofile, "aws_secret_access_key"
)
except NoSectionError:
logging.debug("No [%s] section found in config file %s", awsprofile, file_path)
return credentials
def botocore_credentials_helper(awsprofile):
    # This method retrieves credentials for the given AWS named profile using botocore; botocore assumes
    # that named profile and returns its credentials.
credentials = {"AccessKeyId": None, "SecretAccessKey": None, "Token": None}
if not BOTOCORE_PRESENT:
logging.error(
"Cannot find credentials for %s, to assume this profile, please install botocore first."
% awsprofile
)
return credentials
session = botocore.session.get_session()
session.set_config_variable("profile", awsprofile)
try:
frozen_credentials = session.get_credentials().get_frozen_credentials()
except ProfileNotFound as e:
fatal_error(
"%s, please add the [profile %s] section in the aws config file following %s and %s."
% (e, awsprofile, NAMED_PROFILE_HELP_URL, CONFIG_FILE_SETTINGS_HELP_URL)
)
credentials["AccessKeyId"] = frozen_credentials.access_key
credentials["SecretAccessKey"] = frozen_credentials.secret_key
credentials["Token"] = frozen_credentials.token
return credentials
def get_aws_profile(options, use_iam):
awsprofile = options.get("awsprofile")
if not awsprofile and use_iam:
for file_path in [AWS_CREDENTIALS_FILE, AWS_CONFIG_FILE]:
aws_credentials_configs = read_config(file_path)
# check if aws access key id is found under [default] section in current file and return 'default' if so
try:
access_key = aws_credentials_configs.get("default", "aws_access_key_id")
if access_key is not None:
return "default"
except (NoSectionError, NoOptionError):
continue
return awsprofile
def is_instance_metadata_url(url):
return url.startswith("http://169.254.169.254")
def url_request_helper(config, url, unsuccessful_resp, url_error_msg, headers={}):
try:
req = Request(url)
for k, v in headers.items():
req.add_header(k, v)
if not fetch_ec2_metadata_token_disabled(config) and is_instance_metadata_url(
url
):
# https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html
# IMDSv1 is a request/response method to access instance metadata
# IMDSv2 is a session-oriented method to access instance metadata
            # We expect the token retrieval to fail in a bridge networking environment (e.g. a container)
            # since the default hop limit for getting the token is 1. If the token retrieval times out,
            # we fall back to IMDSv1 instead.
token = get_aws_ec2_metadata_token()
if token:
req.add_header("X-aws-ec2-metadata-token", token)
request_resp = urlopen(req, timeout=1)
return get_resp_obj(request_resp, url, unsuccessful_resp)
except socket.timeout:
err_msg = "Request timeout"
except HTTPError as e:
# For instance enable with IMDSv2 and fetch token disabled, Unauthorized 401 error will be thrown
if (
e.code == 401
and fetch_ec2_metadata_token_disabled(config)
and is_instance_metadata_url(url)
):
logging.warning(
"Unauthorized request to instance metadata url %s, IMDSv2 is enabled on the instance, while fetching "
"ec2 metadata token is disabled. Please set the value of config item "
'"%s" to "false" in config file %s.'
% (url, DISABLE_FETCH_EC2_METADATA_TOKEN_ITEM, CONFIG_FILE)
)
err_msg = "Unable to reach the url at %s: status=%d, reason is %s" % (
url,
e.code,
e.reason,
)
except URLError as e:
err_msg = "Unable to reach the url at %s, reason is %s" % (url, e.reason)
if err_msg:
logging.debug("%s %s", url_error_msg, err_msg)
return None
def get_resp_obj(request_resp, url, unsuccessful_resp):
if request_resp.getcode() != 200:
logging.debug(
unsuccessful_resp + " %s: ResponseCode=%d", url, request_resp.getcode()
)
return None
resp_body = request_resp.read()
resp_body_type = type(resp_body)
try:
if resp_body_type is str:
resp_dict = json.loads(resp_body)
else:
resp_dict = json.loads(
resp_body.decode(
request_resp.headers.get_content_charset() or "us-ascii"
)
)
return resp_dict
except ValueError as e:
logging.info(
'ValueError parsing "%s" into json: %s. Returning response body.'
% (str(resp_body), e)
)
return resp_body if resp_body_type is str else resp_body.decode("utf-8")
def parse_options(options):
opts = {}
for o in options.split(","):
if "=" in o:
k, v = o.split("=")
opts[k] = v
else:
opts[o] = None
return opts
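# Illustrative example:
#   parse_options("tls,iam,tlsport=20049") == {"tls": None, "iam": None, "tlsport": "20049"}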
def get_tls_port_range(config):
lower_bound = config.getint(CONFIG_SECTION, "port_range_lower_bound")
upper_bound = config.getint(CONFIG_SECTION, "port_range_upper_bound")
if lower_bound >= upper_bound:
fatal_error(
'Configuration option "port_range_upper_bound" defined as %d '
'must be strictly greater than "port_range_lower_bound" defined as %d.'
% (upper_bound, lower_bound)
)
return lower_bound, upper_bound
def choose_tls_port(config, options):
if "tlsport" in options:
ports_to_try = [int(options["tlsport"])]
else:
lower_bound, upper_bound = get_tls_port_range(config)
tls_ports = list(range(lower_bound, upper_bound))
# Choose a random midpoint, and then try ports in-order from there
mid = random.randrange(len(tls_ports))
ports_to_try = tls_ports[mid:] + tls_ports[:mid]
assert len(tls_ports) == len(ports_to_try)
if "netns" not in options:
tls_port = find_tls_port_in_range(ports_to_try)
else:
with NetNS(nspath=options["netns"]):
tls_port = find_tls_port_in_range(ports_to_try)
if tls_port:
return tls_port
if "tlsport" in options:
fatal_error(
"Specified port [%s] is unavailable. Try selecting a different port."
% options["tlsport"]
)
else:
fatal_error(
"Failed to locate an available port in the range [%d, %d], try specifying a different port range in %s"
% (lower_bound, upper_bound, CONFIG_FILE)
)
def find_tls_port_in_range(ports_to_try):
sock = socket.socket()
for tls_port in ports_to_try:
try:
logging.info("binding %s", tls_port)
sock.bind(("localhost", tls_port))
sock.close()
return tls_port
except socket.error as e:
logging.info(e)
continue
sock.close()
return None
def is_ocsp_enabled(config, options):
if "ocsp" in options:
return True
elif "noocsp" in options:
return False
else:
return get_boolean_config_item_value(
config, CONFIG_SECTION, "stunnel_check_cert_validity", default_value=False
)
def get_mount_specific_filename(fs_id, mountpoint, tls_port):
return "%s.%s.%d" % (
fs_id,
os.path.abspath(mountpoint).replace(os.sep, ".").lstrip("."),
tls_port,
)
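# Illustrative example (hypothetical values, Linux path separator):
#   get_mount_specific_filename("fs-deadbeef", "/mnt/efs", 20049) == "fs-deadbeef.mnt.efs.20049"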
def serialize_stunnel_config(config, header=None):
lines = []
if header:
lines.append("[%s]" % header)
for k, v in config.items():
if type(v) is list:
for item in v:
lines.append("%s = %s" % (k, item))
else:
lines.append("%s = %s" % (k, v))
return lines
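# Illustrative example:
#   serialize_stunnel_config({"client": "yes"}, header="efs") == ["[efs]", "client = yes"]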
def add_stunnel_ca_options(efs_config, config, options, region):
if "cafile" in options:
stunnel_cafile = options["cafile"]
else:
try:
config_section = get_config_section(config, region)
stunnel_cafile = config.get(config_section, "stunnel_cafile")
logging.debug(
"Using stunnel_cafile %s in config section [%s]",
stunnel_cafile,
config_section,
)
except NoOptionError:
logging.debug(
"No CA file configured, using default CA file %s",
DEFAULT_STUNNEL_CAFILE,
)
stunnel_cafile = DEFAULT_STUNNEL_CAFILE
if not os.path.exists(stunnel_cafile):
fatal_error(
"Failed to find certificate authority file for verification",
'Failed to find CAfile "%s"' % stunnel_cafile,
)
efs_config["CAfile"] = stunnel_cafile
def get_config_section(config, region):
region_specific_config_section = "%s.%s" % (CONFIG_SECTION, region)
if config.has_section(region_specific_config_section):
config_section = region_specific_config_section
else:
config_section = CONFIG_SECTION
return config_section
def is_stunnel_option_supported(stunnel_output, stunnel_option_name):
supported = False
for line in stunnel_output:
if line.startswith(stunnel_option_name):
supported = True
break
if not supported:
logging.warning('stunnel does not support "%s"', stunnel_option_name)
return supported
def get_version_specific_stunnel_options():
stunnel_command = [_stunnel_bin(), "-help"]
proc = subprocess.Popen(
stunnel_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True
)
proc.wait()
_, err = proc.communicate()
stunnel_output = err.splitlines()
check_host_supported = is_stunnel_option_supported(stunnel_output, b"checkHost")
ocsp_aia_supported = is_stunnel_option_supported(stunnel_output, b"OCSPaia")
return check_host_supported, ocsp_aia_supported
def _stunnel_bin():
return find_command_path(
"stunnel",
"Please install it following the instructions at "
"https://docs.aws.amazon.com/efs/latest/ug/using-amazon-efs-utils.html#upgrading-stunnel",
)
def find_command_path(command, install_method):
try:
env_path = (
"/sbin:/usr/sbin:/usr/local/sbin:/root/bin:/usr/local/bin:/usr/bin:/bin"
)
os.putenv("PATH", env_path)
path = subprocess.check_output(["which", command])
except subprocess.CalledProcessError as e:
fatal_error(
"Failed to locate %s in %s - %s" % (command, env_path, install_method), e
)
return path.strip().decode()
def get_system_release_version():
# MacOS does not maintain paths /etc/os-release and /etc/sys-release
if check_if_platform_is_mac():
return platform.platform()
try:
with open(SYSTEM_RELEASE_PATH) as f:
return f.read().strip()
except IOError:
logging.debug("Unable to read %s", SYSTEM_RELEASE_PATH)
try:
with open(OS_RELEASE_PATH) as f:
for line in f:
if "PRETTY_NAME" in line:
return line.split("=")[1].strip()
except IOError:
logging.debug("Unable to read %s", OS_RELEASE_PATH)
return DEFAULT_UNKNOWN_VALUE
def write_stunnel_config_file(
config,
state_file_dir,
fs_id,
mountpoint,
tls_port,
dns_name,
verify_level,
ocsp_enabled,
options,
region,
log_dir=LOG_DIR,
cert_details=None,
fallback_ip_address=None,
):
"""
Serializes stunnel configuration to a file. Unfortunately this does not conform to Python's config file format, so we have to
hand-serialize it.
"""
mount_filename = get_mount_specific_filename(fs_id, mountpoint, tls_port)
system_release_version = get_system_release_version()
global_config = dict(STUNNEL_GLOBAL_CONFIG)
if any(
release in system_release_version
for release in SKIP_NO_SO_BINDTODEVICE_RELEASES
):
global_config["socket"].remove("a:SO_BINDTODEVICE=lo")
if get_boolean_config_item_value(
config, CONFIG_SECTION, "stunnel_debug_enabled", default_value=False
):
global_config["debug"] = "debug"
if config.has_option(CONFIG_SECTION, "stunnel_logs_file"):
global_config["output"] = config.get(
CONFIG_SECTION, "stunnel_logs_file"
).replace("{fs_id}", fs_id)
else:
global_config["output"] = os.path.join(
log_dir, "%s.stunnel.log" % mount_filename
)
efs_config = dict(STUNNEL_EFS_CONFIG)
efs_config["accept"] = efs_config["accept"] % tls_port
if fallback_ip_address:
efs_config["connect"] = efs_config["connect"] % fallback_ip_address
else:
efs_config["connect"] = efs_config["connect"] % dns_name
efs_config["verify"] = verify_level
if verify_level > 0:
add_stunnel_ca_options(efs_config, config, options, region)
if cert_details:
efs_config["cert"] = cert_details["certificate"]
efs_config["key"] = cert_details["privateKey"]
check_host_supported, ocsp_aia_supported = get_version_specific_stunnel_options()
tls_controls_message = (
"WARNING: Your client lacks sufficient controls to properly enforce TLS. Please upgrade stunnel, "
'or disable "%%s" in %s.\nSee %s for more detail.'
% (CONFIG_FILE, "https://docs.aws.amazon.com/console/efs/troubleshooting-tls")
)
if get_boolean_config_item_value(
config, CONFIG_SECTION, "stunnel_check_cert_hostname", default_value=True
):
if check_host_supported:
            # The stunnel checkHost option checks whether the specified DNS host name or wildcard matches any of
            # the values provided in the peer certificate's CN fields. After the AZ field was introduced in the
            # dns name, the host name in the stunnel config file is no longer valid, so remove the az info here.
efs_config["checkHost"] = dns_name[dns_name.index(fs_id) :]
else:
fatal_error(tls_controls_message % "stunnel_check_cert_hostname")
# Only use the config setting if the override is not set
if ocsp_enabled:
if ocsp_aia_supported:
efs_config["OCSPaia"] = "yes"
else:
fatal_error(tls_controls_message % "stunnel_check_cert_validity")
if not any(
release in system_release_version for release in SKIP_NO_LIBWRAP_RELEASES
):
efs_config["libwrap"] = "no"
stunnel_config = "\n".join(
serialize_stunnel_config(global_config)
+ serialize_stunnel_config(efs_config, "efs")
)
logging.debug("Writing stunnel configuration:\n%s", stunnel_config)
stunnel_config_file = os.path.join(
state_file_dir, "stunnel-config.%s" % mount_filename
)
with open(stunnel_config_file, "w") as f:
f.write(stunnel_config)
return stunnel_config_file
def write_tls_tunnel_state_file(
fs_id,
mountpoint,
tls_port,
tunnel_pid,
command,
files,
state_file_dir,
cert_details=None,
):
"""
Return the name of the temporary file containing TLS tunnel state, prefixed with a '~'. This file needs to be renamed to a
non-temporary version following a successful mount.
"""
state_file = "~" + get_mount_specific_filename(fs_id, mountpoint, tls_port)
state = {
"pid": tunnel_pid,
"cmd": command,
"files": files,
"mount_time": time.time(),
}
if cert_details:
state.update(cert_details)
with open(os.path.join(state_file_dir, state_file), "w") as f:
json.dump(state, f)
return state_file
def test_tunnel_process(tunnel_proc, fs_id):
tunnel_proc.poll()
if tunnel_proc.returncode is not None:
out, err = tunnel_proc.communicate()
fatal_error(
"Failed to initialize TLS tunnel for %s" % fs_id,
'Failed to start TLS tunnel (errno=%d). stdout="%s" stderr="%s"'
% (tunnel_proc.returncode, out.strip(), err.strip()),
)
def poll_tunnel_process(tunnel_proc, fs_id, mount_completed):
"""
poll the tunnel process health every .5s during the mount attempt to fail fast if the tunnel dies - since this is not called
from the main thread, if the tunnel fails, exit uncleanly with os._exit
"""
while not mount_completed.is_set():
try:
test_tunnel_process(tunnel_proc, fs_id)
except SystemExit as e:
os._exit(e.code)
mount_completed.wait(0.5)
def get_init_system(comm_file="/proc/1/comm"):
init_system = DEFAULT_UNKNOWN_VALUE
if not check_if_platform_is_mac():
try:
with open(comm_file) as f:
init_system = f.read().strip()
except IOError:
logging.warning("Unable to read %s", comm_file)
else:
init_system = "launchd"
logging.debug("Identified init system: %s", init_system)
return init_system
def check_network_target(fs_id):
with open(os.devnull, "w") as devnull:
if not check_if_platform_is_mac():
rc = subprocess.call(
["systemctl", "status", "network.target"],
stdout=devnull,
stderr=devnull,
close_fds=True,
)
else:
rc = subprocess.call(
["sudo", "ifconfig", "en0"],
stdout=devnull,
stderr=devnull,
close_fds=True,
)
if rc != 0:
fatal_error(
'Failed to mount %s because the network was not yet available, add "_netdev" to your mount options'
% fs_id,
exit_code=0,
)
def check_network_status(fs_id, init_system):
if init_system != "systemd":
logging.debug("Not testing network on non-systemd init systems")
return
check_network_target(fs_id)
def start_watchdog(init_system):
if init_system == "init":
proc = subprocess.Popen(
["/sbin/status", WATCHDOG_SERVICE],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
)
status, _ = proc.communicate()
if "stop" in str(status):
subprocess.Popen(
["/sbin/start", WATCHDOG_SERVICE],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
close_fds=True,
)
elif "start" in str(status):
logging.debug("%s is already running", WATCHDOG_SERVICE)
elif init_system == "systemd":
rc = subprocess.call(
["systemctl", "is-active", "--quiet", WATCHDOG_SERVICE], close_fds=True
)
if rc != 0:
subprocess.Popen(
["systemctl", "start", WATCHDOG_SERVICE],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
close_fds=True,
)
else:
logging.debug("%s is already running", WATCHDOG_SERVICE)
elif init_system == "launchd":
        # use the exit status of `launchctl list` to decide whether the watchdog is already loaded
        rc = subprocess.call(
            ["sudo", "launchctl", "list", WATCHDOG_SERVICE],
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
            close_fds=True,
        )
        if rc != 0:
if not os.path.exists(WATCHDOG_SERVICE_PLIST_PATH):
fatal_error(
"Watchdog plist file missing. Copy the watchdog plist file in directory /Library/LaunchAgents"
)
subprocess.Popen(
["sudo", "launchctl", "load", WATCHDOG_SERVICE_PLIST_PATH],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
close_fds=True,
)
else:
logging.debug("%s is already running", WATCHDOG_SERVICE)
else:
error_message = 'Could not start %s, unrecognized init system "%s"' % (
WATCHDOG_SERVICE,
init_system,
)
sys.stderr.write("%s\n" % error_message)
logging.warning(error_message)
def create_required_directory(config, directory):
mode = 0o750
try:
mode_str = config.get(CONFIG_SECTION, "state_file_dir_mode")
try:
mode = int(mode_str, 8)
except ValueError:
logging.warning(
'Bad state_file_dir_mode "%s" in config file "%s"',
mode_str,
CONFIG_FILE,
)
except NoOptionError:
pass
try:
os.makedirs(directory, mode)
except OSError as e:
if errno.EEXIST != e.errno or not os.path.isdir(directory):
raise
@contextmanager
def bootstrap_tls(
config,
init_system,
dns_name,
fs_id,
mountpoint,
options,
state_file_dir=STATE_FILE_DIR,
fallback_ip_address=None,
):
tls_port = choose_tls_port(config, options)
# override the tlsport option so that we can later override the port the NFS client uses to connect to stunnel.
# if the user has specified tlsport=X at the command line this will just re-set tlsport to X.
options["tlsport"] = tls_port
use_iam = "iam" in options
ap_id = options.get("accesspoint")
cert_details = {}
security_credentials = None
client_info = get_client_info(config)
region = get_target_region(config)
if use_iam:
aws_creds_uri = options.get("awscredsuri")
if aws_creds_uri:
kwargs = {"aws_creds_uri": aws_creds_uri}
else:
kwargs = {"awsprofile": get_aws_profile(options, use_iam)}
security_credentials, credentials_source = get_aws_security_credentials(
config, use_iam, region, **kwargs
)
if credentials_source:
cert_details["awsCredentialsMethod"] = credentials_source
if ap_id:
cert_details["accessPoint"] = ap_id
# additional symbol appended to avoid naming collisions
cert_details["mountStateDir"] = (
get_mount_specific_filename(fs_id, mountpoint, tls_port) + "+"
)
# common name for certificate signing request is max 64 characters
cert_details["commonName"] = socket.gethostname()[0:64]
region = get_target_region(config)
cert_details["region"] = region
cert_details["certificateCreationTime"] = create_certificate(
config,
cert_details["mountStateDir"],
cert_details["commonName"],
cert_details["region"],
fs_id,
security_credentials,
ap_id,
client_info,
base_path=state_file_dir,
)
cert_details["certificate"] = os.path.join(
state_file_dir, cert_details["mountStateDir"], "certificate.pem"
)
cert_details["privateKey"] = get_private_key_path()
cert_details["fsId"] = fs_id
start_watchdog(init_system)
if not os.path.exists(state_file_dir):
create_required_directory(config, state_file_dir)
verify_level = int(options.get("verify", DEFAULT_STUNNEL_VERIFY_LEVEL))
ocsp_enabled = is_ocsp_enabled(config, options)
stunnel_config_file = write_stunnel_config_file(
config,
state_file_dir,
fs_id,
mountpoint,
tls_port,
dns_name,
verify_level,
ocsp_enabled,
options,
region,
cert_details=cert_details,
fallback_ip_address=fallback_ip_address,
)
tunnel_args = [_stunnel_bin(), stunnel_config_file]
if "netns" in options:
tunnel_args = ["nsenter", "--net=" + options["netns"]] + tunnel_args
# launch the tunnel in a process group so if it has any child processes, they can be killed easily by the mount watchdog
logging.info('Starting TLS tunnel: "%s"', " ".join(tunnel_args))
tunnel_proc = subprocess.Popen(
tunnel_args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
preexec_fn=os.setsid,
close_fds=True,
)
logging.info("Started TLS tunnel, pid: %d", tunnel_proc.pid)
temp_tls_state_file = write_tls_tunnel_state_file(
fs_id,
mountpoint,
tls_port,
tunnel_proc.pid,
tunnel_args,
[stunnel_config_file],
state_file_dir,
cert_details=cert_details,
)
if "netns" not in options:
test_tlsport(options["tlsport"])
else:
with NetNS(nspath=options["netns"]):
test_tlsport(options["tlsport"])
try:
yield tunnel_proc
finally:
os.rename(
os.path.join(state_file_dir, temp_tls_state_file),
os.path.join(state_file_dir, temp_tls_state_file[1:]),
)
def test_tlsport(tlsport):
retry_times = 5
while not verify_tlsport_can_be_connected(tlsport) and retry_times > 0:
logging.debug(
"The tlsport %s cannot be connected yet, sleep %s(s), %s retry time(s) left",
            tlsport,
            DEFAULT_TIMEOUT,
retry_times,
)
time.sleep(DEFAULT_TIMEOUT)
retry_times -= 1
def check_if_nfsvers_is_compatible_with_macos(options):
# MacOS does not support NFSv4.1
if (
("nfsvers" in options and options["nfsvers"] == "4.1")
or ("vers" in options and options["vers"] == "4.1")
or ("minorversion" in options and options["minorversion"] == 1)
):
fatal_error("NFSv4.1 is not supported on MacOS, please switch to NFSv4.0")
def get_nfs_mount_options(options):
# If you change these options, update the man page as well at man/mount.efs.8
if "nfsvers" not in options and "vers" not in options:
options["nfsvers"] = "4.1" if not check_if_platform_is_mac() else "4.0"
if check_if_platform_is_mac():
check_if_nfsvers_is_compatible_with_macos(options)
if "rsize" not in options:
options["rsize"] = "1048576"
if "wsize" not in options:
options["wsize"] = "1048576"
if "soft" not in options and "hard" not in options:
options["hard"] = None
if "timeo" not in options:
options["timeo"] = "600"
if "retrans" not in options:
options["retrans"] = "2"
if "noresvport" not in options:
options["noresvport"] = None
# Set mountport to 2049 for MacOS
if check_if_platform_is_mac():
options["mountport"] = "2049"
if "tls" in options:
options["port"] = options["tlsport"]
def to_nfs_option(k, v):
if v is None:
return k
return "%s=%s" % (str(k), str(v))
nfs_options = [
to_nfs_option(k, v) for k, v in options.items() if k not in EFS_ONLY_OPTIONS
]
return ",".join(nfs_options)
def mount_nfs(config, dns_name, path, mountpoint, options, fallback_ip_address=None):
if "tls" in options:
mount_path = "127.0.0.1:%s" % path
elif fallback_ip_address:
mount_path = "%s:%s" % (fallback_ip_address, path)
else:
mount_path = "%s:%s" % (dns_name, path)
if not check_if_platform_is_mac():
command = [
"/sbin/mount.nfs4",
mount_path,
mountpoint,
"-o",
get_nfs_mount_options(options),
]
else:
command = [
"/sbin/mount_nfs",
"-o",
get_nfs_mount_options(options),
mount_path,
mountpoint,
]
if "netns" in options:
command = ["nsenter", "--net=" + options["netns"]] + command
logging.info('Executing: "%s"', " ".join(command))
proc = subprocess.Popen(
command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True
)
out, err = proc.communicate()
if proc.returncode == 0:
message = "Successfully mounted %s at %s" % (dns_name, mountpoint)
logging.info(message)
publish_cloudwatch_log(CLOUDWATCHLOG_AGENT, message)
        # only optimize the readahead window after the mount succeeds
optimize_readahead_window(mountpoint, options, config)
else:
message = 'Failed to mount %s at %s: returncode=%d, stderr="%s"' % (
dns_name,
mountpoint,
proc.returncode,
err.strip(),
)
fatal_error(err.strip(), message, proc.returncode)
def usage(out, exit_code=1):
out.write(
"Usage: mount.efs [--version] [-h|--help] <fsname> <mountpoint> [-o <options>]\n"
)
sys.exit(exit_code)
def parse_arguments_early_exit(args=None):
"""Parse arguments, checking for early exit conditions only"""
if args is None:
args = sys.argv
if "-h" in args[1:] or "--help" in args[1:]:
usage(out=sys.stdout, exit_code=0)
if "--version" in args[1:]:
sys.stdout.write("%s Version: %s\n" % (args[0], VERSION))
sys.exit(0)
def parse_arguments(config, args=None):
"""Parse arguments, return (fsid, path, mountpoint, options)"""
if args is None:
args = sys.argv
fsname = None
mountpoint = None
options = {}
if not check_if_platform_is_mac():
if len(args) > 1:
fsname = args[1]
if len(args) > 2:
mountpoint = args[2]
if len(args) > 4 and "-o" in args[:-1]:
options_index = args.index("-o") + 1
options = parse_options(args[options_index])
else:
if len(args) > 1:
fsname = args[-2]
if len(args) > 2:
mountpoint = args[-1]
if len(args) > 4 and "-o" in args[:-2]:
for arg in args[1:-2]:
if arg != "-o":
options.update(parse_options(arg))
if not fsname or not mountpoint:
usage(out=sys.stderr)
    # We treat az as an option when the customer mounts using the dns name of an az mount target;
    # even if they don't pass az as an option, we update the options with that info.
fs_id, path, az = match_device(config, fsname, options)
return fs_id, path, mountpoint, add_field_in_options(options, "az", az)
def get_client_info(config):
client_info = {}
# source key/value pair in config file
if config.has_option(CLIENT_INFO_SECTION, "source"):
client_source = config.get(CLIENT_INFO_SECTION, "source")
if 0 < len(client_source) <= CLIENT_SOURCE_STR_LEN_LIMIT:
client_info["source"] = client_source
if not client_info.get("source"):
if check_if_platform_is_mac():
client_info["source"] = DEFAULT_MACOS_VALUE
else:
client_info["source"] = DEFAULT_UNKNOWN_VALUE
client_info["efs_utils_version"] = VERSION
return client_info
def create_certificate(
config,
mount_name,
common_name,
region,
fs_id,
security_credentials,
ap_id,
client_info,
base_path=STATE_FILE_DIR,
):
current_time = get_utc_now()
tls_paths = tls_paths_dictionary(mount_name, base_path)
certificate_config = os.path.join(tls_paths["mount_dir"], "config.conf")
certificate_signing_request = os.path.join(tls_paths["mount_dir"], "request.csr")
certificate = os.path.join(tls_paths["mount_dir"], "certificate.pem")
ca_dirs_check(config, tls_paths["database_dir"], tls_paths["certs_dir"])
ca_supporting_files_check(
tls_paths["index"],
tls_paths["index_attr"],
tls_paths["serial"],
tls_paths["rand"],
)
private_key = check_and_create_private_key(base_path)
if security_credentials:
public_key = os.path.join(tls_paths["mount_dir"], "publicKey.pem")
create_public_key(private_key, public_key)
create_ca_conf(
certificate_config,
common_name,
tls_paths["mount_dir"],
private_key,
current_time,
region,
fs_id,
security_credentials,
ap_id,
client_info,
)
create_certificate_signing_request(
certificate_config, private_key, certificate_signing_request
)
not_before = get_certificate_timestamp(current_time, minutes=-NOT_BEFORE_MINS)
not_after = get_certificate_timestamp(current_time, hours=NOT_AFTER_HOURS)
cmd = "openssl ca -startdate %s -enddate %s -selfsign -batch -notext -config %s -in %s -out %s" % (
not_before,
not_after,
certificate_config,
certificate_signing_request,
certificate,
)
subprocess_call(cmd, "Failed to create self-signed client-side certificate")
return current_time.strftime(CERT_DATETIME_FORMAT)
def get_private_key_path():
"""Wrapped for mocking purposes in unit tests"""
return PRIVATE_KEY_FILE
def check_and_create_private_key(base_path=STATE_FILE_DIR):
# Creating RSA private keys is slow, so we will create one private key and allow mounts to share it.
# This means, however, that we have to include a locking mechanism to ensure that the private key is
# atomically created, as mounts occurring in parallel may try to create the key simultaneously.
key = get_private_key_path()
@contextmanager
def open_lock_file():
lock_file = os.path.join(base_path, "efs-utils-lock")
f = os.open(lock_file, os.O_CREAT | os.O_DSYNC | os.O_EXCL | os.O_RDWR)
try:
lock_file_contents = "PID: %s" % os.getpid()
os.write(f, lock_file_contents.encode("utf-8"))
yield f
finally:
os.close(f)
os.remove(lock_file)
def do_with_lock(function):
while True:
try:
with open_lock_file():
return function()
except OSError as e:
if e.errno == errno.EEXIST:
logging.info(
"Failed to take out private key creation lock, sleeping %s (s)",
DEFAULT_TIMEOUT,
)
time.sleep(DEFAULT_TIMEOUT)
else:
raise
def generate_key():
if os.path.isfile(key):
return
cmd = (
"openssl genpkey -algorithm RSA -out %s -pkeyopt rsa_keygen_bits:3072" % key
)
subprocess_call(cmd, "Failed to create private key")
read_only_mode = 0o400
os.chmod(key, read_only_mode)
do_with_lock(generate_key)
return key
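# A brief note on the locking above: if two mounts race, the loser of the O_EXCL open sees
# EEXIST, sleeps DEFAULT_TIMEOUT and retries; once the winner removes the lock file, the loser
# re-enters generate_key(), finds the key already on disk and returns without regenerating it.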
def create_certificate_signing_request(config_path, private_key, csr_path):
cmd = "openssl req -new -config %s -key %s -out %s" % (
config_path,
private_key,
csr_path,
)
subprocess_call(cmd, "Failed to create certificate signing request (csr)")
def create_ca_conf(
config_path,
common_name,
directory,
private_key,
date,
region,
fs_id,
security_credentials,
ap_id,
client_info,
):
"""Populate ca/req configuration file with fresh configurations at every mount since SigV4 signature can change"""
public_key_path = os.path.join(directory, "publicKey.pem")
ca_extension_body = ca_extension_builder(
ap_id, security_credentials, fs_id, client_info
)
efs_client_auth_body = (
efs_client_auth_builder(
public_key_path,
security_credentials["AccessKeyId"],
security_credentials["SecretAccessKey"],
date,
region,
fs_id,
security_credentials["Token"],
)
if security_credentials
else ""
)
efs_client_info_body = efs_client_info_builder(client_info) if client_info else ""
full_config_body = CA_CONFIG_BODY % (
directory,
private_key,
common_name,
ca_extension_body,
efs_client_auth_body,
efs_client_info_body,
)
with open(config_path, "w") as f:
f.write(full_config_body)
return full_config_body
def ca_extension_builder(ap_id, security_credentials, fs_id, client_info):
ca_extension_str = "[ v3_ca ]\nsubjectKeyIdentifier = hash"
if ap_id:
ca_extension_str += "\n1.3.6.1.4.1.4843.7.1 = ASN1:UTF8String:" + ap_id
if security_credentials:
ca_extension_str += "\n1.3.6.1.4.1.4843.7.2 = ASN1:SEQUENCE:efs_client_auth"
ca_extension_str += "\n1.3.6.1.4.1.4843.7.3 = ASN1:UTF8String:" + fs_id
if client_info:
ca_extension_str += "\n1.3.6.1.4.1.4843.7.4 = ASN1:SEQUENCE:efs_client_info"
return ca_extension_str
def efs_client_auth_builder(
public_key_path,
access_key_id,
secret_access_key,
date,
region,
fs_id,
session_token=None,
):
public_key_hash = get_public_key_sha1(public_key_path)
canonical_request = create_canonical_request(
public_key_hash, date, access_key_id, region, fs_id, session_token
)
string_to_sign = create_string_to_sign(canonical_request, date, region)
signature = calculate_signature(string_to_sign, date, secret_access_key, region)
efs_client_auth_str = "[ efs_client_auth ]"
efs_client_auth_str += "\naccessKeyId = UTF8String:" + access_key_id
efs_client_auth_str += "\nsignature = OCTETSTRING:" + signature
efs_client_auth_str += "\nsigv4DateTime = UTCTIME:" + date.strftime(
CERT_DATETIME_FORMAT
)
if session_token:
efs_client_auth_str += "\nsessionToken = EXPLICIT:0,UTF8String:" + session_token
return efs_client_auth_str
def efs_client_info_builder(client_info):
efs_client_info_str = "[ efs_client_info ]"
for key, value in client_info.items():
efs_client_info_str += "\n%s = UTF8String:%s" % (key, value)
return efs_client_info_str
def create_public_key(private_key, public_key):
cmd = "openssl rsa -in %s -outform PEM -pubout -out %s" % (private_key, public_key)
subprocess_call(cmd, "Failed to create public key")
def subprocess_call(cmd, error_message):
"""Helper method to run shell openssl command and to handle response error messages"""
retry_times = 3
for retry in range(retry_times):
process = subprocess.Popen(
cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True
)
(output, err) = process.communicate()
rc = process.poll()
if rc != 0:
logging.error(
'Command %s failed, rc=%s, stdout="%s", stderr="%s"'
% (cmd, rc, output, err),
exc_info=True,
)
try:
process.kill()
except OSError:
# Silently fail if the subprocess has exited already
pass
else:
return output, err
error_message = "%s, error is: %s" % (error_message, err)
fatal_error(error_message, error_message)
def ca_dirs_check(config, database_dir, certs_dir):
"""Check if mount's database and certs directories exist and if not, create directories (also create all intermediate
directories if they don't exist)."""
if not os.path.exists(database_dir):
create_required_directory(config, database_dir)
if not os.path.exists(certs_dir):
create_required_directory(config, certs_dir)
def ca_supporting_files_check(index_path, index_attr_path, serial_path, rand_path):
"""Recreate all supporting openssl ca and req files if they're not present in their respective directories"""
if not os.path.isfile(index_path):
open(index_path, "w").close()
if not os.path.isfile(index_attr_path):
with open(index_attr_path, "w+") as f:
f.write("unique_subject = no")
if not os.path.isfile(serial_path):
with open(serial_path, "w+") as f:
f.write("00")
if not os.path.isfile(rand_path):
open(rand_path, "w").close()
def get_certificate_timestamp(current_time, **kwargs):
updated_time = current_time + timedelta(**kwargs)
return updated_time.strftime(CERT_DATETIME_FORMAT)
def get_utc_now():
"""
Wrapped for patching purposes in unit tests
"""
return datetime.utcnow()
def assert_root():
if os.geteuid() != 0:
sys.stderr.write("only root can run mount.efs\n")
sys.exit(1)
def read_config(config_file=CONFIG_FILE):
try:
p = ConfigParser.SafeConfigParser()
except AttributeError:
p = ConfigParser()
p.read(config_file)
return p
def bootstrap_logging(config, log_dir=LOG_DIR):
raw_level = config.get(CONFIG_SECTION, "logging_level")
levels = {
"debug": logging.DEBUG,
"info": logging.INFO,
"warning": logging.WARNING,
"error": logging.ERROR,
"critical": logging.CRITICAL,
}
level = levels.get(raw_level.lower())
level_error = False
if not level:
# delay logging error about malformed log level until after logging is configured
level_error = True
level = logging.INFO
max_bytes = config.getint(CONFIG_SECTION, "logging_max_bytes")
file_count = config.getint(CONFIG_SECTION, "logging_file_count")
handler = RotatingFileHandler(
os.path.join(log_dir, LOG_FILE), maxBytes=max_bytes, backupCount=file_count
)
handler.setFormatter(
logging.Formatter(fmt="%(asctime)s - %(levelname)s - %(message)s")
)
logger = logging.getLogger()
logger.setLevel(level)
logger.addHandler(handler)
if level_error:
logging.error(
'Malformed logging level "%s", setting logging level to %s',
raw_level,
level,
)
def get_dns_name_and_fallback_mount_target_ip_address(config, fs_id, options):
def _validate_replacement_field_count(format_str, expected_ct):
if format_str.count("{") != expected_ct or format_str.count("}") != expected_ct:
raise ValueError(
"DNS name format has an incorrect number of replacement fields"
)
dns_name_format = config.get(CONFIG_SECTION, "dns_name_format")
if "{fs_id}" not in dns_name_format:
raise ValueError("DNS name format must include {fs_id}")
format_args = {"fs_id": fs_id}
expected_replacement_field_ct = 1
if "{az}" in dns_name_format:
az = options.get("az")
if az:
expected_replacement_field_ct += 1
format_args["az"] = az
else:
dns_name_format = dns_name_format.replace("{az}.", "")
if "{region}" in dns_name_format:
expected_replacement_field_ct += 1
format_args["region"] = get_target_region(config)
if "{dns_name_suffix}" in dns_name_format:
expected_replacement_field_ct += 1
config_section = CONFIG_SECTION
region = format_args.get("region")
if region:
config_section = get_config_section(config, region)
format_args["dns_name_suffix"] = config.get(config_section, "dns_name_suffix")
logging.debug(
"Using dns_name_suffix %s in config section [%s]",
format_args.get("dns_name_suffix"),
config_section,
)
_validate_replacement_field_count(dns_name_format, expected_replacement_field_ct)
dns_name = dns_name_format.format(**format_args)
if "mounttargetip" in options:
ip_address = options.get("mounttargetip")
logging.info(
"Use the mount target ip address %s provided in the mount options to mount."
% ip_address
)
try:
mount_target_ip_address_can_be_resolved(
ip_address,
passed_via_options=True,
network_namespace=options.get("netns") if "netns" in options else None,
)
return dns_name, options.get("mounttargetip")
except FallbackException as e:
fallback_message = e.message
throw_ip_address_connect_failure_with_fallback_message(
ip_address=ip_address, fallback_message=fallback_message
)
if dns_name_can_be_resolved(dns_name):
return dns_name, None
logging.info(
"Failed to resolve %s, attempting to lookup mount target ip address using botocore.",
dns_name,
)
try:
fallback_mount_target_ip_address = get_fallback_mount_target_ip_address(
config, options, fs_id, dns_name
)
logging.info(
"Found fall back mount target ip address %s for file system %s",
fallback_mount_target_ip_address,
fs_id,
)
return dns_name, fallback_mount_target_ip_address
except FallbackException as e:
fallback_message = e.message
throw_dns_resolve_failure_with_fallback_message(dns_name, fallback_message)
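# Example (hypothetical values): with dns_name_format "{az}.{fs_id}.efs.{region}.{dns_name_suffix}",
# fs_id "fs-12345678", az option "us-east-1a", region "us-east-1" and dns_name_suffix "amazonaws.com",
# the constructed dns_name is "us-east-1a.fs-12345678.efs.us-east-1.amazonaws.com"; without an az
# option the "{az}." part is dropped, yielding "fs-12345678.efs.us-east-1.amazonaws.com".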
def get_fallback_mount_target_ip_address(config, options, fs_id, dns_name):
fall_back_to_ip_address_enabled = (
check_if_fall_back_to_mount_target_ip_address_is_enabled(config)
)
if not fall_back_to_ip_address_enabled:
fallback_message = (
"Fallback to mount target ip address feature is not enabled in config file %s."
% CONFIG_FILE
)
raise FallbackException(fallback_message)
if not BOTOCORE_PRESENT:
fallback_message = "Failed to import necessary dependency botocore, please install botocore first."
raise FallbackException(fallback_message)
mount_target_ip_address = None
try:
mount_target_ip_address = get_fallback_mount_target_ip_address_helper(
config, options, fs_id
)
mount_target_ip_address_can_be_resolved(
mount_target_ip_address,
network_namespace=options.get("netns") if "netns" in options else None,
)
return mount_target_ip_address
except FallbackException as e:
throw_ip_address_connect_failure_with_fallback_message(
dns_name, mount_target_ip_address, e.message
)
def check_if_fall_back_to_mount_target_ip_address_is_enabled(config):
return get_boolean_config_item_value(
config,
CONFIG_SECTION,
FALLBACK_TO_MOUNT_TARGET_IP_ADDRESS_ITEM,
default_value=DEFAULT_FALLBACK_ENABLED,
)
def dns_name_can_be_resolved(dns_name):
try:
socket.gethostbyname(dns_name)
return True
except socket.gaierror:
return False
def mount_target_ip_address_can_be_resolved(
mount_target_ip_address, passed_via_options=False, network_namespace=None
):
tries = 3
for attempt in range(tries):
try:
# Open a socket connection to mount target nfs port to verify that the mount target can be connected
if not network_namespace:
s = socket.create_connection((mount_target_ip_address, 2049), timeout=2)
else:
with NetNS(nspath=network_namespace):
s = socket.create_connection(
(mount_target_ip_address, 2049), timeout=2
)
s.close()
return True
except socket.timeout:
if attempt < tries - 1:
message = (
"The ip address %s cannot be connected yet, sleep 0.5s, %s retry time(s) left"
% (mount_target_ip_address, tries - attempt - 1)
)
logging.warning(message)
time.sleep(0.5)
continue
else:
raise FallbackException(
"Connection to the mount target IP address %s timeout. Please retry in 5 minutes if the "
"mount target is newly created. Otherwise check your VPC and security group "
"configuration to ensure your file system is reachable via TCP port 2049 from your "
"instance." % mount_target_ip_address
)
except Exception as e:
hint_message = (
" Please check if the mount target ip address passed via mount option is correct."
if passed_via_options
else ""
)
raise FallbackException(
"Unknown error when connecting to mount target IP address %s, %s.%s"
% (mount_target_ip_address, e, hint_message)
)
def get_fallback_mount_target_ip_address_helper(config, options, fs_id):
az_name = get_target_az(config, options)
ec2_client = get_botocore_client(config, "ec2", options)
efs_client = get_botocore_client(config, "efs", options)
mount_target = get_mount_target_in_az(efs_client, ec2_client, fs_id, az_name)
mount_target_ip = mount_target.get("IpAddress")
logging.debug("Found mount target ip address %s in AZ %s", mount_target_ip, az_name)
return mount_target_ip
def throw_dns_resolve_failure_with_fallback_message(dns_name, fallback_message=None):
fallback_message = (
"\nAttempting to lookup mount target ip address using botocore. %s"
% fallback_message
if fallback_message
else ""
)
message = (
'Failed to resolve "%s" - check that your file system ID is correct, and ensure that the VPC has an EFS mount '
"target for this file system ID.\nSee %s for more detail.%s"
) % (
dns_name,
"https://docs.aws.amazon.com/console/efs/mount-dns-name",
fallback_message,
)
fatal_error(message)
def throw_ip_address_connect_failure_with_fallback_message(
dns_name=None, ip_address=None, fallback_message=None
):
dns_message = 'Failed to resolve "%s". ' % dns_name if dns_name else ""
if not ip_address:
ip_address_message = (
"The file system mount target ip address cannot be found, please pass mount target ip "
"address via mount options. "
)
else:
ip_address_message = (
"Cannot connect to file system mount target ip address %s. " % ip_address
)
fallback_message = "\n%s" % fallback_message if fallback_message else ""
fatal_error("%s%s%s" % (dns_message, ip_address_message, fallback_message))
def tls_paths_dictionary(mount_name, base_path=STATE_FILE_DIR):
tls_dict = {
"mount_dir": os.path.join(base_path, mount_name),
# every mount will have its own ca mode assets due to lack of multi-threading support in openssl
"database_dir": os.path.join(base_path, mount_name, "database"),
"certs_dir": os.path.join(base_path, mount_name, "certs"),
"index": os.path.join(base_path, mount_name, "database/index.txt"),
"index_attr": os.path.join(base_path, mount_name, "database/index.txt.attr"),
"serial": os.path.join(base_path, mount_name, "database/serial"),
"rand": os.path.join(base_path, mount_name, "database/.rand"),
}
return tls_dict
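# Example (hypothetical mount name; base_path left at the STATE_FILE_DIR default): for
# mount_name "fs-12345678.mnt.20560" the dictionary maps "mount_dir" to
# <base_path>/fs-12345678.mnt.20560, with "database_dir" and "certs_dir" nested under it and
# the openssl ca support files ("index", "index_attr", "serial", "rand") inside the database dir.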
def get_public_key_sha1(public_key):
# truncating public key to remove the header and footer '-----(BEGIN|END) PUBLIC KEY-----'
with open(public_key, "r") as f:
lines = f.readlines()
lines = lines[1:-1]
key = "".join(lines)
key = bytearray(base64.b64decode(key))
# Parse the public key to pull out the actual key material by looking for the key BIT STRING
# Example:
# 0:d=0 hl=4 l= 418 cons: SEQUENCE
# 4:d=1 hl=2 l= 13 cons: SEQUENCE
# 6:d=2 hl=2 l= 9 prim: OBJECT :rsaEncryption
# 17:d=2 hl=2 l= 0 prim: NULL
# 19:d=1 hl=4 l= 399 prim: BIT STRING
cmd = "openssl asn1parse -inform PEM -in %s" % public_key
output, err = subprocess_call(
cmd, "Unable to ASN1 parse public key file, %s, correctly" % public_key
)
key_line = ""
for line in output.splitlines():
if "BIT STRING" in line.decode("utf-8"):
key_line = line.decode("utf-8")
if not key_line:
err_msg = "Public key file, %s, is incorrectly formatted" % public_key
fatal_error(err_msg, err_msg)
key_line = key_line.replace(" ", "")
# DER encoding TLV (Tag, Length, Value)
# - the first octet (byte) is the tag (type)
# - the next octets are the length - "definite form"
# - the first octet always has the high order bit (8) set to 1
# - the remaining 127 bits are used to encode the number of octets that follow
# - the following octets encode, as big-endian, the length (which may be 0) as a number of octets
# - the remaining octets are the "value" aka content
#
# For a BIT STRING, the first octet of the value is used to signify the number of unused bits that exist in the last
# content byte. Note that this is explicitly excluded from the SubjectKeyIdentifier hash, per
# https://tools.ietf.org/html/rfc5280#section-4.2.1.2
#
# Example:
# 0382018f00...<subjectPublicKey>
# - 03 - BIT STRING tag
# - 82 - 2 length octets to follow (ignore high order bit)
# - 018f - length of 399
# - 00 - no unused bits in the last content byte
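    # Applying this to the example above: the asn1parse output places the BIT STRING at byte 19,
    # and its value begins 0382018f00, so num_length_octets = 0x82 & 0x7f = 2 and the key
    # material to hash starts 1 + 1 + 2 + 1 = 5 bytes into the BIT STRING.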
offset = int(key_line.split(":")[0])
key = key[offset:]
num_length_octets = key[1] & 0b01111111
# Exclude the tag (1), length (1 + num_length_octets), and number of unused bits (1)
offset = 1 + 1 + num_length_octets + 1
key = key[offset:]
sha1 = hashlib.sha1()
sha1.update(key)
return sha1.hexdigest()
def create_canonical_request(
public_key_hash, date, access_key, region, fs_id, session_token=None
):
"""
Create a Canonical Request - https://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
"""
formatted_datetime = date.strftime(SIGV4_DATETIME_FORMAT)
credential = quote_plus(access_key + "/" + get_credential_scope(date, region))
request = HTTP_REQUEST_METHOD + "\n"
request += CANONICAL_URI + "\n"
request += (
create_canonical_query_string(
public_key_hash, credential, formatted_datetime, session_token
)
+ "\n"
)
request += CANONICAL_HEADERS % fs_id + "\n"
request += SIGNED_HEADERS + "\n"
sha256 = hashlib.sha256()
sha256.update(REQUEST_PAYLOAD.encode())
request += sha256.hexdigest()
return request
def create_canonical_query_string(
public_key_hash, credential, formatted_datetime, session_token=None
):
canonical_query_params = {
"Action": "Connect",
# Public key hash is included in canonical request to tie the signature to a specific key pair to avoid replay attacks
"PublicKeyHash": quote_plus(public_key_hash),
"X-Amz-Algorithm": ALGORITHM,
"X-Amz-Credential": credential,
"X-Amz-Date": quote_plus(formatted_datetime),
"X-Amz-Expires": 86400,
"X-Amz-SignedHeaders": SIGNED_HEADERS,
}
if session_token:
canonical_query_params["X-Amz-Security-Token"] = quote_plus(session_token)
    # Cannot use urllib urlencode here because it would re-encode the already percent-encoded values
return "&".join(
["%s=%s" % (k, v) for k, v in sorted(canonical_query_params.items())]
)
def create_string_to_sign(canonical_request, date, region):
"""
Create a String to Sign - https://docs.aws.amazon.com/general/latest/gr/sigv4-create-string-to-sign.html
"""
string_to_sign = ALGORITHM + "\n"
string_to_sign += date.strftime(SIGV4_DATETIME_FORMAT) + "\n"
string_to_sign += get_credential_scope(date, region) + "\n"
sha256 = hashlib.sha256()
sha256.update(canonical_request.encode())
string_to_sign += sha256.hexdigest()
return string_to_sign
def calculate_signature(string_to_sign, date, secret_access_key, region):
"""
Calculate the Signature - https://docs.aws.amazon.com/general/latest/gr/sigv4-calculate-signature.html
"""
def _sign(key, msg):
return hmac.new(key, msg.encode("utf-8"), hashlib.sha256)
key_date = _sign(
("AWS4" + secret_access_key).encode("utf-8"), date.strftime(DATE_ONLY_FORMAT)
).digest()
add_region = _sign(key_date, region).digest()
add_service = _sign(add_region, SERVICE).digest()
signing_key = _sign(add_service, "aws4_request").digest()
return _sign(signing_key, string_to_sign).hexdigest()
def get_credential_scope(date, region):
return "/".join([date.strftime(DATE_ONLY_FORMAT), region, SERVICE, AWS4_REQUEST])
def match_device(config, device, options):
"""Return the EFS id, the remote path, and the az to mount"""
try:
remote, path = device.split(":", 1)
except ValueError:
remote = device
path = "/"
if FS_ID_RE.match(remote):
return remote, path, None
try:
primary, secondaries, _ = socket.gethostbyname_ex(remote)
hostnames = list(filter(lambda e: e is not None, [primary] + secondaries))
except socket.gaierror:
create_default_cloudwatchlog_agent_if_not_exist(config, options)
fatal_error(
'Failed to resolve "%s" - check that the specified DNS name is a CNAME record resolving to a valid EFS DNS '
"name" % remote,
'Failed to resolve "%s"' % remote,
)
if not hostnames:
create_default_cloudwatchlog_agent_if_not_exist(config, options)
fatal_error(
'The specified domain name "%s" did not resolve to an EFS mount target'
% remote
)
for hostname in hostnames:
efs_fqdn_match = EFS_FQDN_RE.match(hostname)
if efs_fqdn_match:
az = efs_fqdn_match.group("az")
fs_id = efs_fqdn_match.group("fs_id")
if az and "az" in options and az != options["az"]:
fatal_error(
'The hostname "%s" resolved by the specified domain name "%s" does not match the az provided in the '
"mount options, expected = %s, given = %s"
% (hostname, remote, options["az"], az)
)
expected_dns_name, _ = get_dns_name_and_fallback_mount_target_ip_address(
config, fs_id, add_field_in_options(options, "az", az)
)
# check that the DNS name of the mount target matches exactly the DNS name the CNAME resolves to
if hostname == expected_dns_name:
return fs_id, path, az
else:
create_default_cloudwatchlog_agent_if_not_exist(config, options)
fatal_error(
'The specified CNAME "%s" did not resolve to a valid DNS name for an EFS mount target. '
"Please refer to the EFS documentation for mounting with DNS names for examples: %s"
% (
remote,
"https://docs.aws.amazon.com/efs/latest/ug/mounting-fs-mount-cmd-dns-name.html",
)
)
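# Illustrative return values for match_device() (hypothetical ids and hostnames):
#   "fs-12345678:/data" -> ("fs-12345678", "/data", None)   # bare file system id with a path
#   "fs-12345678"       -> ("fs-12345678", "/", None)
#   a CNAME such as "efs.example.com" that resolves to
#   "us-east-1a.fs-12345678.efs.us-east-1.amazonaws.com" -> ("fs-12345678", "/", "us-east-1a"),
#   provided the resolved hostname matches the DNS name this script would construct itself.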
def add_field_in_options(options, field_key, field_value):
if field_value and field_key not in options:
options[field_key] = field_value
return options
def is_nfs_mount(mountpoint):
if not check_if_platform_is_mac():
cmd = ["stat", "-f", "-L", "-c", "%T", mountpoint]
p = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True
)
output, _ = p.communicate()
return output and "nfs" in str(output)
else:
process = subprocess.run(
["mount", "-t", "nfs"],
check=True,
stdout=subprocess.PIPE,
universal_newlines=True,
)
stdout = process.stdout
if not stdout:
return False
mounts = stdout.split("\n")
for mount in mounts:
_mount = mount.split()
if len(_mount) >= 4 and _mount[2] == mountpoint and "nfs" in _mount[3]:
return True
return False
def mount_tls(
config,
init_system,
dns_name,
path,
fs_id,
mountpoint,
options,
fallback_ip_address=None,
):
if os.path.ismount(mountpoint) and is_nfs_mount(mountpoint):
sys.stdout.write(
"%s is already mounted, please run 'mount' command to verify\n" % mountpoint
)
logging.warning("%s is already mounted, mount aborted" % mountpoint)
return
with bootstrap_tls(
config,
init_system,
dns_name,
fs_id,
mountpoint,
options,
fallback_ip_address=fallback_ip_address,
) as tunnel_proc:
mount_completed = threading.Event()
t = threading.Thread(
target=poll_tunnel_process, args=(tunnel_proc, fs_id, mount_completed)
)
t.daemon = True
t.start()
mount_nfs(config, dns_name, path, mountpoint, options)
mount_completed.set()
t.join()
def verify_tlsport_can_be_connected(tlsport):
try:
test_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
except Exception as e:
logging.warning("Error opening a socket, %s", e)
return False
try:
logging.debug("Trying to connect to 127.0.0.1: %s", tlsport)
test_socket.connect(("127.0.0.1", tlsport))
return True
except ConnectionRefusedError:
return False
finally:
test_socket.close()
def check_unsupported_options(options):
for unsupported_option in UNSUPPORTED_OPTIONS:
if unsupported_option in options:
warn_message = (
'The "%s" option is not supported and has been ignored, as amazon-efs-utils relies on a built-in '
"trust store." % unsupported_option
)
sys.stderr.write("WARN: %s\n" % warn_message)
logging.warning(warn_message)
del options[unsupported_option]
def check_options_validity(options):
if "tls" in options:
if "port" in options:
fatal_error('The "port" and "tls" options are mutually exclusive')
if "tlsport" in options:
try:
int(options["tlsport"])
except ValueError:
fatal_error(
"tlsport option [%s] is not an integer" % options["tlsport"]
)
if "ocsp" in options and "noocsp" in options:
fatal_error('The "ocsp" and "noocsp" options are mutually exclusive')
if "notls" in options:
fatal_error('The "tls" and "notls" options are mutually exclusive')
if "accesspoint" in options:
if "tls" not in options:
fatal_error('The "tls" option is required when mounting via "accesspoint"')
if not AP_ID_RE.match(options["accesspoint"]):
fatal_error("Access Point ID %s is malformed" % options["accesspoint"])
if "iam" in options and "tls" not in options:
fatal_error('The "tls" option is required when mounting via "iam"')
if "awsprofile" in options and "iam" not in options:
fatal_error(
'The "iam" option is required when mounting with named profile option, "awsprofile"'
)
if "awscredsuri" in options and "iam" not in options:
fatal_error('The "iam" option is required when mounting with "awscredsuri"')
if "awscredsuri" in options and "awsprofile" in options:
fatal_error('The "awscredsuri" and "awsprofile" options are mutually exclusive')
def bootstrap_cloudwatch_logging(config, options, fs_id=None):
if not check_if_cloudwatch_log_enabled(config):
return None
cloudwatchlog_client = get_botocore_client(config, "logs", options)
if not cloudwatchlog_client:
return None
cloudwatchlog_config = get_cloudwatchlog_config(config, fs_id)
log_group_name = cloudwatchlog_config.get("log_group_name")
log_stream_name = cloudwatchlog_config.get("log_stream_name")
retention_days = cloudwatchlog_config.get("retention_days")
group_creation_completed = create_cloudwatch_log_group(
cloudwatchlog_client, log_group_name
)
if not group_creation_completed:
return None
put_retention_policy_completed = put_cloudwatch_log_retention_policy(
cloudwatchlog_client, log_group_name, retention_days
)
if not put_retention_policy_completed:
return None
stream_creation_completed = create_cloudwatch_log_stream(
cloudwatchlog_client, log_group_name, log_stream_name
)
if not stream_creation_completed:
return None
return {
"client": cloudwatchlog_client,
"log_group_name": log_group_name,
"log_stream_name": log_stream_name,
}
def create_default_cloudwatchlog_agent_if_not_exist(config, options):
if not check_if_cloudwatch_log_enabled(config):
return None
global CLOUDWATCHLOG_AGENT
if not CLOUDWATCHLOG_AGENT:
CLOUDWATCHLOG_AGENT = bootstrap_cloudwatch_logging(config, options)
def get_botocore_client(config, service, options):
if not BOTOCORE_PRESENT:
logging.error("Failed to import botocore, please install botocore first.")
return None
session = botocore.session.get_session()
region = get_target_region(config)
if options and options.get("awsprofile"):
profile = options.get("awsprofile")
session.set_config_variable("profile", profile)
try:
return session.create_client(service, region_name=region)
except ProfileNotFound as e:
fatal_error(
"%s, please add the [profile %s] section in the aws config file following %s and %s."
% (e, profile, NAMED_PROFILE_HELP_URL, CONFIG_FILE_SETTINGS_HELP_URL)
)
return session.create_client(service, region_name=region)
def get_cloudwatchlog_config(config, fs_id=None):
log_group_name = DEFAULT_CLOUDWATCH_LOG_GROUP
if config.has_option(CLOUDWATCH_LOG_SECTION, "log_group_name"):
log_group_name = config.get(CLOUDWATCH_LOG_SECTION, "log_group_name")
if "{fs_id}" in log_group_name:
if fs_id:
# Formatting the log_group_name with the fs_id.
log_group_name = log_group_name.format(fs_id=fs_id)
else:
                # fs_id is None, so push the logs into the log group obtained by removing '/{fs_id}' from log_group_name.
log_group_name = log_group_name.replace("/{fs_id}", "")
logging.warning(
"Failed to load the File System ID, pushing logs to log group %s.",
log_group_name,
)
logging.debug("Pushing logs to log group named %s in Cloudwatch.", log_group_name)
retention_days = DEFAULT_RETENTION_DAYS
if config.has_option(CLOUDWATCH_LOG_SECTION, "retention_in_days"):
retention_days = config.get(CLOUDWATCH_LOG_SECTION, "retention_in_days")
log_stream_name = get_cloudwatch_log_stream_name(config, fs_id)
return {
"log_group_name": log_group_name,
"retention_days": int(retention_days),
"log_stream_name": log_stream_name,
}
def get_cloudwatch_log_stream_name(config, fs_id=None):
instance_id = get_instance_identity_info_from_instance_metadata(
config, "instanceId"
)
if instance_id and fs_id:
log_stream_name = "%s - %s - mount.log" % (fs_id, instance_id)
elif instance_id:
log_stream_name = "%s - mount.log" % (instance_id)
elif fs_id:
log_stream_name = "%s - mount.log" % (fs_id)
else:
log_stream_name = "default - mount.log"
return log_stream_name
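# Example (hypothetical ids): with instance id "i-0123456789abcdef0" and fs id "fs-12345678" the
# stream name is "fs-12345678 - i-0123456789abcdef0 - mount.log"; if neither is available it
# falls back to "default - mount.log".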
def check_if_platform_is_mac():
return sys.platform in MAC_OS_PLATFORM_LIST
def check_if_mac_version_is_supported():
return any(
release in platform.release() for release in MAC_OS_SUPPORTED_VERSION_LIST
)
def check_if_cloudwatch_log_enabled(config):
    # We don't emit a warning message here because even a brand-new config file has no `enabled` item:
    # by default the `enabled = true` line is commented out, so the cloudwatch log feature is disabled.
    # We don't ship it as `enabled = false` because we enable the feature by uncommenting the item for
    # users who install efs-utils via System Manager Distributor; they can still disable the feature
    # afterwards by setting `enabled = false`.
return get_boolean_config_item_value(
config,
CLOUDWATCH_LOG_SECTION,
"enabled",
default_value=False,
emit_warning_message=False,
)
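# Minimal sketch of the toggle in the efs-utils config file (the section header below is an
# assumption standing in for whatever CLOUDWATCH_LOG_SECTION names):
#   [cloudwatch-log]
#   # enabled = true    <- shipped commented out, i.e. the feature is disabled by default
#   enabled = true      <- uncommented (e.g. by System Manager Distributor installs) to enable it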
def cloudwatch_create_log_group_helper(cloudwatchlog_client, log_group_name):
cloudwatchlog_client.create_log_group(logGroupName=log_group_name)
logging.info("Created cloudwatch log group %s" % log_group_name)
def create_cloudwatch_log_group(cloudwatchlog_client, log_group_name):
try:
cloudwatch_create_log_group_helper(cloudwatchlog_client, log_group_name)
except ClientError as e:
exception = e.response["Error"]["Code"]
if exception == "ResourceAlreadyExistsException":
logging.debug(
"Log group %s already exist, %s" % (log_group_name, e.response)
)
return True
elif exception == "LimitExceededException":
logging.error(
"Reached the maximum number of log groups that can be created, %s"
% e.response
)
return False
elif exception == "OperationAbortedException":
logging.debug(
"Multiple requests to update the same log group %s were in conflict, %s"
% (log_group_name, e.response)
)
return False
elif exception == "InvalidParameterException":
logging.error(
"Log group name %s is specified incorrectly, %s"
% (log_group_name, e.response)
)
return False
else:
handle_general_botocore_exceptions(e)
return False
except NoCredentialsError as e:
logging.warning("Credentials are not properly configured, %s" % e)
return False
except EndpointConnectionError as e:
logging.warning("Could not connect to the endpoint, %s" % e)
return False
except Exception as e:
logging.warning("Unknown error, %s." % e)
return False
return True
def cloudwatch_put_retention_policy_helper(
cloudwatchlog_client, log_group_name, retention_days
):
cloudwatchlog_client.put_retention_policy(
logGroupName=log_group_name, retentionInDays=retention_days
)
logging.debug("Set cloudwatch log group retention days to %s" % retention_days)
def put_cloudwatch_log_retention_policy(
cloudwatchlog_client, log_group_name, retention_days
):
try:
cloudwatch_put_retention_policy_helper(
cloudwatchlog_client, log_group_name, retention_days
)
except ClientError as e:
exception = e.response["Error"]["Code"]
if exception == "ResourceNotFoundException":
logging.error(
"Log group %s does not exist, %s" % (log_group_name, e.response)
)
return False
elif exception == "OperationAbortedException":
logging.debug(
"Multiple requests to update the same log group %s were in conflict, %s"
% (log_group_name, e.response)
)
return False
elif exception == "InvalidParameterException":
logging.error(
"Either parameter log group name %s or retention in days %s is specified incorrectly, %s"
% (log_group_name, retention_days, e.response)
)
return False
else:
handle_general_botocore_exceptions(e)
return False
except NoCredentialsError as e:
logging.warning("Credentials are not properly configured, %s" % e)
return False
except EndpointConnectionError as e:
logging.warning("Could not connect to the endpoint, %s" % e)
return False
except Exception as e:
logging.warning("Unknown error, %s." % e)
return False
return True
def cloudwatch_create_log_stream_helper(
cloudwatchlog_client, log_group_name, log_stream_name
):
cloudwatchlog_client.create_log_stream(
logGroupName=log_group_name, logStreamName=log_stream_name
)
logging.info(
"Created cloudwatch log stream %s in log group %s"
% (log_stream_name, log_group_name)
)
def create_cloudwatch_log_stream(cloudwatchlog_client, log_group_name, log_stream_name):
try:
cloudwatch_create_log_stream_helper(
cloudwatchlog_client, log_group_name, log_stream_name
)
except ClientError as e:
exception = e.response["Error"]["Code"]
if exception == "ResourceAlreadyExistsException":
logging.debug(
"Log stream %s already exist in log group %s, %s"
% (log_stream_name, log_group_name, e.response)
)
return True
elif exception == "InvalidParameterException":
logging.error(
"Either parameter log group name %s or log stream name %s is specified incorrectly, %s"
% (log_group_name, log_stream_name, e.response)
)
return False
elif exception == "ResourceNotFoundException":
logging.error(
"Log group %s does not exist, %s" % (log_group_name, e.response)
)
return False
else:
handle_general_botocore_exceptions(e)
return False
except NoCredentialsError as e:
logging.warning("Credentials are not properly configured, %s" % e)
return False
except EndpointConnectionError as e:
logging.warning("Could not connect to the endpoint, %s" % e)
return False
except Exception as e:
logging.warning("Unknown error, %s." % e)
return False
return True
def cloudwatch_put_log_events_helper(cloudwatchlog_agent, message, token=None):
kwargs = {
"logGroupName": cloudwatchlog_agent.get("log_group_name"),
"logStreamName": cloudwatchlog_agent.get("log_stream_name"),
"logEvents": [
{"timestamp": int(round(time.time() * 1000)), "message": message}
],
}
if token:
kwargs["sequenceToken"] = token
cloudwatchlog_agent.get("client").put_log_events(**kwargs)
def publish_cloudwatch_log(cloudwatchlog_agent, message):
if not cloudwatchlog_agent or not cloudwatchlog_agent.get("client"):
return False
token = get_log_stream_next_token(cloudwatchlog_agent)
try:
cloudwatch_put_log_events_helper(cloudwatchlog_agent, message, token)
except ClientError as e:
exception = e.response["Error"]["Code"]
if exception == "InvalidSequenceTokenException":
logging.debug("The sequence token is not valid, %s" % e.response)
return False
elif exception == "InvalidParameterException":
logging.debug(
"One of the parameter to put log events is not valid, %s" % e.response
)
return False
elif exception == "DataAlreadyAcceptedException":
logging.debug("The event %s was already logged, %s" % (message, e.response))
return False
elif exception == "UnrecognizedClientException":
logging.debug(
"The most likely cause is an invalid AWS access key ID or secret Key, %s"
% e.response
)
return False
elif exception == "ResourceNotFoundException":
logging.error(
"Either log group %s or log stream %s does not exist, %s"
% (
cloudwatchlog_agent.get("log_group_name"),
cloudwatchlog_agent.get("log_stream_name"),
e.response,
)
)
return False
else:
logging.debug("Unexpected error: %s" % e)
return False
except NoCredentialsError as e:
logging.warning("Credentials are not properly configured, %s" % e)
return False
except EndpointConnectionError as e:
logging.warning("Could not connect to the endpoint, %s" % e)
return False
except Exception as e:
logging.warning("Unknown error, %s." % e)
return False
return True
def cloudwatch_describe_log_streams_helper(cloudwatchlog_agent):
return cloudwatchlog_agent.get("client").describe_log_streams(
logGroupName=cloudwatchlog_agent.get("log_group_name"),
logStreamNamePrefix=cloudwatchlog_agent.get("log_stream_name"),
)
def get_log_stream_next_token(cloudwatchlog_agent):
try:
response = cloudwatch_describe_log_streams_helper(cloudwatchlog_agent)
except ClientError as e:
exception = e.response["Error"]["Code"]
if exception == "InvalidParameterException":
logging.debug(
"Either parameter log group name %s or log stream name %s is specified incorrectly, %s"
% (
cloudwatchlog_agent.get("log_group_name"),
cloudwatchlog_agent.get("log_stream_name"),
e.response,
)
)
elif exception == "ResourceNotFoundException":
logging.debug(
"Either log group %s or log stream %s does not exist, %s"
% (
cloudwatchlog_agent.get("log_group_name"),
cloudwatchlog_agent.get("log_stream_name"),
e.response,
)
)
else:
handle_general_botocore_exceptions(e)
return None
except NoCredentialsError as e:
logging.warning("Credentials are not properly configured, %s" % e)
return None
except EndpointConnectionError as e:
logging.warning("Could not connect to the endpoint, %s" % e)
return None
except Exception as e:
logging.warning("Unknown error, %s" % e)
return None
try:
log_stream = response["logStreams"][0]
return log_stream.get("uploadSequenceToken")
except (IndexError, TypeError, KeyError):
pass
return None
def ec2_describe_availability_zones_helper(ec2_client, kwargs):
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html#EC2.Client.describe_availability_zones
return ec2_client.describe_availability_zones(**kwargs)
def get_az_id_by_az_name_helper(ec2_client, az_name, dryrun=False):
operation = "DescribeAvailabilityZones"
kwargs = {"ZoneNames": [az_name]}
if dryrun:
kwargs["DryRun"] = True
try:
az_info = ec2_describe_availability_zones_helper(ec2_client, kwargs)
logging.debug("Found the az information for %s: %s", az_name, az_info)
return az_info
except ClientError as e:
exception = e.response["Error"]["Code"]
exception_message = e.response["Error"]["Message"]
if exception == "DryRunOperation":
logging.debug("Describe availability zones dryrun succeed.")
return
elif exception == "UnauthorizedOperation":
fallback_message = "Unauthorized to perform operation %s." % operation
elif exception == "InvalidParameterValue":
fallback_message = "Invalid availability zone %s" % az_name
elif exception == "ServiceUnavailableException":
fallback_message = (
"The ec2 service cannot complete the request, %s" % exception_message
)
elif exception == "AccessDeniedException":
fallback_message = exception_message
else:
fallback_message = "Unexpected error: %s" % exception_message
except NoCredentialsError as e:
fallback_message = (
"%s when performing operation %s, please confirm your aws credentials are properly configured."
% (e, operation)
)
except EndpointConnectionError as e:
fallback_message = (
"Could not connect to the endpoint when performing operation %s, %s"
% (operation, e)
)
except Exception as e:
fallback_message = "Unknown error when performing operation %s, %s." % (
operation,
e,
)
raise FallbackException(fallback_message)
def get_az_id_by_az_name(ec2_client, az_name):
# Perform a dryrun api call first
get_az_id_by_az_name_helper(ec2_client, az_name, dryrun=True)
az_info = get_az_id_by_az_name_helper(ec2_client, az_name, dryrun=False)
if az_info and az_info.get("AvailabilityZones"):
az_id = az_info["AvailabilityZones"][0]["ZoneId"]
logging.debug("Found AZ mapping [AZ name: %s, AZ ID: %s]", az_name, az_id)
return az_id
return None
def efs_describe_mount_targets_helper(efs_client, kwargs):
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/efs.html#EFS.Client.describe_mount_targets
return efs_client.describe_mount_targets(**kwargs)
def get_mount_targets_info(efs_client, fs_id):
operation = "DescribeMountTargets"
kwargs = {"FileSystemId": fs_id}
try:
mount_targets_info = efs_describe_mount_targets_helper(efs_client, kwargs)
logging.debug(
"Found these mount targets for file system %s: %s",
fs_id,
mount_targets_info,
)
return mount_targets_info.get("MountTargets")
except ClientError as e:
exception = e.response["Error"]["Code"]
exception_message = e.response["Error"]["Message"]
if exception == "FileSystemNotFound":
fallback_message = "The file system %s is not found" % fs_id
elif exception == "ServiceUnavailableException":
fallback_message = (
"The elasticfilesystem service cannot complete the request, %s"
% exception_message
)
elif exception == "AccessDeniedException":
fallback_message = exception_message
else:
fallback_message = "Unexpected error: %s" % exception_message
except NoCredentialsError as e:
fallback_message = (
"%s when performing operation %s, please confirm your aws credentials are properly configured."
% (e, operation)
)
except EndpointConnectionError as e:
fallback_message = (
"Could not connect to the endpoint when performing operation %s, %s"
% (operation, e)
)
except Exception as e:
fallback_message = "Unknown error when performing operation %s, %s." % (
operation,
e,
)
raise FallbackException(fallback_message)
def get_mount_target_in_az(efs_client, ec2_client, fs_id, az_name=None):
if not efs_client or not ec2_client:
raise FallbackException("Boto client cannot be null")
mount_targets = get_mount_targets_info(efs_client, fs_id)
if not mount_targets:
message = (
"Cannot find mount target for the file system %s, please create a mount target in %s."
            % (fs_id, az_name if az_name else "any availability zone")
)
raise FallbackException(message)
available_mount_targets = [
mount_target
for mount_target in mount_targets
if mount_target.get("LifeCycleState") == "available"
]
if not available_mount_targets:
message = (
"No mount target created for the file system %s is in available state yet, please retry in 5 minutes."
% fs_id
)
raise FallbackException(message)
if az_name:
az_id = get_az_id_by_az_name(ec2_client, az_name)
else:
        # If az_name is None, the IMDS instance identity retrieval failed;
        # in that case randomly pick one available mount target.
logging.info(
"No az info passed via options, randomly pick one available mount target."
)
return random.choice(available_mount_targets)
az_names_of_available_mount_targets = [
mount_target.get("AvailabilityZoneName")
for mount_target in available_mount_targets
]
available_mount_targets_message = (
"Available mount target(s) are in az %s" % az_names_of_available_mount_targets
)
if not az_id:
message = (
"No matching az id for the az %s. Please check the az option passed. %s"
% (az_name, available_mount_targets_message)
)
raise FallbackException(message)
for mount_target in mount_targets:
if mount_target["AvailabilityZoneId"] == az_id:
mount_target_state = mount_target.get("LifeCycleState")
if mount_target_state != "available":
message = "Unknown mount target state"
if mount_target_state in ["creating", "updating", "error"]:
message = (
"Mount target in the az %s is %s, please retry in 5 minutes, or use the "
"mount target in the other az by passing the availability zone name option. %s"
% (az_name, mount_target_state, available_mount_targets_message)
)
elif mount_target_state in ["deleted", "deleting"]:
message = (
"Mount target in the availability zone %s is %s, "
                    "please create a new one in %s, or use the mount target "
"in the other az by passing the availability zone name option. %s"
) % (
az_name,
mount_target_state,
az_name,
available_mount_targets_message,
)
raise FallbackException(message)
return mount_target
message = (
"No matching mount target in the az %s. Please create one mount target in %s, or try the mount target in another "
"AZ by passing the availability zone name option. %s"
% (az_name, az_name, available_mount_targets_message)
)
raise FallbackException(message)
def handle_general_botocore_exceptions(error):
exception = error.response["Error"]["Code"]
if exception == "ServiceUnavailableException":
logging.debug("The service cannot complete the request, %s" % error.response)
elif exception == "AccessDeniedException":
logging.debug(
"User is not authorized to perform the action, %s" % error.response
)
else:
logging.debug("Unexpected error: %s" % error)
# A change in Linux kernel 5.4+ results in a throughput regression on the NFS client.
# With the patch (https://bugzilla.kernel.org/show_bug.cgi?id=204939), starting from 5.4.*,
# the Linux NFS client uses a fixed default value of 128K for read_ahead_kb.
# Before this patch, read_ahead_kb was computed as (NFS_MAX_READAHEAD) 15 * (client-configured read size).
# Thus, with the EFS-recommended rsize (1MB) in the mount options,
# the NFS client might see a throughput drop on kernel 5.4+, especially for sequential reads.
# To fix the issue, the function below modifies read_ahead_kb to 15 * rsize (1MB by default) after mount.
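# For example, with the default rsize of 1048576 bytes this sets read_ahead_kb to
# 15 * 1048576 / 1024 = 15360.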
def optimize_readahead_window(mountpoint, options, config):
if not should_revise_readahead(config):
return
fixed_readahead_kb = int(
DEFAULT_NFS_MAX_READAHEAD_MULTIPLIER * int(options["rsize"]) / 1024
)
try:
# use "stat -c '%d' mountpoint" to get Device number in decimal
mountpoint_dev_num = subprocess.check_output(
["stat", "-c", '"%d"', mountpoint], universal_newlines=True
)
# modify read_ahead_kb in /sys/class/bdi/0:[Device Number]/read_ahead_kb
subprocess.check_call(
"echo %s > %s"
% (
fixed_readahead_kb,
NFS_READAHEAD_CONFIG_PATH_FORMAT
% mountpoint_dev_num.strip().strip('"'),
),
shell=True,
)
except subprocess.CalledProcessError as e:
logging.warning(
"failed to modify read_ahead_kb: %s with error %s" % (fixed_readahead_kb, e)
)
# Only modify read_ahead_kb iff
# 1. instance platform is linux
# 2. kernel version of instance is 5.4+
# 3. 'optimize_readahead' is set to true in efs-utils config file
def should_revise_readahead(config):
if platform.system() != "Linux":
return False
if (
get_linux_kernel_version(len(NFS_READAHEAD_OPTIMIZE_LINUX_KERNEL_MIN_VERSION))
< NFS_READAHEAD_OPTIMIZE_LINUX_KERNEL_MIN_VERSION
):
return False
return get_boolean_config_item_value(
config, CONFIG_SECTION, OPTIMIZE_READAHEAD_ITEM, default_value=False
)
# Parse Linux kernel version from platform.release()
# Fall back to 0.0.0... when the version cannot be parsed
# Examples:
# platform.release() Parsed version with desired_length:2
# RHEL 3.10.0-1160.el7.x86_64 [3, 10]
# AL2 5.4.105-48.177.amzn2.x86_64 [5, 4]
# Ubuntu 5.4.0-1038-aws [5, 4]
# OpenSUSE 5.3.18-24.37-default [5, 3]
def get_linux_kernel_version(desired_length):
version = []
try:
version = [
int(v)
for v in platform.release().split("-", 1)[0].split(".")[:desired_length]
]
except ValueError:
logging.warning("Failed to retrieve linux kernel version")
# filling 0 at the end
for i in range(len(version), desired_length):
version.append(0)
return version
def main():
parse_arguments_early_exit()
assert_root()
config = read_config()
bootstrap_logging(config)
if check_if_platform_is_mac() and not check_if_mac_version_is_supported():
fatal_error("We do not support EFS on MacOS " + platform.release())
fs_id, path, mountpoint, options = parse_arguments(config)
logging.info("version=%s options=%s", VERSION, options)
global CLOUDWATCHLOG_AGENT
CLOUDWATCHLOG_AGENT = bootstrap_cloudwatch_logging(config, options, fs_id)
check_unsupported_options(options)
check_options_validity(options)
init_system = get_init_system()
check_network_status(fs_id, init_system)
dns_name, fallback_ip_address = get_dns_name_and_fallback_mount_target_ip_address(
config, fs_id, options
)
if check_if_platform_is_mac() and "notls" not in options:
options["tls"] = None
if "tls" in options:
mount_tls(
config,
init_system,
dns_name,
path,
fs_id,
mountpoint,
options,
fallback_ip_address=fallback_ip_address,
)
else:
mount_nfs(
config,
dns_name,
path,
mountpoint,
options,
fallback_ip_address=fallback_ip_address,
)
if "__main__" == __name__:
main()
test_utils.py
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for swift.common.utils"""
from __future__ import print_function
import hashlib
from test import annotate_failure
from test.debug_logger import debug_logger
from test.unit import temptree, make_timestamp_iter, with_tempdir, \
mock_timestamp_now, FakeIterable
import ctypes
import contextlib
import errno
import eventlet
import eventlet.debug
import eventlet.event
import eventlet.patcher
import functools
import grp
import logging
import platform
import os
import mock
import posix
import pwd
import random
import re
import socket
import string
import sys
import json
import math
import inspect
import warnings
import six
from six import StringIO
from six.moves.queue import Queue, Empty
from six.moves import http_client
from six.moves import range
from textwrap import dedent
import tempfile
import time
import unittest
import fcntl
import shutil
from getpass import getuser
from io import BytesIO
from shutil import rmtree
from functools import partial
from tempfile import TemporaryFile, NamedTemporaryFile, mkdtemp
from netifaces import AF_INET6
from mock import MagicMock, patch
from six.moves.configparser import NoSectionError, NoOptionError
from uuid import uuid4
from swift.common.exceptions import Timeout, MessageTimeout, \
ConnectionTimeout, LockTimeout, ReplicationLockTimeout, \
MimeInvalid
from swift.common import utils
from swift.common.utils import is_valid_ip, is_valid_ipv4, is_valid_ipv6, \
set_swift_dir, md5, ShardRangeList
from swift.common.container_sync_realms import ContainerSyncRealms
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.storage_policy import POLICIES, reload_storage_policies
from swift.common.swob import Request, Response
from test.unit import requires_o_tmpfile_support_in_tmp, \
quiet_eventlet_exceptions
if six.PY2:
import eventlet.green.httplib as green_http_client
else:
import eventlet.green.http.client as green_http_client
threading = eventlet.patcher.original('threading')
class MockOs(object):
def __init__(self, pass_funcs=None, called_funcs=None, raise_funcs=None):
if pass_funcs is None:
pass_funcs = []
if called_funcs is None:
called_funcs = []
if raise_funcs is None:
raise_funcs = []
self.closed_fds = []
for func in pass_funcs:
setattr(self, func, self.pass_func)
self.called_funcs = {}
for func in called_funcs:
c_func = partial(self.called_func, func)
setattr(self, func, c_func)
for func in raise_funcs:
r_func = partial(self.raise_func, func)
setattr(self, func, r_func)
def pass_func(self, *args, **kwargs):
pass
setgroups = chdir = setsid = setgid = setuid = umask = pass_func
def called_func(self, name, *args, **kwargs):
self.called_funcs[name] = args
def raise_func(self, name, *args, **kwargs):
self.called_funcs[name] = args
raise OSError()
def dup2(self, source, target):
self.closed_fds.append(target)
def geteuid(self):
'''Pretend we are running as root.'''
return 0
def __getattr__(self, name):
        # I only override portions of the os module
try:
return object.__getattr__(self, name)
except AttributeError:
return getattr(os, name)
class MockUdpSocket(object):
def __init__(self, sendto_errno=None):
self.sent = []
self.sendto_errno = sendto_errno
def sendto(self, data, target):
if self.sendto_errno:
raise socket.error(self.sendto_errno,
'test errno %s' % self.sendto_errno)
self.sent.append((data, target))
def close(self):
pass
class MockSys(object):
def __init__(self):
self.stdin = TemporaryFile('w')
self.stdout = TemporaryFile('r')
self.stderr = TemporaryFile('r')
self.__stderr__ = self.stderr
self.stdio_fds = [self.stdin.fileno(), self.stdout.fileno(),
self.stderr.fileno()]
def reset_loggers():
if hasattr(utils.get_logger, 'handler4logger'):
for logger, handler in utils.get_logger.handler4logger.items():
logger.removeHandler(handler)
delattr(utils.get_logger, 'handler4logger')
if hasattr(utils.get_logger, 'console_handler4logger'):
for logger, h in utils.get_logger.console_handler4logger.items():
logger.removeHandler(h)
delattr(utils.get_logger, 'console_handler4logger')
# Reset the LogAdapter class thread local state. Use get_logger() here
# to fetch a LogAdapter instance because the items from
# get_logger.handler4logger above are the underlying logger instances,
# not the LogAdapter.
utils.get_logger(None).thread_locals = (None, None)
def reset_logger_state(f):
@functools.wraps(f)
def wrapper(self, *args, **kwargs):
reset_loggers()
try:
return f(self, *args, **kwargs)
finally:
reset_loggers()
return wrapper
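# Typical usage (assumption: applied to test methods that touch global logger state):
#
#   @reset_logger_state
#   def test_something_with_loggers(self):
#       ...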
class TestUTC(unittest.TestCase):
def test_tzname(self):
self.assertEqual(utils.UTC.tzname(None), 'UTC')
class TestTimestamp(unittest.TestCase):
"""Tests for swift.common.utils.Timestamp"""
def test_invalid_input(self):
self.assertRaises(ValueError, utils.Timestamp, time.time(), offset=-1)
self.assertRaises(ValueError, utils.Timestamp, '123.456_78_90')
def test_invalid_string_conversion(self):
t = utils.Timestamp.now()
self.assertRaises(TypeError, str, t)
def test_offset_limit(self):
t = 1417462430.78693
        # can't have an offset above MAX_OFFSET
self.assertRaises(ValueError, utils.Timestamp, t,
offset=utils.MAX_OFFSET + 1)
# exactly max offset is fine
ts = utils.Timestamp(t, offset=utils.MAX_OFFSET)
self.assertEqual(ts.internal, '1417462430.78693_ffffffffffffffff')
# but you can't offset it further
self.assertRaises(ValueError, utils.Timestamp, ts.internal, offset=1)
# unless you start below it
ts = utils.Timestamp(t, offset=utils.MAX_OFFSET - 1)
self.assertEqual(utils.Timestamp(ts.internal, offset=1),
'1417462430.78693_ffffffffffffffff')
def test_normal_format_no_offset(self):
expected = '1402436408.91203'
test_values = (
'1402436408.91203',
'1402436408.91203_00000000',
'1402436408.912030000',
'1402436408.912030000_0000000000000',
'000001402436408.912030000',
'000001402436408.912030000_0000000000',
1402436408.91203,
1402436408.912029,
1402436408.9120300000000000,
1402436408.91202999999999999,
utils.Timestamp(1402436408.91203),
utils.Timestamp(1402436408.91203, offset=0),
utils.Timestamp(1402436408.912029),
utils.Timestamp(1402436408.912029, offset=0),
utils.Timestamp('1402436408.91203'),
utils.Timestamp('1402436408.91203', offset=0),
utils.Timestamp('1402436408.91203_00000000'),
utils.Timestamp('1402436408.91203_00000000', offset=0),
)
for value in test_values:
timestamp = utils.Timestamp(value)
self.assertEqual(timestamp.normal, expected)
# timestamp instance can also compare to string or float
self.assertEqual(timestamp, expected)
self.assertEqual(timestamp, float(expected))
self.assertEqual(timestamp, utils.normalize_timestamp(expected))
def test_isoformat(self):
expected = '2014-06-10T22:47:32.054580'
test_values = (
'1402440452.05458',
'1402440452.054579',
'1402440452.05458_00000000',
'1402440452.054579_00000000',
'1402440452.054580000',
'1402440452.054579999',
'1402440452.054580000_0000000000000',
'1402440452.054579999_0000ff00',
'000001402440452.054580000',
'000001402440452.0545799',
'000001402440452.054580000_0000000000',
'000001402440452.054579999999_00000fffff',
1402440452.05458,
1402440452.054579,
1402440452.0545800000000000,
1402440452.054579999,
utils.Timestamp(1402440452.05458),
utils.Timestamp(1402440452.0545799),
utils.Timestamp(1402440452.05458, offset=0),
utils.Timestamp(1402440452.05457999999, offset=0),
utils.Timestamp(1402440452.05458, offset=100),
utils.Timestamp(1402440452.054579, offset=100),
utils.Timestamp('1402440452.05458'),
utils.Timestamp('1402440452.054579999'),
utils.Timestamp('1402440452.05458', offset=0),
utils.Timestamp('1402440452.054579', offset=0),
utils.Timestamp('1402440452.05458', offset=300),
utils.Timestamp('1402440452.05457999', offset=300),
utils.Timestamp('1402440452.05458_00000000'),
utils.Timestamp('1402440452.05457999_00000000'),
utils.Timestamp('1402440452.05458_00000000', offset=0),
utils.Timestamp('1402440452.05457999_00000aaa', offset=0),
utils.Timestamp('1402440452.05458_00000000', offset=400),
utils.Timestamp('1402440452.054579_0a', offset=400),
)
for value in test_values:
self.assertEqual(utils.Timestamp(value).isoformat, expected)
expected = '1970-01-01T00:00:00.000000'
test_values = (
'0',
'0000000000.00000',
'0000000000.00000_ffffffffffff',
0,
0.0,
)
for value in test_values:
self.assertEqual(utils.Timestamp(value).isoformat, expected)
def test_not_equal(self):
ts = '1402436408.91203_0000000000000001'
test_values = (
utils.Timestamp('1402436408.91203_0000000000000002'),
utils.Timestamp('1402436408.91203'),
utils.Timestamp(1402436408.91203),
utils.Timestamp(1402436408.91204),
utils.Timestamp(1402436408.91203, offset=0),
utils.Timestamp(1402436408.91203, offset=2),
)
for value in test_values:
self.assertTrue(value != ts)
self.assertIs(True, utils.Timestamp(ts) == ts) # sanity
self.assertIs(False, utils.Timestamp(ts) != utils.Timestamp(ts))
self.assertIs(False, utils.Timestamp(ts) != ts)
self.assertIs(False, utils.Timestamp(ts) is None)
self.assertIs(True, utils.Timestamp(ts) is not None)
def test_no_force_internal_no_offset(self):
"""Test that internal is the same as normal with no offset"""
with mock.patch('swift.common.utils.FORCE_INTERNAL', new=False):
self.assertEqual(utils.Timestamp(0).internal, '0000000000.00000')
self.assertEqual(utils.Timestamp(1402437380.58186).internal,
'1402437380.58186')
self.assertEqual(utils.Timestamp(1402437380.581859).internal,
'1402437380.58186')
self.assertEqual(utils.Timestamp(0).internal,
utils.normalize_timestamp(0))
def test_no_force_internal_with_offset(self):
"""Test that internal always includes the offset if significant"""
with mock.patch('swift.common.utils.FORCE_INTERNAL', new=False):
self.assertEqual(utils.Timestamp(0, offset=1).internal,
'0000000000.00000_0000000000000001')
self.assertEqual(
utils.Timestamp(1402437380.58186, offset=16).internal,
'1402437380.58186_0000000000000010')
self.assertEqual(
utils.Timestamp(1402437380.581859, offset=240).internal,
'1402437380.58186_00000000000000f0')
self.assertEqual(
utils.Timestamp('1402437380.581859_00000001',
offset=240).internal,
'1402437380.58186_00000000000000f1')
def test_force_internal(self):
"""Test that internal always includes the offset if forced"""
with mock.patch('swift.common.utils.FORCE_INTERNAL', new=True):
self.assertEqual(utils.Timestamp(0).internal,
'0000000000.00000_0000000000000000')
self.assertEqual(utils.Timestamp(1402437380.58186).internal,
'1402437380.58186_0000000000000000')
self.assertEqual(utils.Timestamp(1402437380.581859).internal,
'1402437380.58186_0000000000000000')
self.assertEqual(utils.Timestamp(0, offset=1).internal,
'0000000000.00000_0000000000000001')
self.assertEqual(
utils.Timestamp(1402437380.58186, offset=16).internal,
'1402437380.58186_0000000000000010')
self.assertEqual(
utils.Timestamp(1402437380.581859, offset=16).internal,
'1402437380.58186_0000000000000010')
def test_internal_format_no_offset(self):
expected = '1402436408.91203_0000000000000000'
test_values = (
'1402436408.91203',
'1402436408.91203_00000000',
'1402436408.912030000',
'1402436408.912030000_0000000000000',
'000001402436408.912030000',
'000001402436408.912030000_0000000000',
1402436408.91203,
1402436408.9120300000000000,
1402436408.912029,
1402436408.912029999999999999,
utils.Timestamp(1402436408.91203),
utils.Timestamp(1402436408.91203, offset=0),
utils.Timestamp(1402436408.912029),
utils.Timestamp(1402436408.91202999999999999, offset=0),
utils.Timestamp('1402436408.91203'),
utils.Timestamp('1402436408.91203', offset=0),
utils.Timestamp('1402436408.912029'),
utils.Timestamp('1402436408.912029', offset=0),
utils.Timestamp('1402436408.912029999999999'),
utils.Timestamp('1402436408.912029999999999', offset=0),
)
for value in test_values:
# timestamp instance is always equivalent
self.assertEqual(utils.Timestamp(value), expected)
if utils.FORCE_INTERNAL:
# the FORCE_INTERNAL flag makes the internal format always
# include the offset portion of the timestamp even when it's
# not significant, which would be bad during upgrades
self.assertEqual(utils.Timestamp(value).internal, expected)
else:
# unless we FORCE_INTERNAL, when there's no offset the
# internal format is equivalent to the normalized format
self.assertEqual(utils.Timestamp(value).internal,
'1402436408.91203')
def test_internal_format_with_offset(self):
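# internal renders the offset as 16 zero-padded hex digits after an
# underscore; offset=240 below appears as ...00f0.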
expected = '1402436408.91203_00000000000000f0'
test_values = (
'1402436408.91203_000000f0',
u'1402436408.91203_000000f0',
b'1402436408.91203_000000f0',
'1402436408.912030000_0000000000f0',
'1402436408.912029_000000f0',
'1402436408.91202999999_0000000000f0',
'000001402436408.912030000_000000000f0',
'000001402436408.9120299999_000000000f0',
utils.Timestamp(1402436408.91203, offset=240),
utils.Timestamp(1402436408.912029, offset=240),
utils.Timestamp('1402436408.91203', offset=240),
utils.Timestamp('1402436408.91203_00000000', offset=240),
utils.Timestamp('1402436408.91203_0000000f', offset=225),
utils.Timestamp('1402436408.9120299999', offset=240),
utils.Timestamp('1402436408.9120299999_00000000', offset=240),
utils.Timestamp('1402436408.9120299999_00000010', offset=224),
)
for value in test_values:
timestamp = utils.Timestamp(value)
self.assertEqual(timestamp.internal, expected)
# can compare with offset if the string is internalized
self.assertEqual(timestamp, expected)
# if the comparison value only includes the normalized portion and the
# timestamp includes an offset, the timestamp is considered greater
normal = utils.Timestamp(expected).normal
self.assertTrue(timestamp > normal,
'%r is not bigger than %r given %r' % (
timestamp, normal, value))
self.assertTrue(timestamp > float(normal),
'%r is not bigger than %f given %r' % (
timestamp, float(normal), value))
def test_short_format_with_offset(self):
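# short is like internal but without zero-padding the offset, and it
# drops the '_offset' suffix entirely when there is no offset.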
expected = '1402436408.91203_f0'
timestamp = utils.Timestamp(1402436408.91203, 0xf0)
self.assertEqual(expected, timestamp.short)
expected = '1402436408.91203'
timestamp = utils.Timestamp(1402436408.91203)
self.assertEqual(expected, timestamp.short)
def test_raw(self):
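# raw is the normalized time scaled to an integer count of
# 10-microsecond ticks (normal * 100000); it excludes the offset.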
expected = 140243640891203
timestamp = utils.Timestamp(1402436408.91203)
self.assertEqual(expected, timestamp.raw)
# 'raw' does not include offset
timestamp = utils.Timestamp(1402436408.91203, 0xf0)
self.assertEqual(expected, timestamp.raw)
def test_delta(self):
def _assertWithinBounds(expected, timestamp):
tolerance = 0.00001
minimum = expected - tolerance
maximum = expected + tolerance
self.assertTrue(float(timestamp) > minimum)
self.assertTrue(float(timestamp) < maximum)
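# delta is applied in the same 10-microsecond raw ticks, so delta=100
# shifts the time by 0.001s; it may not drive the timestamp negative.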
timestamp = utils.Timestamp(1402436408.91203, delta=100)
_assertWithinBounds(1402436408.91303, timestamp)
self.assertEqual(140243640891303, timestamp.raw)
timestamp = utils.Timestamp(1402436408.91203, delta=-100)
_assertWithinBounds(1402436408.91103, timestamp)
self.assertEqual(140243640891103, timestamp.raw)
timestamp = utils.Timestamp(1402436408.91203, delta=0)
_assertWithinBounds(1402436408.91203, timestamp)
self.assertEqual(140243640891203, timestamp.raw)
# delta is independent of offset
timestamp = utils.Timestamp(1402436408.91203, offset=42, delta=100)
self.assertEqual(140243640891303, timestamp.raw)
self.assertEqual(42, timestamp.offset)
# cannot go negative
self.assertRaises(ValueError, utils.Timestamp, 1402436408.91203,
delta=-140243640891203)
def test_int(self):
expected = 1402437965
test_values = (
'1402437965.91203',
'1402437965.91203_00000000',
'1402437965.912030000',
'1402437965.912030000_0000000000000',
'000001402437965.912030000',
'000001402437965.912030000_0000000000',
1402437965.91203,
1402437965.9120300000000000,
1402437965.912029,
1402437965.912029999999999999,
utils.Timestamp(1402437965.91203),
utils.Timestamp(1402437965.91203, offset=0),
utils.Timestamp(1402437965.91203, offset=500),
utils.Timestamp(1402437965.912029),
utils.Timestamp(1402437965.91202999999999999, offset=0),
utils.Timestamp(1402437965.91202999999999999, offset=300),
utils.Timestamp('1402437965.91203'),
utils.Timestamp('1402437965.91203', offset=0),
utils.Timestamp('1402437965.91203', offset=400),
utils.Timestamp('1402437965.912029'),
utils.Timestamp('1402437965.912029', offset=0),
utils.Timestamp('1402437965.912029', offset=200),
utils.Timestamp('1402437965.912029999999999'),
utils.Timestamp('1402437965.912029999999999', offset=0),
utils.Timestamp('1402437965.912029999999999', offset=100),
)
for value in test_values:
timestamp = utils.Timestamp(value)
self.assertEqual(int(timestamp), expected)
self.assertTrue(timestamp > expected)
def test_float(self):
expected = 1402438115.91203
test_values = (
'1402438115.91203',
'1402438115.91203_00000000',
'1402438115.912030000',
'1402438115.912030000_0000000000000',
'000001402438115.912030000',
'000001402438115.912030000_0000000000',
1402438115.91203,
1402438115.9120300000000000,
1402438115.912029,
1402438115.912029999999999999,
utils.Timestamp(1402438115.91203),
utils.Timestamp(1402438115.91203, offset=0),
utils.Timestamp(1402438115.91203, offset=500),
utils.Timestamp(1402438115.912029),
utils.Timestamp(1402438115.91202999999999999, offset=0),
utils.Timestamp(1402438115.91202999999999999, offset=300),
utils.Timestamp('1402438115.91203'),
utils.Timestamp('1402438115.91203', offset=0),
utils.Timestamp('1402438115.91203', offset=400),
utils.Timestamp('1402438115.912029'),
utils.Timestamp('1402438115.912029', offset=0),
utils.Timestamp('1402438115.912029', offset=200),
utils.Timestamp('1402438115.912029999999999'),
utils.Timestamp('1402438115.912029999999999', offset=0),
utils.Timestamp('1402438115.912029999999999', offset=100),
)
tolerance = 0.00001
minimum = expected - tolerance
maximum = expected + tolerance
for value in test_values:
timestamp = utils.Timestamp(value)
self.assertTrue(float(timestamp) > minimum,
'%f is not bigger than %f given %r' % (
timestamp, minimum, value))
self.assertTrue(float(timestamp) < maximum,
'%f is not smaller than %f given %r' % (
timestamp, maximum, value))
# direct comparison of timestamp works too
self.assertTrue(timestamp > minimum,
'%s is not bigger than %f given %r' % (
timestamp.normal, minimum, value))
self.assertTrue(timestamp < maximum,
'%s is not smaller than %f given %r' % (
timestamp.normal, maximum, value))
# ... even against strings
self.assertTrue(timestamp > '%f' % minimum,
'%s is not bigger than %s given %r' % (
timestamp.normal, minimum, value))
self.assertTrue(timestamp < '%f' % maximum,
'%s is not smaller than %s given %r' % (
timestamp.normal, maximum, value))
def test_false(self):
self.assertFalse(utils.Timestamp(0))
self.assertFalse(utils.Timestamp(0, offset=0))
self.assertFalse(utils.Timestamp('0'))
self.assertFalse(utils.Timestamp('0', offset=0))
self.assertFalse(utils.Timestamp(0.0))
self.assertFalse(utils.Timestamp(0.0, offset=0))
self.assertFalse(utils.Timestamp('0.0'))
self.assertFalse(utils.Timestamp('0.0', offset=0))
self.assertFalse(utils.Timestamp(00000000.00000000))
self.assertFalse(utils.Timestamp(00000000.00000000, offset=0))
self.assertFalse(utils.Timestamp('00000000.00000000'))
self.assertFalse(utils.Timestamp('00000000.00000000', offset=0))
def test_true(self):
self.assertTrue(utils.Timestamp(1))
self.assertTrue(utils.Timestamp(1, offset=1))
self.assertTrue(utils.Timestamp(0, offset=1))
self.assertTrue(utils.Timestamp('1'))
self.assertTrue(utils.Timestamp('1', offset=1))
self.assertTrue(utils.Timestamp('0', offset=1))
self.assertTrue(utils.Timestamp(1.1))
self.assertTrue(utils.Timestamp(1.1, offset=1))
self.assertTrue(utils.Timestamp(0.0, offset=1))
self.assertTrue(utils.Timestamp('1.1'))
self.assertTrue(utils.Timestamp('1.1', offset=1))
self.assertTrue(utils.Timestamp('0.0', offset=1))
self.assertTrue(utils.Timestamp(11111111.11111111))
self.assertTrue(utils.Timestamp(11111111.11111111, offset=1))
self.assertTrue(utils.Timestamp(00000000.00000000, offset=1))
self.assertTrue(utils.Timestamp('11111111.11111111'))
self.assertTrue(utils.Timestamp('11111111.11111111', offset=1))
self.assertTrue(utils.Timestamp('00000000.00000000', offset=1))
def test_greater_no_offset(self):
now = time.time()
older = now - 1
timestamp = utils.Timestamp(now)
test_values = (
0, '0', 0.0, '0.0', '0000.0000', '000.000_000',
1, '1', 1.1, '1.1', '1111.1111', '111.111_111',
1402443112.213252, '1402443112.213252', '1402443112.213252_ffff',
older, '%f' % older, '%f_0000ffff' % older,
)
for value in test_values:
other = utils.Timestamp(value)
self.assertNotEqual(timestamp, other) # sanity
self.assertTrue(timestamp > value,
'%r is not greater than %r given %r' % (
timestamp, value, value))
self.assertTrue(timestamp > other,
'%r is not greater than %r given %r' % (
timestamp, other, value))
self.assertTrue(timestamp > other.normal,
'%r is not greater than %r given %r' % (
timestamp, other.normal, value))
self.assertTrue(timestamp > other.internal,
'%r is not greater than %r given %r' % (
timestamp, other.internal, value))
self.assertTrue(timestamp > float(other),
'%r is not greater than %r given %r' % (
timestamp, float(other), value))
self.assertTrue(timestamp > int(other),
'%r is not greater than %r given %r' % (
timestamp, int(other), value))
def _test_greater_with_offset(self, now, test_values):
for offset in range(1, 1000, 100):
timestamp = utils.Timestamp(now, offset=offset)
for value in test_values:
other = utils.Timestamp(value)
self.assertNotEqual(timestamp, other) # sanity
self.assertTrue(timestamp > value,
'%r is not greater than %r given %r' % (
timestamp, value, value))
self.assertTrue(timestamp > other,
'%r is not greater than %r given %r' % (
timestamp, other, value))
self.assertTrue(timestamp > other.normal,
'%r is not greater than %r given %r' % (
timestamp, other.normal, value))
self.assertTrue(timestamp > other.internal,
'%r is not greater than %r given %r' % (
timestamp, other.internal, value))
self.assertTrue(timestamp > float(other),
'%r is not greater than %r given %r' % (
timestamp, float(other), value))
self.assertTrue(timestamp > int(other),
'%r is not greater than %r given %r' % (
timestamp, int(other), value))
def test_greater_with_offset(self):
# Part 1: use the current time reported by Python. This is deliciously
# unpredictable, but completely legitimate and realistic. Finds bugs!
now = time.time()
older = now - 1
test_values = (
0, '0', 0.0, '0.0', '0000.0000', '000.000_000',
1, '1', 1.1, '1.1', '1111.1111', '111.111_111',
1402443346.935174, '1402443346.93517', '1402443346.935169_ffff',
older, now,
)
self._test_greater_with_offset(now, test_values)
# Part 2: Same as above, but with fixed time values that reproduce
# specific corner cases.
now = 1519830570.6949348
older = now - 1
test_values = (
0, '0', 0.0, '0.0', '0000.0000', '000.000_000',
1, '1', 1.1, '1.1', '1111.1111', '111.111_111',
1402443346.935174, '1402443346.93517', '1402443346.935169_ffff',
older, now,
)
self._test_greater_with_offset(now, test_values)
# Part 3: The '%f' problem. A timestamp cannot be converted to a '%f'
# string, then back to a timestamp, and still compare equal to the
# original: a floating point representation can only be "imported" once.
now = 1519830570.6949348
now = float('%f' % now)
older = now - 1
test_values = (
0, '0', 0.0, '0.0', '0000.0000', '000.000_000',
1, '1', 1.1, '1.1', '1111.1111', '111.111_111',
older, '%f' % older, '%f_0000ffff' % older,
now, '%f' % now, '%s_00000000' % now,
)
self._test_greater_with_offset(now, test_values)
def test_smaller_no_offset(self):
now = time.time()
newer = now + 1
timestamp = utils.Timestamp(now)
test_values = (
9999999999.99999, '9999999999.99999', '9999999999.99999_ffff',
newer, '%f' % newer, '%f_0000ffff' % newer,
)
for value in test_values:
other = utils.Timestamp(value)
self.assertNotEqual(timestamp, other) # sanity
self.assertTrue(timestamp < value,
'%r is not smaller than %r given %r' % (
timestamp, value, value))
self.assertTrue(timestamp < other,
'%r is not smaller than %r given %r' % (
timestamp, other, value))
self.assertTrue(timestamp < other.normal,
'%r is not smaller than %r given %r' % (
timestamp, other.normal, value))
self.assertTrue(timestamp < other.internal,
'%r is not smaller than %r given %r' % (
timestamp, other.internal, value))
self.assertTrue(timestamp < float(other),
'%r is not smaller than %r given %r' % (
timestamp, float(other), value))
self.assertTrue(timestamp < int(other),
'%r is not smaller than %r given %r' % (
timestamp, int(other), value))
def test_smaller_with_offset(self):
now = time.time()
newer = now + 1
test_values = (
9999999999.99999, '9999999999.99999', '9999999999.99999_ffff',
newer, '%f' % newer, '%f_0000ffff' % newer,
)
for offset in range(1, 1000, 100):
timestamp = utils.Timestamp(now, offset=offset)
for value in test_values:
other = utils.Timestamp(value)
self.assertNotEqual(timestamp, other) # sanity
self.assertTrue(timestamp < value,
'%r is not smaller than %r given %r' % (
timestamp, value, value))
self.assertTrue(timestamp < other,
'%r is not smaller than %r given %r' % (
timestamp, other, value))
self.assertTrue(timestamp < other.normal,
'%r is not smaller than %r given %r' % (
timestamp, other.normal, value))
self.assertTrue(timestamp < other.internal,
'%r is not smaller than %r given %r' % (
timestamp, other.internal, value))
self.assertTrue(timestamp < float(other),
'%r is not smaller than %r given %r' % (
timestamp, float(other), value))
self.assertTrue(timestamp < int(other),
'%r is not smaller than %r given %r' % (
timestamp, int(other), value))
def test_cmp_with_none(self):
self.assertGreater(utils.Timestamp(0), None)
self.assertGreater(utils.Timestamp(1.0), None)
self.assertGreater(utils.Timestamp(1.0, 42), None)
def test_ordering(self):
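# Timestamps sort by normalized time first and offset second; the
# internal string form sorts identically, as both assertions below show.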
given = [
'1402444820.62590_000000000000000a',
'1402444820.62589_0000000000000001',
'1402444821.52589_0000000000000004',
'1402444920.62589_0000000000000004',
'1402444821.62589_000000000000000a',
'1402444821.72589_000000000000000a',
'1402444920.62589_0000000000000002',
'1402444820.62589_0000000000000002',
'1402444820.62589_000000000000000a',
'1402444820.62590_0000000000000004',
'1402444920.62589_000000000000000a',
'1402444820.62590_0000000000000002',
'1402444821.52589_0000000000000002',
'1402444821.52589_0000000000000000',
'1402444920.62589',
'1402444821.62589_0000000000000004',
'1402444821.72589_0000000000000001',
'1402444820.62590',
'1402444820.62590_0000000000000001',
'1402444820.62589_0000000000000004',
'1402444821.72589_0000000000000000',
'1402444821.52589_000000000000000a',
'1402444821.72589_0000000000000004',
'1402444821.62589',
'1402444821.52589_0000000000000001',
'1402444821.62589_0000000000000001',
'1402444821.62589_0000000000000002',
'1402444821.72589_0000000000000002',
'1402444820.62589',
'1402444920.62589_0000000000000001']
expected = [
'1402444820.62589',
'1402444820.62589_0000000000000001',
'1402444820.62589_0000000000000002',
'1402444820.62589_0000000000000004',
'1402444820.62589_000000000000000a',
'1402444820.62590',
'1402444820.62590_0000000000000001',
'1402444820.62590_0000000000000002',
'1402444820.62590_0000000000000004',
'1402444820.62590_000000000000000a',
'1402444821.52589',
'1402444821.52589_0000000000000001',
'1402444821.52589_0000000000000002',
'1402444821.52589_0000000000000004',
'1402444821.52589_000000000000000a',
'1402444821.62589',
'1402444821.62589_0000000000000001',
'1402444821.62589_0000000000000002',
'1402444821.62589_0000000000000004',
'1402444821.62589_000000000000000a',
'1402444821.72589',
'1402444821.72589_0000000000000001',
'1402444821.72589_0000000000000002',
'1402444821.72589_0000000000000004',
'1402444821.72589_000000000000000a',
'1402444920.62589',
'1402444920.62589_0000000000000001',
'1402444920.62589_0000000000000002',
'1402444920.62589_0000000000000004',
'1402444920.62589_000000000000000a',
]
# less visual version
"""
now = time.time()
given = [
utils.Timestamp(now + i, offset=offset).internal
for i in (0, 0.00001, 0.9, 1.0, 1.1, 100.0)
for offset in (0, 1, 2, 4, 10)
]
expected = [t for t in given]
random.shuffle(given)
"""
self.assertEqual(len(given), len(expected)) # sanity
timestamps = [utils.Timestamp(t) for t in given]
# our expected values don't include insignificant offsets
with mock.patch('swift.common.utils.FORCE_INTERNAL', new=False):
self.assertEqual(
[t.internal for t in sorted(timestamps)], expected)
# string sorting works as well
self.assertEqual(
sorted([t.internal for t in timestamps]), expected)
def test_hashable(self):
ts_0 = utils.Timestamp('1402444821.72589')
ts_0_also = utils.Timestamp('1402444821.72589')
self.assertEqual(ts_0, ts_0_also) # sanity
self.assertEqual(hash(ts_0), hash(ts_0_also))
d = {ts_0: 'whatever'}
self.assertIn(ts_0, d) # sanity
self.assertIn(ts_0_also, d)
def test_out_of_range_comparisons(self):
now = utils.Timestamp.now()
def check_is_later(val):
self.assertTrue(now != val)
self.assertFalse(now == val)
self.assertTrue(now <= val)
self.assertTrue(now < val)
self.assertTrue(val > now)
self.assertTrue(val >= now)
check_is_later(1e30)
check_is_later(1579753284000) # someone gave us ms instead of s!
check_is_later('1579753284000')
check_is_later(b'1e15')
check_is_later(u'1.e+10_f')
def check_is_earlier(val):
self.assertTrue(now != val)
self.assertFalse(now == val)
self.assertTrue(now >= val)
self.assertTrue(now > val)
self.assertTrue(val < now)
self.assertTrue(val <= now)
check_is_earlier(-1)
check_is_earlier(-0.1)
check_is_earlier('-9999999')
check_is_earlier(b'-9999.999')
check_is_earlier(u'-1234_5678')
def test_inversion(self):
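# ~ts mirrors a timestamp around the maximum value 9999999999.99999,
# which reverses sort order; timestamps with offsets cannot be inverted.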
ts = utils.Timestamp(0)
self.assertIsInstance(~ts, utils.Timestamp)
self.assertEqual((~ts).internal, '9999999999.99999')
ts = utils.Timestamp(123456.789)
self.assertIsInstance(~ts, utils.Timestamp)
self.assertEqual(ts.internal, '0000123456.78900')
self.assertEqual((~ts).internal, '9999876543.21099')
timestamps = sorted(utils.Timestamp(random.random() * 1e10)
for _ in range(20))
self.assertEqual([x.internal for x in timestamps],
sorted(x.internal for x in timestamps))
self.assertEqual([(~x).internal for x in reversed(timestamps)],
sorted((~x).internal for x in timestamps))
ts = utils.Timestamp.now()
self.assertGreater(~ts, ts) # NB: will break around 2128
ts = utils.Timestamp.now(offset=1)
with self.assertRaises(ValueError) as caught:
~ts
self.assertEqual(caught.exception.args[0],
'Cannot invert timestamps with offsets')
class TestTimestampEncoding(unittest.TestCase):
def setUp(self):
t0 = utils.Timestamp(0.0)
t1 = utils.Timestamp(997.9996)
t2 = utils.Timestamp(999)
t3 = utils.Timestamp(1000, 24)
t4 = utils.Timestamp(1001)
t5 = utils.Timestamp(1002.00040)
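# As the pairs below show, the encoded form is the first timestamp's
# internal value followed by signed hex deltas in raw 10-microsecond
# ticks: the second timestamp relative to the first, then the third
# relative to the second (0x186a0 == 100000 ticks == 1 second).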
# encodings that are expected when explicit = False
self.non_explicit_encodings = (
('0000001000.00000_18', (t3, t3, t3)),
('0000001000.00000_18', (t3, t3, None)),
)
# mappings that are expected when explicit = True
self.explicit_encodings = (
('0000001000.00000_18+0+0', (t3, t3, t3)),
('0000001000.00000_18+0', (t3, t3, None)),
)
# mappings that are expected when explicit = True or False
self.encodings = (
('0000001000.00000_18+0+186a0', (t3, t3, t4)),
('0000001000.00000_18+186a0+186c8', (t3, t4, t5)),
('0000001000.00000_18-186a0+0', (t3, t2, t2)),
('0000001000.00000_18+0-186a0', (t3, t3, t2)),
('0000001000.00000_18-186a0-186c8', (t3, t2, t1)),
('0000001000.00000_18', (t3, None, None)),
('0000001000.00000_18+186a0', (t3, t4, None)),
('0000001000.00000_18-186a0', (t3, t2, None)),
('0000001000.00000_18', (t3, None, t1)),
('0000001000.00000_18-5f5e100', (t3, t0, None)),
('0000001000.00000_18+0-5f5e100', (t3, t3, t0)),
('0000001000.00000_18-5f5e100+5f45a60', (t3, t0, t2)),
)
# decodings that are expected when explicit = False
self.non_explicit_decodings = (
('0000001000.00000_18', (t3, t3, t3)),
('0000001000.00000_18+186a0', (t3, t4, t4)),
('0000001000.00000_18-186a0', (t3, t2, t2)),
('0000001000.00000_18+186a0', (t3, t4, t4)),
('0000001000.00000_18-186a0', (t3, t2, t2)),
('0000001000.00000_18-5f5e100', (t3, t0, t0)),
)
# decodings that are expected when explicit = True
self.explicit_decodings = (
('0000001000.00000_18+0+0', (t3, t3, t3)),
('0000001000.00000_18+0', (t3, t3, None)),
('0000001000.00000_18', (t3, None, None)),
('0000001000.00000_18+186a0', (t3, t4, None)),
('0000001000.00000_18-186a0', (t3, t2, None)),
('0000001000.00000_18-5f5e100', (t3, t0, None)),
)
# decodings that are expected when explicit = True or False
self.decodings = (
('0000001000.00000_18+0+186a0', (t3, t3, t4)),
('0000001000.00000_18+186a0+186c8', (t3, t4, t5)),
('0000001000.00000_18-186a0+0', (t3, t2, t2)),
('0000001000.00000_18+0-186a0', (t3, t3, t2)),
('0000001000.00000_18-186a0-186c8', (t3, t2, t1)),
('0000001000.00000_18-5f5e100+5f45a60', (t3, t0, t2)),
)
def _assertEqual(self, expected, actual, test):
self.assertEqual(expected, actual,
'Got %s but expected %s for parameters %s'
% (actual, expected, test))
def test_encoding(self):
for test in self.explicit_encodings:
actual = utils.encode_timestamps(test[1][0], test[1][1],
test[1][2], True)
self._assertEqual(test[0], actual, test[1])
for test in self.non_explicit_encodings:
actual = utils.encode_timestamps(test[1][0], test[1][1],
test[1][2], False)
self._assertEqual(test[0], actual, test[1])
for explicit in (True, False):
for test in self.encodings:
actual = utils.encode_timestamps(test[1][0], test[1][1],
test[1][2], explicit)
self._assertEqual(test[0], actual, test[1])
def test_decoding(self):
for test in self.explicit_decodings:
actual = utils.decode_timestamps(test[0], True)
self._assertEqual(test[1], actual, test[0])
for test in self.non_explicit_decodings:
actual = utils.decode_timestamps(test[0], False)
self._assertEqual(test[1], actual, test[0])
for explicit in (True, False):
for test in self.decodings:
actual = utils.decode_timestamps(test[0], explicit)
self._assertEqual(test[1], actual, test[0])
class TestUtils(unittest.TestCase):
"""Tests for swift.common.utils """
def setUp(self):
utils.HASH_PATH_SUFFIX = b'endcap'
utils.HASH_PATH_PREFIX = b'startcap'
self.md5_test_data = "Openstack forever".encode('utf-8')
try:
self.md5_digest = hashlib.md5(self.md5_test_data).hexdigest()
self.fips_enabled = False
except ValueError:
self.md5_digest = '0d6dc3c588ae71a04ce9a6beebbbba06'
self.fips_enabled = True
def test_get_zero_indexed_base_string(self):
self.assertEqual(utils.get_zero_indexed_base_string('something', 0),
'something')
self.assertEqual(utils.get_zero_indexed_base_string('something', None),
'something')
self.assertEqual(utils.get_zero_indexed_base_string('something', 1),
'something-1')
self.assertRaises(ValueError, utils.get_zero_indexed_base_string,
'something', 'not_integer')
@with_tempdir
def test_lock_path(self, tmpdir):
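# lock_path allows up to `limit` concurrent holders (default 1);
# additional attempts raise LockTimeout once the timeout expires.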
# 2 locks with limit=1 must fail
success = False
with utils.lock_path(tmpdir, 0.1):
with self.assertRaises(LockTimeout):
with utils.lock_path(tmpdir, 0.1):
success = True
self.assertFalse(success)
# 2 locks with limit=2 must succeed
success = False
with utils.lock_path(tmpdir, 0.1, limit=2):
try:
with utils.lock_path(tmpdir, 0.1, limit=2):
success = True
except LockTimeout as exc:
self.fail('Unexpected exception %s' % exc)
self.assertTrue(success)
# 3 locks with limit=2 must fail
success = False
with utils.lock_path(tmpdir, 0.1, limit=2):
with utils.lock_path(tmpdir, 0.1, limit=2):
with self.assertRaises(LockTimeout):
with utils.lock_path(tmpdir, 0.1):
success = True
self.assertFalse(success)
@with_tempdir
def test_lock_path_invalid_limit(self, tmpdir):
success = False
with self.assertRaises(ValueError):
with utils.lock_path(tmpdir, 0.1, limit=0):
success = True
self.assertFalse(success)
with self.assertRaises(ValueError):
with utils.lock_path(tmpdir, 0.1, limit=-1):
success = True
self.assertFalse(success)
with self.assertRaises(TypeError):
with utils.lock_path(tmpdir, 0.1, limit='1'):
success = True
self.assertFalse(success)
with self.assertRaises(TypeError):
with utils.lock_path(tmpdir, 0.1, limit=1.1):
success = True
self.assertFalse(success)
@with_tempdir
def test_lock_path_num_sleeps(self, tmpdir):
num_short_calls = [0]
exception_raised = [False]
def my_sleep(to_sleep):
if to_sleep == 0.01:
num_short_calls[0] += 1
else:
raise Exception('sleep time changed: %s' % to_sleep)
try:
with mock.patch('swift.common.utils.sleep', my_sleep):
with utils.lock_path(tmpdir):
with utils.lock_path(tmpdir):
pass
except Exception as e:
exception_raised[0] = True
self.assertTrue('sleep time changed' in str(e))
self.assertEqual(num_short_calls[0], 11)
self.assertTrue(exception_raised[0])
@with_tempdir
def test_lock_path_class(self, tmpdir):
with utils.lock_path(tmpdir, 0.1, ReplicationLockTimeout):
exc = None
exc2 = None
success = False
try:
with utils.lock_path(tmpdir, 0.1, ReplicationLockTimeout):
success = True
except ReplicationLockTimeout as err:
exc = err
except LockTimeout as err:
exc2 = err
self.assertTrue(exc is not None)
self.assertTrue(exc2 is None)
self.assertTrue(not success)
exc = None
exc2 = None
success = False
try:
with utils.lock_path(tmpdir, 0.1):
success = True
except ReplicationLockTimeout as err:
exc = err
except LockTimeout as err:
exc2 = err
self.assertTrue(exc is None)
self.assertTrue(exc2 is not None)
self.assertTrue(not success)
@with_tempdir
def test_lock_path_name(self, tmpdir):
# With default limit (1), can't take the same named lock twice
success = False
with utils.lock_path(tmpdir, 0.1, name='foo'):
with self.assertRaises(LockTimeout):
with utils.lock_path(tmpdir, 0.1, name='foo'):
success = True
self.assertFalse(success)
# With default limit (1), can take two differently named locks
success = False
with utils.lock_path(tmpdir, 0.1, name='foo'):
with utils.lock_path(tmpdir, 0.1, name='bar'):
success = True
self.assertTrue(success)
# With default limit (1), can take a named lock and the default lock
success = False
with utils.lock_path(tmpdir, 0.1, name='foo'):
with utils.lock_path(tmpdir, 0.1):
success = True
self.assertTrue(success)
def test_normalize_timestamp(self):
# Test swift.common.utils.normalize_timestamp
self.assertEqual(utils.normalize_timestamp('1253327593.48174'),
"1253327593.48174")
self.assertEqual(utils.normalize_timestamp(1253327593.48174),
"1253327593.48174")
self.assertEqual(utils.normalize_timestamp('1253327593.48'),
"1253327593.48000")
self.assertEqual(utils.normalize_timestamp(1253327593.48),
"1253327593.48000")
self.assertEqual(utils.normalize_timestamp('253327593.48'),
"0253327593.48000")
self.assertEqual(utils.normalize_timestamp(253327593.48),
"0253327593.48000")
self.assertEqual(utils.normalize_timestamp('1253327593'),
"1253327593.00000")
self.assertEqual(utils.normalize_timestamp(1253327593),
"1253327593.00000")
self.assertRaises(ValueError, utils.normalize_timestamp, '')
self.assertRaises(ValueError, utils.normalize_timestamp, 'abc')
def test_normalize_delete_at_timestamp(self):
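# delete-at values are clamped to 0000000000..9999999999 and truncated
# to whole seconds; high_precision=True keeps five decimal places.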
self.assertEqual(
utils.normalize_delete_at_timestamp(1253327593),
'1253327593')
self.assertEqual(
utils.normalize_delete_at_timestamp(1253327593.67890),
'1253327593')
self.assertEqual(
utils.normalize_delete_at_timestamp('1253327593'),
'1253327593')
self.assertEqual(
utils.normalize_delete_at_timestamp('1253327593.67890'),
'1253327593')
self.assertEqual(
utils.normalize_delete_at_timestamp(-1253327593),
'0000000000')
self.assertEqual(
utils.normalize_delete_at_timestamp(-1253327593.67890),
'0000000000')
self.assertEqual(
utils.normalize_delete_at_timestamp('-1253327593'),
'0000000000')
self.assertEqual(
utils.normalize_delete_at_timestamp('-1253327593.67890'),
'0000000000')
self.assertEqual(
utils.normalize_delete_at_timestamp(71253327593),
'9999999999')
self.assertEqual(
utils.normalize_delete_at_timestamp(71253327593.67890),
'9999999999')
self.assertEqual(
utils.normalize_delete_at_timestamp('71253327593'),
'9999999999')
self.assertEqual(
utils.normalize_delete_at_timestamp('71253327593.67890'),
'9999999999')
with self.assertRaises(TypeError):
utils.normalize_delete_at_timestamp(None)
with self.assertRaises(ValueError):
utils.normalize_delete_at_timestamp('')
with self.assertRaises(ValueError):
utils.normalize_delete_at_timestamp('abc')
def test_normalize_delete_at_timestamp_high_precision(self):
self.assertEqual(
utils.normalize_delete_at_timestamp(1253327593, True),
'1253327593.00000')
self.assertEqual(
utils.normalize_delete_at_timestamp(1253327593.67890, True),
'1253327593.67890')
self.assertEqual(
utils.normalize_delete_at_timestamp('1253327593', True),
'1253327593.00000')
self.assertEqual(
utils.normalize_delete_at_timestamp('1253327593.67890', True),
'1253327593.67890')
self.assertEqual(
utils.normalize_delete_at_timestamp(-1253327593, True),
'0000000000.00000')
self.assertEqual(
utils.normalize_delete_at_timestamp(-1253327593.67890, True),
'0000000000.00000')
self.assertEqual(
utils.normalize_delete_at_timestamp('-1253327593', True),
'0000000000.00000')
self.assertEqual(
utils.normalize_delete_at_timestamp('-1253327593.67890', True),
'0000000000.00000')
self.assertEqual(
utils.normalize_delete_at_timestamp(71253327593, True),
'9999999999.99999')
self.assertEqual(
utils.normalize_delete_at_timestamp(71253327593.67890, True),
'9999999999.99999')
self.assertEqual(
utils.normalize_delete_at_timestamp('71253327593', True),
'9999999999.99999')
self.assertEqual(
utils.normalize_delete_at_timestamp('71253327593.67890', True),
'9999999999.99999')
with self.assertRaises(TypeError):
utils.normalize_delete_at_timestamp(None, True)
with self.assertRaises(ValueError):
utils.normalize_delete_at_timestamp('', True)
with self.assertRaises(ValueError):
utils.normalize_delete_at_timestamp('abc', True)
def test_last_modified_date_to_timestamp(self):
expectations = {
'1970-01-01T00:00:00.000000': 0.0,
'2014-02-28T23:22:36.698390': 1393629756.698390,
'2011-03-19T04:03:00.604554': 1300507380.604554,
}
for last_modified, ts in expectations.items():
real = utils.last_modified_date_to_timestamp(last_modified)
self.assertEqual(real, ts, "failed for %s" % last_modified)
def test_last_modified_date_to_timestamp_when_system_not_UTC(self):
try:
old_tz = os.environ.get('TZ')
# Western Argentina Summer Time. Found in glibc manual; this
# timezone always has a non-zero offset from UTC, so this test is
# always meaningful.
os.environ['TZ'] = 'WART4WARST,J1/0,J365/25'
self.assertEqual(utils.last_modified_date_to_timestamp(
'1970-01-01T00:00:00.000000'),
0.0)
finally:
if old_tz is not None:
os.environ['TZ'] = old_tz
else:
os.environ.pop('TZ')
def test_drain_and_close(self):
utils.drain_and_close([])
utils.drain_and_close(iter([]))
drained = [False]
def gen():
yield 'x'
yield 'y'
drained[0] = True
utils.drain_and_close(gen())
self.assertTrue(drained[0])
utils.drain_and_close(Response(status=200, body=b'Some body'))
drained = [False]
utils.drain_and_close(Response(status=200, app_iter=gen()))
self.assertTrue(drained[0])
def test_backwards(self):
# Test swift.common.utils.backward
# The lines are designed so that the function would encounter
# all of the boundary conditions and typical conditions.
# Block boundaries are marked with '<>' characters
blocksize = 25
lines = [b'123456789x12345678><123456789\n', # block larger than rest
b'123456789x123>\n', # block ends just before \n character
b'123423456789\n',
b'123456789x\n', # block ends at the end of line
b'<123456789x123456789x123\n',
b'<6789x123\n', # block ends at the beginning of the line
b'6789x1234\n',
b'1234><234\n', # block ends typically in the middle of line
b'123456789x123456789\n']
with TemporaryFile() as f:
for line in lines:
f.write(line)
count = len(lines) - 1
for line in utils.backward(f, blocksize):
self.assertEqual(line, lines[count].split(b'\n')[0])
count -= 1
# Empty file case
with TemporaryFile('r') as f:
self.assertEqual([], list(utils.backward(f)))
def test_mkdirs(self):
testdir_base = mkdtemp()
testroot = os.path.join(testdir_base, 'mkdirs')
try:
self.assertTrue(not os.path.exists(testroot))
utils.mkdirs(testroot)
self.assertTrue(os.path.exists(testroot))
utils.mkdirs(testroot)
self.assertTrue(os.path.exists(testroot))
rmtree(testroot, ignore_errors=1)
testdir = os.path.join(testroot, 'one/two/three')
self.assertTrue(not os.path.exists(testdir))
utils.mkdirs(testdir)
self.assertTrue(os.path.exists(testdir))
utils.mkdirs(testdir)
self.assertTrue(os.path.exists(testdir))
rmtree(testroot, ignore_errors=1)
open(testroot, 'wb').close()
self.assertTrue(not os.path.exists(testdir))
self.assertRaises(OSError, utils.mkdirs, testdir)
os.unlink(testroot)
finally:
rmtree(testdir_base)
def test_split_path(self):
# Test swift.common.utils.split_path
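# split_path(path, minsegs, maxsegs, rest_with_last) validates the
# segment count and returns the segments; with rest_with_last the final
# segment keeps any embedded slashes and comes back as None when absent,
# and bad paths raise ValueError with the path url-quoted in the message.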
self.assertRaises(ValueError, utils.split_path, '')
self.assertRaises(ValueError, utils.split_path, '/')
self.assertRaises(ValueError, utils.split_path, '//')
self.assertEqual(utils.split_path('/a'), ['a'])
self.assertRaises(ValueError, utils.split_path, '//a')
self.assertEqual(utils.split_path('/a/'), ['a'])
self.assertRaises(ValueError, utils.split_path, '/a/c')
self.assertRaises(ValueError, utils.split_path, '//c')
self.assertRaises(ValueError, utils.split_path, '/a/c/')
self.assertRaises(ValueError, utils.split_path, '/a//')
self.assertRaises(ValueError, utils.split_path, '/a', 2)
self.assertRaises(ValueError, utils.split_path, '/a', 2, 3)
self.assertRaises(ValueError, utils.split_path, '/a', 2, 3, True)
self.assertEqual(utils.split_path('/a/c', 2), ['a', 'c'])
self.assertEqual(utils.split_path('/a/c/o', 3), ['a', 'c', 'o'])
self.assertRaises(ValueError, utils.split_path, '/a/c/o/r', 3, 3)
self.assertEqual(utils.split_path('/a/c/o/r', 3, 3, True),
['a', 'c', 'o/r'])
self.assertEqual(utils.split_path('/a/c', 2, 3, True),
['a', 'c', None])
self.assertRaises(ValueError, utils.split_path, '/a', 5, 4)
self.assertEqual(utils.split_path('/a/c/', 2), ['a', 'c'])
self.assertEqual(utils.split_path('/a/c/', 2, 3), ['a', 'c', ''])
try:
utils.split_path('o\nn e', 2)
except ValueError as err:
self.assertEqual(str(err), 'Invalid path: o%0An%20e')
try:
utils.split_path('o\nn e', 2, 3, True)
except ValueError as err:
self.assertEqual(str(err), 'Invalid path: o%0An%20e')
def test_validate_device_partition(self):
# Test swift.common.utils.validate_device_partition
utils.validate_device_partition('foo', 'bar')
self.assertRaises(ValueError,
utils.validate_device_partition, '', '')
self.assertRaises(ValueError,
utils.validate_device_partition, '', 'foo')
self.assertRaises(ValueError,
utils.validate_device_partition, 'foo', '')
self.assertRaises(ValueError,
utils.validate_device_partition, 'foo/bar', 'foo')
self.assertRaises(ValueError,
utils.validate_device_partition, 'foo', 'foo/bar')
self.assertRaises(ValueError,
utils.validate_device_partition, '.', 'foo')
self.assertRaises(ValueError,
utils.validate_device_partition, '..', 'foo')
self.assertRaises(ValueError,
utils.validate_device_partition, 'foo', '.')
self.assertRaises(ValueError,
utils.validate_device_partition, 'foo', '..')
try:
utils.validate_device_partition('o\nn e', 'foo')
except ValueError as err:
self.assertEqual(str(err), 'Invalid device: o%0An%20e')
try:
utils.validate_device_partition('foo', 'o\nn e')
except ValueError as err:
self.assertEqual(str(err), 'Invalid partition: o%0An%20e')
def test_NullLogger(self):
# Test swift.common.utils.NullLogger
sio = StringIO()
nl = utils.NullLogger()
nl.write('test')
self.assertEqual(sio.getvalue(), '')
def test_LoggerFileObject(self):
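# LoggerFileObject forwards writes to the logger with a STDOUT: or
# STDERR: prefix, folds writelines() into one #012-separated record,
# keeps accepting writes after close(), and raises on any read.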
orig_stdout = sys.stdout
orig_stderr = sys.stderr
sio = StringIO()
handler = logging.StreamHandler(sio)
logger = logging.getLogger()
logger.addHandler(handler)
lfo_stdout = utils.LoggerFileObject(logger)
lfo_stderr = utils.LoggerFileObject(logger, 'STDERR')
print('test1')
self.assertEqual(sio.getvalue(), '')
sys.stdout = lfo_stdout
print('test2')
self.assertEqual(sio.getvalue(), 'STDOUT: test2\n')
sys.stderr = lfo_stderr
print('test4', file=sys.stderr)
self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n')
sys.stdout = orig_stdout
print('test5')
self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n')
print('test6', file=sys.stderr)
self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n'
'STDERR: test6\n')
sys.stderr = orig_stderr
print('test8')
self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n'
'STDERR: test6\n')
lfo_stdout.writelines(['a', 'b', 'c'])
self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n'
'STDERR: test6\nSTDOUT: a#012b#012c\n')
lfo_stdout.close()
lfo_stderr.close()
lfo_stdout.write('d')
self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n'
'STDERR: test6\nSTDOUT: a#012b#012c\nSTDOUT: d\n')
lfo_stdout.flush()
self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n'
'STDERR: test6\nSTDOUT: a#012b#012c\nSTDOUT: d\n')
for lfo in (lfo_stdout, lfo_stderr):
got_exc = False
try:
for line in lfo:
pass
except Exception:
got_exc = True
self.assertTrue(got_exc)
got_exc = False
try:
for line in lfo:
pass
except Exception:
got_exc = True
self.assertTrue(got_exc)
self.assertRaises(IOError, lfo.read)
self.assertRaises(IOError, lfo.read, 1024)
self.assertRaises(IOError, lfo.readline)
self.assertRaises(IOError, lfo.readline, 1024)
lfo.tell()
def test_LoggerFileObject_recursion(self):
crashy_calls = [0]
class CrashyLogger(logging.Handler):
def emit(self, record):
crashy_calls[0] += 1
try:
# Pretend to be trying to send to syslog, but syslogd is
# dead. We need the raise here to set sys.exc_info.
raise socket.error(errno.ENOTCONN, "This is an ex-syslog")
except socket.error:
self.handleError(record)
logger = logging.getLogger()
handler = CrashyLogger()
logger.addHandler(handler)
# Set up some real file descriptors for stdio. If you run
# nosetests with "-s", you already have real files there, but
# otherwise they're StringIO objects.
#
# In any case, since capture_stdio() closes sys.stdin and friends,
# we'd want to set up some sacrificial files so as to not goof up
# the testrunner.
new_stdin = open(os.devnull, 'r+b')
new_stdout = open(os.devnull, 'w+b')
new_stderr = open(os.devnull, 'w+b')
with contextlib.closing(new_stdin), contextlib.closing(new_stdout), \
contextlib.closing(new_stderr):
# logging.raiseExceptions is set to False in test/__init__.py, but
# is True in Swift daemons, and the error doesn't manifest without
# it.
with mock.patch('sys.stdin', new_stdin), \
mock.patch('sys.stdout', new_stdout), \
mock.patch('sys.stderr', new_stderr), \
mock.patch.object(logging, 'raiseExceptions', True):
# Note: since stdio is hooked up to /dev/null in here, using
# pdb is basically impossible. Sorry about that.
utils.capture_stdio(logger)
logger.info("I like ham")
self.assertGreater(crashy_calls[0], 1)
logger.removeHandler(handler)
def test_parse_options(self):
# Get a file that is definitely on disk
with NamedTemporaryFile() as f:
conf_file = f.name
conf, options = utils.parse_options(test_args=[conf_file])
self.assertEqual(conf, conf_file)
# assert defaults
self.assertEqual(options['verbose'], False)
self.assertNotIn('once', options)
# assert verbose as option
conf, options = utils.parse_options(test_args=[conf_file, '-v'])
self.assertEqual(options['verbose'], True)
# check once option
conf, options = utils.parse_options(test_args=[conf_file],
once=True)
self.assertEqual(options['once'], False)
test_args = [conf_file, '--once']
conf, options = utils.parse_options(test_args=test_args, once=True)
self.assertEqual(options['once'], True)
# check options as arg parsing
test_args = [conf_file, 'once', 'plugin_name', 'verbose']
conf, options = utils.parse_options(test_args=test_args, once=True)
self.assertEqual(options['verbose'], True)
self.assertEqual(options['once'], True)
self.assertEqual(options['extra_args'], ['plugin_name'])
def test_parse_options_errors(self):
orig_stdout = sys.stdout
orig_stderr = sys.stderr
stdo = StringIO()
stde = StringIO()
utils.sys.stdout = stdo
utils.sys.stderr = stde
self.assertRaises(SystemExit, utils.parse_options, once=True,
test_args=[])
self.assertTrue('missing config' in stdo.getvalue())
# verify the conf file must exist; the context manager will delete the temp file
with NamedTemporaryFile() as f:
conf_file = f.name
self.assertRaises(SystemExit, utils.parse_options, once=True,
test_args=[conf_file])
self.assertTrue('unable to locate' in stdo.getvalue())
# reset stdio
utils.sys.stdout = orig_stdout
utils.sys.stderr = orig_stderr
def test_dump_recon_cache(self):
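# dump_recon_cache merges the submitted dict into the JSON cache file:
# nested dicts are merged, previously cached keys persist, and
# submitting an empty dict for a key erases that entry.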
testdir_base = mkdtemp()
testcache_file = os.path.join(testdir_base, 'cache.recon')
logger = utils.get_logger(None, 'server', log_route='server')
try:
submit_dict = {'key0': 99,
'key1': {'value1': 1, 'value2': 2}}
utils.dump_recon_cache(submit_dict, testcache_file, logger)
with open(testcache_file) as fd:
file_dict = json.loads(fd.readline())
self.assertEqual(submit_dict, file_dict)
# Use a nested entry
submit_dict = {'key0': 101,
'key1': {'key2': {'value1': 1, 'value2': 2}}}
expect_dict = {'key0': 101,
'key1': {'key2': {'value1': 1, 'value2': 2},
'value1': 1, 'value2': 2}}
utils.dump_recon_cache(submit_dict, testcache_file, logger)
with open(testcache_file) as fd:
file_dict = json.loads(fd.readline())
self.assertEqual(expect_dict, file_dict)
# nested dict items are not sticky
submit_dict = {'key1': {'key2': {'value3': 3}}}
expect_dict = {'key0': 101,
'key1': {'key2': {'value3': 3},
'value1': 1, 'value2': 2}}
utils.dump_recon_cache(submit_dict, testcache_file, logger)
with open(testcache_file) as fd:
file_dict = json.loads(fd.readline())
self.assertEqual(expect_dict, file_dict)
# cached entries are sticky
submit_dict = {}
utils.dump_recon_cache(submit_dict, testcache_file, logger)
with open(testcache_file) as fd:
file_dict = json.loads(fd.readline())
self.assertEqual(expect_dict, file_dict)
# nested dicts can be erased...
submit_dict = {'key1': {'key2': {}}}
expect_dict = {'key0': 101,
'key1': {'value1': 1, 'value2': 2}}
utils.dump_recon_cache(submit_dict, testcache_file, logger)
with open(testcache_file) as fd:
file_dict = json.loads(fd.readline())
self.assertEqual(expect_dict, file_dict)
# ... and erasure is idempotent
utils.dump_recon_cache(submit_dict, testcache_file, logger)
with open(testcache_file) as fd:
file_dict = json.loads(fd.readline())
self.assertEqual(expect_dict, file_dict)
# top level dicts can be erased...
submit_dict = {'key1': {}}
expect_dict = {'key0': 101}
utils.dump_recon_cache(submit_dict, testcache_file, logger)
with open(testcache_file) as fd:
file_dict = json.loads(fd.readline())
self.assertEqual(expect_dict, file_dict)
# ... and erasure is idempotent
utils.dump_recon_cache(submit_dict, testcache_file, logger)
with open(testcache_file) as fd:
file_dict = json.loads(fd.readline())
self.assertEqual(expect_dict, file_dict)
finally:
rmtree(testdir_base)
def test_dump_recon_cache_set_owner(self):
testdir_base = mkdtemp()
testcache_file = os.path.join(testdir_base, 'cache.recon')
logger = utils.get_logger(None, 'server', log_route='server')
try:
submit_dict = {'key1': {'value1': 1, 'value2': 2}}
_ret = lambda: None
_ret.pw_uid = 100
_mock_getpwnam = MagicMock(return_value=_ret)
_mock_chown = mock.Mock()
with patch('os.chown', _mock_chown), \
patch('pwd.getpwnam', _mock_getpwnam):
utils.dump_recon_cache(submit_dict, testcache_file,
logger, set_owner="swift")
_mock_getpwnam.assert_called_once_with("swift")
self.assertEqual(_mock_chown.call_args[0][1], 100)
finally:
rmtree(testdir_base)
def test_dump_recon_cache_permission_denied(self):
testdir_base = mkdtemp()
testcache_file = os.path.join(testdir_base, 'cache.recon')
class MockLogger(object):
def __init__(self):
self._excs = []
def exception(self, message):
_junk, exc, _junk = sys.exc_info()
self._excs.append(exc)
logger = MockLogger()
try:
submit_dict = {'key1': {'value1': 1, 'value2': 2}}
with mock.patch(
'swift.common.utils.NamedTemporaryFile',
side_effect=IOError(13, 'Permission Denied')):
utils.dump_recon_cache(submit_dict, testcache_file, logger)
self.assertIsInstance(logger._excs[0], IOError)
finally:
rmtree(testdir_base)
def test_load_recon_cache(self):
stub_data = {'test': 'foo'}
with NamedTemporaryFile() as f:
f.write(json.dumps(stub_data).encode("utf-8"))
f.flush()
self.assertEqual(stub_data, utils.load_recon_cache(f.name))
# missing files are treated as empty
self.assertFalse(os.path.exists(f.name)) # sanity
self.assertEqual({}, utils.load_recon_cache(f.name))
# Corrupt files are treated as empty. We could crash and make an
# operator fix the corrupt file, but they'll "fix" it with "rm -f
# /var/cache/swift/*.recon", so let's just do it for them.
with NamedTemporaryFile() as f:
f.write(b"{not [valid (json")
f.flush()
self.assertEqual({}, utils.load_recon_cache(f.name))
def test_get_logger(self):
sio = StringIO()
logger = logging.getLogger('server')
logger.addHandler(logging.StreamHandler(sio))
logger = utils.get_logger(None, 'server', log_route='server')
logger.warning('test1')
self.assertEqual(sio.getvalue(), 'test1\n')
logger.debug('test2')
self.assertEqual(sio.getvalue(), 'test1\n')
logger = utils.get_logger({'log_level': 'DEBUG'}, 'server',
log_route='server')
logger.debug('test3')
self.assertEqual(sio.getvalue(), 'test1\ntest3\n')
# Doesn't really test that the log facility is truly being used all the
# way to syslog, but it exercises the code.
logger = utils.get_logger({'log_facility': 'LOG_LOCAL3'}, 'server',
log_route='server')
logger.warning('test4')
self.assertEqual(sio.getvalue(),
'test1\ntest3\ntest4\n')
# make sure debug doesn't log by default
logger.debug('test5')
self.assertEqual(sio.getvalue(),
'test1\ntest3\ntest4\n')
# make sure notice lvl logs by default
logger.notice('test6')
self.assertEqual(sio.getvalue(),
'test1\ntest3\ntest4\ntest6\n')
def test_get_logger_name_and_route(self):
logger = utils.get_logger({}, name='name', log_route='route')
self.assertEqual('route', logger.name)
self.assertEqual('name', logger.server)
logger = utils.get_logger({'log_name': 'conf-name'}, name='name',
log_route='route')
self.assertEqual('route', logger.name)
self.assertEqual('name', logger.server)
logger = utils.get_logger({'log_name': 'conf-name'}, log_route='route')
self.assertEqual('route', logger.name)
self.assertEqual('conf-name', logger.server)
logger = utils.get_logger({'log_name': 'conf-name'})
self.assertEqual('conf-name', logger.name)
self.assertEqual('conf-name', logger.server)
logger = utils.get_logger({})
self.assertEqual('swift', logger.name)
self.assertEqual('swift', logger.server)
logger = utils.get_logger({}, log_route='route')
self.assertEqual('route', logger.name)
self.assertEqual('swift', logger.server)
@with_tempdir
def test_get_logger_sysloghandler_plumbing(self, tempdir):
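# get_logger should hand ThreadSafeSysLogHandler a unix-socket address
# only when log_address exists and is a socket; otherwise it falls back
# to UDP, optionally to log_udp_host/log_udp_port.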
orig_sysloghandler = utils.ThreadSafeSysLogHandler
syslog_handler_args = []
def syslog_handler_catcher(*args, **kwargs):
syslog_handler_args.append((args, kwargs))
return orig_sysloghandler(*args, **kwargs)
syslog_handler_catcher.LOG_LOCAL0 = orig_sysloghandler.LOG_LOCAL0
syslog_handler_catcher.LOG_LOCAL3 = orig_sysloghandler.LOG_LOCAL3
# Some versions of python perform host resolution while initializing
# the handler. See https://bugs.python.org/issue30378
orig_getaddrinfo = socket.getaddrinfo
def fake_getaddrinfo(host, *args):
return orig_getaddrinfo('localhost', *args)
with mock.patch.object(utils, 'ThreadSafeSysLogHandler',
syslog_handler_catcher), \
mock.patch.object(socket, 'getaddrinfo', fake_getaddrinfo):
# default log_address
utils.get_logger({
'log_facility': 'LOG_LOCAL3',
}, 'server', log_route='server')
expected_args = [((), {'address': '/dev/log',
'facility': orig_sysloghandler.LOG_LOCAL3})]
if not os.path.exists('/dev/log') or \
os.path.isfile('/dev/log') or \
os.path.isdir('/dev/log'):
# Since the syslog socket on OS X is in /var/run/syslog, there will be
# a fallback to UDP.
expected_args = [
((), {'facility': orig_sysloghandler.LOG_LOCAL3})]
self.assertEqual(expected_args, syslog_handler_args)
# custom log_address - file doesn't exist: fallback to UDP
log_address = os.path.join(tempdir, 'foo')
syslog_handler_args = []
utils.get_logger({
'log_facility': 'LOG_LOCAL3',
'log_address': log_address,
}, 'server', log_route='server')
expected_args = [
((), {'facility': orig_sysloghandler.LOG_LOCAL3})]
self.assertEqual(
expected_args, syslog_handler_args)
# custom log_address - file exists, not a socket: fallback to UDP
with open(log_address, 'w'):
pass
syslog_handler_args = []
utils.get_logger({
'log_facility': 'LOG_LOCAL3',
'log_address': log_address,
}, 'server', log_route='server')
expected_args = [
((), {'facility': orig_sysloghandler.LOG_LOCAL3})]
self.assertEqual(
expected_args, syslog_handler_args)
# custom log_address - file exists, is a socket: use it
os.unlink(log_address)
with contextlib.closing(
socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)) as sock:
sock.settimeout(5)
sock.bind(log_address)
syslog_handler_args = []
utils.get_logger({
'log_facility': 'LOG_LOCAL3',
'log_address': log_address,
}, 'server', log_route='server')
expected_args = [
((), {'address': log_address,
'facility': orig_sysloghandler.LOG_LOCAL3})]
self.assertEqual(
expected_args, syslog_handler_args)
# Using UDP with default port
syslog_handler_args = []
utils.get_logger({
'log_udp_host': 'syslog.funtimes.com',
}, 'server', log_route='server')
self.assertEqual([
((), {'address': ('syslog.funtimes.com',
logging.handlers.SYSLOG_UDP_PORT),
'facility': orig_sysloghandler.LOG_LOCAL0})],
syslog_handler_args)
# Using UDP with non-default port
syslog_handler_args = []
utils.get_logger({
'log_udp_host': 'syslog.funtimes.com',
'log_udp_port': '2123',
}, 'server', log_route='server')
self.assertEqual([
((), {'address': ('syslog.funtimes.com', 2123),
'facility': orig_sysloghandler.LOG_LOCAL0})],
syslog_handler_args)
with mock.patch.object(utils, 'ThreadSafeSysLogHandler',
side_effect=OSError(errno.EPERM, 'oops')):
with self.assertRaises(OSError) as cm:
utils.get_logger({
'log_facility': 'LOG_LOCAL3',
'log_address': 'log_address',
}, 'server', log_route='server')
self.assertEqual(errno.EPERM, cm.exception.errno)
@reset_logger_state
def test_clean_logger_exception(self):
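# Well-known connection errnos, eventlet timeouts and BadStatusLine are
# collapsed to one-line messages without a traceback; anything
# unrecognized keeps the full traceback, as asserted below.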
# setup stream logging
sio = StringIO()
logger = utils.get_logger(None)
handler = logging.StreamHandler(sio)
logger.logger.addHandler(handler)
def strip_value(sio):
sio.seek(0)
v = sio.getvalue()
sio.truncate(0)
return v
def log_exception(exc):
try:
raise exc
except (Exception, Timeout):
logger.exception('blah')
try:
# establish base case
self.assertEqual(strip_value(sio), '')
logger.info('test')
self.assertEqual(strip_value(sio), 'test\n')
self.assertEqual(strip_value(sio), '')
logger.info('test')
logger.info('test')
self.assertEqual(strip_value(sio), 'test\ntest\n')
self.assertEqual(strip_value(sio), '')
# test OSError
for en in (errno.EIO, errno.ENOSPC):
log_exception(OSError(en, 'my %s error message' % en))
log_msg = strip_value(sio)
self.assertNotIn('Traceback', log_msg)
self.assertIn('my %s error message' % en, log_msg)
# unfiltered
log_exception(OSError())
self.assertTrue('Traceback' in strip_value(sio))
# test socket.error
log_exception(socket.error(errno.ECONNREFUSED,
'my error message'))
log_msg = strip_value(sio)
self.assertNotIn('Traceback', log_msg)
self.assertNotIn('errno.ECONNREFUSED message test', log_msg)
self.assertIn('Connection refused', log_msg)
log_exception(socket.error(errno.EHOSTUNREACH,
'my error message'))
log_msg = strip_value(sio)
self.assertNotIn('Traceback', log_msg)
self.assertNotIn('my error message', log_msg)
self.assertIn('Host unreachable', log_msg)
log_exception(socket.error(errno.ETIMEDOUT, 'my error message'))
log_msg = strip_value(sio)
self.assertNotIn('Traceback', log_msg)
self.assertNotIn('my error message', log_msg)
self.assertIn('Connection timeout', log_msg)
log_exception(socket.error(errno.ENETUNREACH, 'my error message'))
log_msg = strip_value(sio)
self.assertNotIn('Traceback', log_msg)
self.assertNotIn('my error message', log_msg)
self.assertIn('Network unreachable', log_msg)
log_exception(socket.error(errno.EPIPE, 'my error message'))
log_msg = strip_value(sio)
self.assertNotIn('Traceback', log_msg)
self.assertNotIn('my error message', log_msg)
self.assertIn('Broken pipe', log_msg)
# unfiltered
log_exception(socket.error(0, 'my error message'))
log_msg = strip_value(sio)
self.assertIn('Traceback', log_msg)
self.assertIn('my error message', log_msg)
# test eventlet.Timeout
with ConnectionTimeout(42, 'my error message') \
as connection_timeout:
log_exception(connection_timeout)
log_msg = strip_value(sio)
self.assertNotIn('Traceback', log_msg)
self.assertTrue('ConnectionTimeout' in log_msg)
self.assertTrue('(42s)' in log_msg)
self.assertNotIn('my error message', log_msg)
with MessageTimeout(42, 'my error message') as message_timeout:
log_exception(message_timeout)
log_msg = strip_value(sio)
self.assertNotIn('Traceback', log_msg)
self.assertTrue('MessageTimeout' in log_msg)
self.assertTrue('(42s)' in log_msg)
self.assertTrue('my error message' in log_msg)
# test BadStatusLine
log_exception(http_client.BadStatusLine(''))
log_msg = strip_value(sio)
self.assertNotIn('Traceback', log_msg)
self.assertIn('BadStatusLine', log_msg)
self.assertIn("''", log_msg)
# green version is separate :-(
log_exception(green_http_client.BadStatusLine(''))
log_msg = strip_value(sio)
self.assertNotIn('Traceback', log_msg)
self.assertIn('BadStatusLine', log_msg)
self.assertIn("''", log_msg)
# test unhandled
log_exception(Exception('my error message'))
log_msg = strip_value(sio)
self.assertTrue('Traceback' in log_msg)
self.assertTrue('my error message' in log_msg)
finally:
logger.logger.removeHandler(handler)
@reset_logger_state
def test_swift_log_formatter_max_line_length(self):
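# Over-long lines are collapsed to head + ' ... ' + tail within
# max_line_length; limits too small for that form (<= 6 here) fall back
# to a plain cut, and limits <= 0 disable truncation.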
# setup stream logging
sio = StringIO()
logger = utils.get_logger(None)
handler = logging.StreamHandler(sio)
formatter = utils.SwiftLogFormatter(max_line_length=10)
handler.setFormatter(formatter)
logger.logger.addHandler(handler)
def strip_value(sio):
sio.seek(0)
v = sio.getvalue()
sio.truncate(0)
return v
try:
logger.info('12345')
self.assertEqual(strip_value(sio), '12345\n')
logger.info('1234567890')
self.assertEqual(strip_value(sio), '1234567890\n')
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '12 ... de\n')
formatter.max_line_length = 11
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '123 ... cde\n')
formatter.max_line_length = 0
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '1234567890abcde\n')
formatter.max_line_length = 1
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '1\n')
formatter.max_line_length = 2
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '12\n')
formatter.max_line_length = 3
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '123\n')
formatter.max_line_length = 4
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '1234\n')
formatter.max_line_length = 5
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '12345\n')
formatter.max_line_length = 6
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '123456\n')
formatter.max_line_length = 7
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '1 ... e\n')
formatter.max_line_length = -10
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '1234567890abcde\n')
finally:
logger.logger.removeHandler(handler)
@reset_logger_state
def test_swift_log_formatter(self):
# setup stream logging
sio = StringIO()
logger = utils.get_logger(None)
handler = logging.StreamHandler(sio)
handler.setFormatter(utils.SwiftLogFormatter())
logger.logger.addHandler(handler)
def strip_value(sio):
sio.seek(0)
v = sio.getvalue()
sio.truncate(0)
return v
try:
self.assertFalse(logger.txn_id)
logger.error('my error message')
log_msg = strip_value(sio)
self.assertIn('my error message', log_msg)
self.assertNotIn('txn', log_msg)
logger.txn_id = '12345'
logger.error('test')
log_msg = strip_value(sio)
self.assertIn('txn', log_msg)
self.assertIn('12345', log_msg)
# test txn in info message
self.assertEqual(logger.txn_id, '12345')
logger.info('test')
log_msg = strip_value(sio)
self.assertIn('txn', log_msg)
self.assertIn('12345', log_msg)
# test txn already in message
self.assertEqual(logger.txn_id, '12345')
logger.warning('test 12345 test')
self.assertEqual(strip_value(sio), 'test 12345 test\n')
# Test multi-line collapsing
logger.error('my\nerror\nmessage')
log_msg = strip_value(sio)
self.assertIn('my#012error#012message', log_msg)
# test client_ip
self.assertFalse(logger.client_ip)
logger.error('my error message')
log_msg = strip_value(sio)
self.assertIn('my error message', log_msg)
self.assertNotIn('client_ip', log_msg)
logger.client_ip = '1.2.3.4'
logger.error('test')
log_msg = strip_value(sio)
self.assertIn('client_ip', log_msg)
self.assertIn('1.2.3.4', log_msg)
# test no client_ip on info message
self.assertEqual(logger.client_ip, '1.2.3.4')
logger.info('test')
log_msg = strip_value(sio)
self.assertNotIn('client_ip', log_msg)
self.assertNotIn('1.2.3.4', log_msg)
# test client_ip (and txn) already in message
self.assertEqual(logger.client_ip, '1.2.3.4')
logger.warning('test 1.2.3.4 test 12345')
self.assertEqual(strip_value(sio), 'test 1.2.3.4 test 12345\n')
finally:
logger.logger.removeHandler(handler)
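# Behaviour illustrated above (informal summary): SwiftLogFormatter appends
# 'txn: <id>' to records once logger.txn_id is set (info and error alike),
# appends 'client_ip: <ip>' to error records but not to info records, skips
# either suffix when the value already appears in the message text, and
# collapses embedded newlines to '#012'.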
@reset_logger_state
def test_prefixlogger(self):
# setup stream logging
sio = StringIO()
base_logger = utils.get_logger(None)
handler = logging.StreamHandler(sio)
base_logger.logger.addHandler(handler)
logger = utils.PrefixLoggerAdapter(base_logger, {})
logger.set_prefix('some prefix: ')
def strip_value(sio):
sio.seek(0)
v = sio.getvalue()
sio.truncate(0)
return v
def log_exception(exc):
try:
raise exc
except (Exception, Timeout):
logger.exception('blah')
try:
# establish base case
self.assertEqual(strip_value(sio), '')
logger.info('test')
self.assertEqual(strip_value(sio), 'some prefix: test\n')
self.assertEqual(strip_value(sio), '')
logger.info('test')
logger.info('test')
self.assertEqual(
strip_value(sio),
'some prefix: test\nsome prefix: test\n')
self.assertEqual(strip_value(sio), '')
# test OSError
for en in (errno.EIO, errno.ENOSPC):
log_exception(OSError(en, 'my %s error message' % en))
log_msg = strip_value(sio)
self.assertNotIn('Traceback', log_msg)
self.assertEqual('some prefix: ', log_msg[:13])
self.assertIn('my %s error message' % en, log_msg)
# unfiltered
log_exception(OSError())
log_msg = strip_value(sio)
self.assertIn('Traceback', log_msg)
self.assertEqual('some prefix: ', log_msg[:13])
finally:
base_logger.logger.removeHandler(handler)
def test_storage_directory(self):
self.assertEqual(utils.storage_directory('objects', '1', 'ABCDEF'),
'objects/1/DEF/ABCDEF')
def test_is_valid_ip(self):
self.assertTrue(is_valid_ip("127.0.0.1"))
self.assertTrue(is_valid_ip("10.0.0.1"))
ipv6 = "fe80:0000:0000:0000:0204:61ff:fe9d:f156"
self.assertTrue(is_valid_ip(ipv6))
ipv6 = "fe80:0:0:0:204:61ff:fe9d:f156"
self.assertTrue(is_valid_ip(ipv6))
ipv6 = "fe80::204:61ff:fe9d:f156"
self.assertTrue(is_valid_ip(ipv6))
ipv6 = "fe80:0000:0000:0000:0204:61ff:254.157.241.86"
self.assertTrue(is_valid_ip(ipv6))
ipv6 = "fe80:0:0:0:0204:61ff:254.157.241.86"
self.assertTrue(is_valid_ip(ipv6))
ipv6 = "fe80::204:61ff:254.157.241.86"
self.assertTrue(is_valid_ip(ipv6))
ipv6 = "fe80::"
self.assertTrue(is_valid_ip(ipv6))
ipv6 = "::1"
self.assertTrue(is_valid_ip(ipv6))
not_ipv6 = "3ffe:0b00:0000:0001:0000:0000:000a"
self.assertFalse(is_valid_ip(not_ipv6))
not_ipv6 = "1:2:3:4:5:6::7:8"
self.assertFalse(is_valid_ip(not_ipv6))
def test_is_valid_ipv4(self):
self.assertTrue(is_valid_ipv4("127.0.0.1"))
self.assertTrue(is_valid_ipv4("10.0.0.1"))
ipv6 = "fe80:0000:0000:0000:0204:61ff:fe9d:f156"
self.assertFalse(is_valid_ipv4(ipv6))
ipv6 = "fe80:0:0:0:204:61ff:fe9d:f156"
self.assertFalse(is_valid_ipv4(ipv6))
ipv6 = "fe80::204:61ff:fe9d:f156"
self.assertFalse(is_valid_ipv4(ipv6))
ipv6 = "fe80:0000:0000:0000:0204:61ff:254.157.241.86"
self.assertFalse(is_valid_ipv4(ipv6))
ipv6 = "fe80:0:0:0:0204:61ff:254.157.241.86"
self.assertFalse(is_valid_ipv4(ipv6))
ipv6 = "fe80::204:61ff:254.157.241.86"
self.assertFalse(is_valid_ipv4(ipv6))
ipv6 = "fe80::"
self.assertFalse(is_valid_ipv4(ipv6))
ipv6 = "::1"
self.assertFalse(is_valid_ipv4(ipv6))
not_ipv6 = "3ffe:0b00:0000:0001:0000:0000:000a"
self.assertFalse(is_valid_ipv4(not_ipv6))
not_ipv6 = "1:2:3:4:5:6::7:8"
self.assertFalse(is_valid_ipv4(not_ipv6))
def test_is_valid_ipv6(self):
self.assertFalse(is_valid_ipv6("127.0.0.1"))
self.assertFalse(is_valid_ipv6("10.0.0.1"))
ipv6 = "fe80:0000:0000:0000:0204:61ff:fe9d:f156"
self.assertTrue(is_valid_ipv6(ipv6))
ipv6 = "fe80:0:0:0:204:61ff:fe9d:f156"
self.assertTrue(is_valid_ipv6(ipv6))
ipv6 = "fe80::204:61ff:fe9d:f156"
self.assertTrue(is_valid_ipv6(ipv6))
ipv6 = "fe80:0000:0000:0000:0204:61ff:254.157.241.86"
self.assertTrue(is_valid_ipv6(ipv6))
ipv6 = "fe80:0:0:0:0204:61ff:254.157.241.86"
self.assertTrue(is_valid_ipv6(ipv6))
ipv6 = "fe80::204:61ff:254.157.241.86"
self.assertTrue(is_valid_ipv6(ipv6))
ipv6 = "fe80::"
self.assertTrue(is_valid_ipv6(ipv6))
ipv6 = "::1"
self.assertTrue(is_valid_ipv6(ipv6))
not_ipv6 = "3ffe:0b00:0000:0001:0000:0000:000a"
self.assertFalse(is_valid_ipv6(not_ipv6))
not_ipv6 = "1:2:3:4:5:6::7:8"
self.assertFalse(is_valid_ipv6(not_ipv6))
def test_expand_ipv6(self):
expanded_ipv6 = "fe80::204:61ff:fe9d:f156"
upper_ipv6 = "fe80:0000:0000:0000:0204:61ff:fe9d:f156"
self.assertEqual(expanded_ipv6, utils.expand_ipv6(upper_ipv6))
omit_ipv6 = "fe80:0000:0000::0204:61ff:fe9d:f156"
self.assertEqual(expanded_ipv6, utils.expand_ipv6(omit_ipv6))
less_num_ipv6 = "fe80:0:00:000:0204:61ff:fe9d:f156"
self.assertEqual(expanded_ipv6, utils.expand_ipv6(less_num_ipv6))
def test_whataremyips(self):
myips = utils.whataremyips()
self.assertTrue(len(myips) > 1)
self.assertTrue('127.0.0.1' in myips)
def test_whataremyips_bind_to_all(self):
for any_addr in ('0.0.0.0', '0000:0000:0000:0000:0000:0000:0000:0000',
'::0', '::0000', '::',
# Wacky parse-error input produces all IPs
'I am a bear'):
myips = utils.whataremyips(any_addr)
self.assertTrue(len(myips) > 1)
self.assertTrue('127.0.0.1' in myips)
def test_whataremyips_bind_ip_specific(self):
self.assertEqual(['1.2.3.4'], utils.whataremyips('1.2.3.4'))
def test_whataremyips_error(self):
def my_interfaces():
return ['eth0']
def my_ifaddress_error(interface):
raise ValueError
with patch('netifaces.interfaces', my_interfaces), \
patch('netifaces.ifaddresses', my_ifaddress_error):
self.assertEqual(utils.whataremyips(), [])
def test_whataremyips_ipv6(self):
test_ipv6_address = '2001:6b0:dead:beef:2::32'
test_interface = 'eth0'
def my_ipv6_interfaces():
return ['eth0']
def my_ipv6_ifaddresses(interface):
return {AF_INET6:
[{'netmask': 'ffff:ffff:ffff:ffff::',
'addr': '%s%%%s' % (test_ipv6_address, test_interface)}]}
with patch('netifaces.interfaces', my_ipv6_interfaces), \
patch('netifaces.ifaddresses', my_ipv6_ifaddresses):
myips = utils.whataremyips()
self.assertEqual(len(myips), 1)
self.assertEqual(myips[0], test_ipv6_address)
def test_hash_path(self):
# Yes, these tests are deliberately very fragile. We want to make sure
# that if someone changes the results hash_path produces, they know it.
with mock.patch('swift.common.utils.HASH_PATH_PREFIX', b''):
self.assertEqual(utils.hash_path('a'),
'1c84525acb02107ea475dcd3d09c2c58')
self.assertEqual(utils.hash_path('a', 'c'),
'33379ecb053aa5c9e356c68997cbb59e')
self.assertEqual(utils.hash_path('a', 'c', 'o'),
'06fbf0b514e5199dfc4e00f42eb5ea83')
self.assertEqual(utils.hash_path('a', 'c', 'o', raw_digest=False),
'06fbf0b514e5199dfc4e00f42eb5ea83')
self.assertEqual(utils.hash_path('a', 'c', 'o', raw_digest=True),
b'\x06\xfb\xf0\xb5\x14\xe5\x19\x9d\xfcN'
b'\x00\xf4.\xb5\xea\x83')
self.assertRaises(ValueError, utils.hash_path, 'a', object='o')
utils.HASH_PATH_PREFIX = b'abcdef'
self.assertEqual(utils.hash_path('a', 'c', 'o', raw_digest=False),
'363f9b535bfb7d17a43a46a358afca0e')
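# A rough usage sketch of what the assertions above exercise (hypothetical
# account/container/object names; digests depend on the HASH_PATH_PREFIX and
# HASH_PATH_SUFFIX configured at runtime):
#   utils.hash_path('AUTH_acct', 'cont', 'obj')              # 32-char md5 hex
#   utils.hash_path('AUTH_acct', 'cont', 'obj',
#                   raw_digest=True)                         # 16 raw bytes
#   utils.hash_path('AUTH_acct', object='obj')               # ValueError: object
#                                                            # without container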
def test_validate_hash_conf(self):
# no section causes InvalidHashPathConfigError
self._test_validate_hash_conf([], [], True)
# 'swift-hash' section is there but no options causes
# InvalidHashPathConfigError
self._test_validate_hash_conf(['swift-hash'], [], True)
# if we have the section and either of prefix or suffix,
# InvalidHashPathConfigError doesn't occur
self._test_validate_hash_conf(
['swift-hash'], ['swift_hash_path_prefix'], False)
self._test_validate_hash_conf(
['swift-hash'], ['swift_hash_path_suffix'], False)
# definitely, we have the section and both of them,
# InvalidHashPathConfigError doesn't occur
self._test_validate_hash_conf(
['swift-hash'],
['swift_hash_path_suffix', 'swift_hash_path_prefix'], False)
# But invalid section name should make an error even if valid
# options are there
self._test_validate_hash_conf(
['swift-hash-xxx'],
['swift_hash_path_suffix', 'swift_hash_path_prefix'], True)
# Unreadable/missing swift.conf causes IOError
# We mock in case the unit tests are run on a laptop with SAIO,
# which does have a natural /etc/swift/swift.conf.
with mock.patch('swift.common.utils.HASH_PATH_PREFIX', b''), \
mock.patch('swift.common.utils.HASH_PATH_SUFFIX', b''), \
mock.patch('swift.common.utils.SWIFT_CONF_FILE',
'/nosuchfile'), \
self.assertRaises(IOError):
utils.validate_hash_conf()
def _test_validate_hash_conf(self, sections, options, should_raise_error):
class FakeConfigParser(object):
def readfp(self, fp):
pass
def get(self, section, option):
if section not in sections:
raise NoSectionError('section error')
elif option not in options:
raise NoOptionError('option error', 'this option')
else:
return 'some_option_value'
with mock.patch('swift.common.utils.HASH_PATH_PREFIX', b''), \
mock.patch('swift.common.utils.HASH_PATH_SUFFIX', b''), \
mock.patch('swift.common.utils.SWIFT_CONF_FILE',
'/dev/null'), \
mock.patch('swift.common.utils.ConfigParser',
FakeConfigParser):
try:
utils.validate_hash_conf()
except utils.InvalidHashPathConfigError:
if not should_raise_error:
self.fail('validate_hash_conf should not raise an error')
else:
if should_raise_error:
self.fail('validate_hash_conf should raise an error')
def test_load_libc_function(self):
self.assertTrue(callable(
utils.load_libc_function('printf')))
self.assertTrue(callable(
utils.load_libc_function('some_not_real_function')))
self.assertRaises(AttributeError,
utils.load_libc_function, 'some_not_real_function',
fail_if_missing=True)
def test_readconf(self):
conf = '''[section1]
foo = bar
[section2]
log_name = yarr'''
# setup a real file
fd, temppath = tempfile.mkstemp()
with os.fdopen(fd, 'w') as f:
f.write(conf)
make_filename = lambda: temppath
# setup a file stream
make_fp = lambda: StringIO(conf)
for conf_object_maker in (make_filename, make_fp):
conffile = conf_object_maker()
result = utils.readconf(conffile)
expected = {'__file__': conffile,
'log_name': None,
'section1': {'foo': 'bar'},
'section2': {'log_name': 'yarr'}}
self.assertEqual(result, expected)
conffile = conf_object_maker()
result = utils.readconf(conffile, 'section1')
expected = {'__file__': conffile, 'log_name': 'section1',
'foo': 'bar'}
self.assertEqual(result, expected)
conffile = conf_object_maker()
result = utils.readconf(conffile,
'section2').get('log_name')
expected = 'yarr'
self.assertEqual(result, expected)
conffile = conf_object_maker()
result = utils.readconf(conffile, 'section1',
log_name='foo').get('log_name')
expected = 'foo'
self.assertEqual(result, expected)
conffile = conf_object_maker()
result = utils.readconf(conffile, 'section1',
defaults={'bar': 'baz'})
expected = {'__file__': conffile, 'log_name': 'section1',
'foo': 'bar', 'bar': 'baz'}
self.assertEqual(result, expected)
self.assertRaisesRegexp(
ValueError, 'Unable to find section3 config section in.*',
utils.readconf, temppath, 'section3')
os.unlink(temppath)
self.assertRaises(IOError, utils.readconf, temppath)
def test_readconf_raw(self):
conf = '''[section1]
foo = bar
[section2]
log_name = %(yarr)s'''
# setup a real file
fd, temppath = tempfile.mkstemp()
with os.fdopen(fd, 'w') as f:
f.write(conf)
make_filename = lambda: temppath
# setup a file stream
make_fp = lambda: StringIO(conf)
for conf_object_maker in (make_filename, make_fp):
conffile = conf_object_maker()
result = utils.readconf(conffile, raw=True)
expected = {'__file__': conffile,
'log_name': None,
'section1': {'foo': 'bar'},
'section2': {'log_name': '%(yarr)s'}}
self.assertEqual(result, expected)
os.unlink(temppath)
self.assertRaises(IOError, utils.readconf, temppath)
def test_readconf_dir(self):
config_dir = {
'server.conf.d/01.conf': """
[DEFAULT]
port = 8080
foo = bar
[section1]
name=section1
""",
'server.conf.d/section2.conf': """
[DEFAULT]
port = 8081
bar = baz
[section2]
name=section2
""",
'other-server.conf.d/01.conf': """
[DEFAULT]
port = 8082
[section3]
name=section3
"""
}
# strip indent from test config contents
config_dir = dict((f, dedent(c)) for (f, c) in config_dir.items())
with temptree(*zip(*config_dir.items())) as path:
conf_dir = os.path.join(path, 'server.conf.d')
conf = utils.readconf(conf_dir)
expected = {
'__file__': os.path.join(path, 'server.conf.d'),
'log_name': None,
'section1': {
'port': '8081',
'foo': 'bar',
'bar': 'baz',
'name': 'section1',
},
'section2': {
'port': '8081',
'foo': 'bar',
'bar': 'baz',
'name': 'section2',
},
}
self.assertEqual(conf, expected)
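# The expected dict above illustrates the conf.d merge rules (informal note):
# only files under the requested *.conf.d directory are read, they are merged
# in sorted filename order so a later [DEFAULT] value (port = 8081) overrides
# an earlier one (8080), and the merged defaults are folded into every section.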
def test_readconf_dir_ignores_hidden_and_nondotconf_files(self):
config_dir = {
'server.conf.d/01.conf': """
[section1]
port = 8080
""",
'server.conf.d/.01.conf.swp': """
[section]
port = 8081
""",
'server.conf.d/01.conf-bak': """
[section]
port = 8082
""",
}
# strip indent from test config contents
config_dir = dict((f, dedent(c)) for (f, c) in config_dir.items())
with temptree(*zip(*config_dir.items())) as path:
conf_dir = os.path.join(path, 'server.conf.d')
conf = utils.readconf(conf_dir)
expected = {
'__file__': os.path.join(path, 'server.conf.d'),
'log_name': None,
'section1': {
'port': '8080',
},
}
self.assertEqual(conf, expected)
def test_drop_privileges(self):
required_func_calls = ('setgroups', 'setgid', 'setuid')
mock_os = MockOs(called_funcs=required_func_calls)
user = getuser()
user_data = pwd.getpwnam(user)
self.assertFalse(mock_os.called_funcs) # sanity check
# over-ride os with mock
with mock.patch('swift.common.utils.os', mock_os):
# exercise the code
utils.drop_privileges(user)
for func in required_func_calls:
self.assertIn(func, mock_os.called_funcs)
self.assertEqual(user_data[5], mock_os.environ['HOME'])
groups = {g.gr_gid for g in grp.getgrall() if user in g.gr_mem}
self.assertEqual(groups, set(mock_os.called_funcs['setgroups'][0]))
self.assertEqual(user_data[3], mock_os.called_funcs['setgid'][0])
self.assertEqual(user_data[2], mock_os.called_funcs['setuid'][0])
def test_drop_privileges_no_setgroups(self):
required_func_calls = ('geteuid', 'setgid', 'setuid')
mock_os = MockOs(called_funcs=required_func_calls)
user = getuser()
user_data = pwd.getpwnam(user)
self.assertFalse(mock_os.called_funcs) # sanity check
# over-ride os with mock
with mock.patch('swift.common.utils.os', mock_os):
# exercise the code
utils.drop_privileges(user)
for func in required_func_calls:
self.assertIn(func, mock_os.called_funcs)
self.assertNotIn('setgroups', mock_os.called_funcs)
self.assertEqual(user_data[5], mock_os.environ['HOME'])
self.assertEqual(user_data[3], mock_os.called_funcs['setgid'][0])
self.assertEqual(user_data[2], mock_os.called_funcs['setuid'][0])
def test_clean_up_daemon_hygene(self):
required_func_calls = ('chdir', 'umask')
# OSError if trying to get session leader, but setsid() OSError is
# ignored by the code under test.
bad_func_calls = ('setsid',)
mock_os = MockOs(called_funcs=required_func_calls,
raise_funcs=bad_func_calls)
with mock.patch('swift.common.utils.os', mock_os):
# exercise the code
utils.clean_up_daemon_hygiene()
for func in required_func_calls:
self.assertIn(func, mock_os.called_funcs)
for func in bad_func_calls:
self.assertIn(func, mock_os.called_funcs)
self.assertEqual('/', mock_os.called_funcs['chdir'][0])
self.assertEqual(0o22, mock_os.called_funcs['umask'][0])
@reset_logger_state
def test_capture_stdio(self):
# stubs
logger = utils.get_logger(None, 'dummy')
# mock utils system modules
_orig_sys = utils.sys
_orig_os = utils.os
try:
utils.sys = MockSys()
utils.os = MockOs()
# basic test
utils.capture_stdio(logger)
self.assertTrue(utils.sys.excepthook is not None)
self.assertEqual(utils.os.closed_fds, utils.sys.stdio_fds)
self.assertTrue(
isinstance(utils.sys.stdout, utils.LoggerFileObject))
self.assertTrue(
isinstance(utils.sys.stderr, utils.LoggerFileObject))
# reset; test same args, but raise an exception when closing stdio
utils.os = MockOs(raise_funcs=('dup2',))
utils.sys = MockSys()
# test unable to close stdio
utils.capture_stdio(logger)
self.assertTrue(utils.sys.excepthook is not None)
self.assertEqual(utils.os.closed_fds, [])
self.assertTrue(
isinstance(utils.sys.stdout, utils.LoggerFileObject))
self.assertTrue(
isinstance(utils.sys.stderr, utils.LoggerFileObject))
# reset; test some other args
utils.os = MockOs()
utils.sys = MockSys()
logger = utils.get_logger(None, log_to_console=True)
# test console log
utils.capture_stdio(logger, capture_stdout=False,
capture_stderr=False)
self.assertTrue(utils.sys.excepthook is not None)
# when logging to console, stderr remains open
self.assertEqual(utils.os.closed_fds, utils.sys.stdio_fds[:2])
reset_loggers()
# stdio not captured
self.assertFalse(isinstance(utils.sys.stdout,
utils.LoggerFileObject))
self.assertFalse(isinstance(utils.sys.stderr,
utils.LoggerFileObject))
finally:
utils.sys = _orig_sys
utils.os = _orig_os
@reset_logger_state
def test_get_logger_console(self):
logger = utils.get_logger(None)
console_handlers = [h for h in logger.logger.handlers if
isinstance(h, logging.StreamHandler)]
self.assertFalse(console_handlers)
logger = utils.get_logger(None, log_to_console=True)
console_handlers = [h for h in logger.logger.handlers if
isinstance(h, logging.StreamHandler)]
self.assertTrue(console_handlers)
# make sure you can't have two console handlers
self.assertEqual(len(console_handlers), 1)
old_handler = console_handlers[0]
logger = utils.get_logger(None, log_to_console=True)
console_handlers = [h for h in logger.logger.handlers if
isinstance(h, logging.StreamHandler)]
self.assertEqual(len(console_handlers), 1)
new_handler = console_handlers[0]
self.assertNotEqual(new_handler, old_handler)
def verify_under_pseudo_time(
self, func, target_runtime_ms=1, *args, **kwargs):
curr_time = [42.0]
def my_time():
curr_time[0] += 0.001
return curr_time[0]
def my_sleep(duration):
curr_time[0] += 0.001
curr_time[0] += duration
with patch('time.time', my_time), \
patch('time.sleep', my_sleep), \
patch('eventlet.sleep', my_sleep):
start = time.time()
func(*args, **kwargs)
# make sure it's accurate to within a tenth of a second: the time
# difference is converted to milliseconds (100 ms is 1/10 of a second)
diff_from_target_ms = abs(
target_runtime_ms - ((time.time() - start) * 1000))
self.assertTrue(diff_from_target_ms < 100,
"Expected %d < 100" % diff_from_target_ms)
def test_ratelimit_sleep(self):
def testfunc():
running_time = 0
for i in range(100):
running_time = utils.ratelimit_sleep(running_time, -5)
self.verify_under_pseudo_time(testfunc, target_runtime_ms=1)
def testfunc():
running_time = 0
for i in range(100):
running_time = utils.ratelimit_sleep(running_time, 0)
self.verify_under_pseudo_time(testfunc, target_runtime_ms=1)
def testfunc():
running_time = 0
for i in range(50):
running_time = utils.ratelimit_sleep(running_time, 200)
self.verify_under_pseudo_time(testfunc, target_runtime_ms=250)
def test_ratelimit_sleep_with_incr(self):
def testfunc():
running_time = 0
vals = [5, 17, 0, 3, 11, 30,
40, 4, 13, 2, -1] * 2 # adds up to 248
total = 0
for i in vals:
running_time = utils.ratelimit_sleep(running_time,
500, incr_by=i)
total += i
self.assertEqual(248, total)
self.verify_under_pseudo_time(testfunc, target_runtime_ms=500)
def test_ratelimit_sleep_with_sleep(self):
def testfunc():
running_time = 0
sleeps = [0] * 7 + [.2] * 3 + [0] * 30
for i in sleeps:
running_time = utils.ratelimit_sleep(running_time, 40,
rate_buffer=1)
time.sleep(i)
self.verify_under_pseudo_time(testfunc, target_runtime_ms=900)
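# Typical usage pattern exercised above (a rough sketch, not authoritative;
# work_items and process() are hypothetical):
#   running_time = 0
#   for item in work_items:
#       running_time = utils.ratelimit_sleep(running_time, max_rate, incr_by=1)
#       process(item)
# ratelimit_sleep() sleeps just enough to keep the loop at or below max_rate
# operations per second, and a rate of zero or less disables limiting entirely.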
def test_search_tree(self):
# file match & ext miss
with temptree(['asdf.conf', 'blarg.conf', 'asdf.cfg']) as t:
asdf = utils.search_tree(t, 'a*', '.conf')
self.assertEqual(len(asdf), 1)
self.assertEqual(asdf[0],
os.path.join(t, 'asdf.conf'))
# multi-file match & glob miss & sort
with temptree(['application.bin', 'apple.bin', 'apropos.bin']) as t:
app_bins = utils.search_tree(t, 'app*', 'bin')
self.assertEqual(len(app_bins), 2)
self.assertEqual(app_bins[0],
os.path.join(t, 'apple.bin'))
self.assertEqual(app_bins[1],
os.path.join(t, 'application.bin'))
# test file in folder & ext miss & glob miss
files = (
'sub/file1.ini',
'sub/file2.conf',
'sub.bin',
'bus.ini',
'bus/file3.ini',
)
with temptree(files) as t:
sub_ini = utils.search_tree(t, 'sub*', '.ini')
self.assertEqual(len(sub_ini), 1)
self.assertEqual(sub_ini[0],
os.path.join(t, 'sub/file1.ini'))
# test multi-file in folder & sub-folder & ext miss & glob miss
files = (
'folder_file.txt',
'folder/1.txt',
'folder/sub/2.txt',
'folder2/3.txt',
'Folder3/4.txt'
'folder.rc',
)
with temptree(files) as t:
folder_texts = utils.search_tree(t, 'folder*', '.txt')
self.assertEqual(len(folder_texts), 4)
f1 = os.path.join(t, 'folder_file.txt')
f2 = os.path.join(t, 'folder/1.txt')
f3 = os.path.join(t, 'folder/sub/2.txt')
f4 = os.path.join(t, 'folder2/3.txt')
for f in [f1, f2, f3, f4]:
self.assertTrue(f in folder_texts)
def test_search_tree_with_directory_ext_match(self):
files = (
'object-server/object-server.conf-base',
'object-server/1.conf.d/base.conf',
'object-server/1.conf.d/1.conf',
'object-server/2.conf.d/base.conf',
'object-server/2.conf.d/2.conf',
'object-server/3.conf.d/base.conf',
'object-server/3.conf.d/3.conf',
'object-server/4.conf.d/base.conf',
'object-server/4.conf.d/4.conf',
)
with temptree(files) as t:
conf_dirs = utils.search_tree(t, 'object-server', '.conf',
dir_ext='conf.d')
self.assertEqual(len(conf_dirs), 4)
for i in range(4):
conf_dir = os.path.join(t, 'object-server/%d.conf.d' % (i + 1))
self.assertTrue(conf_dir in conf_dirs)
def test_search_tree_conf_dir_with_named_conf_match(self):
files = (
'proxy-server/proxy-server.conf.d/base.conf',
'proxy-server/proxy-server.conf.d/pipeline.conf',
'proxy-server/proxy-noauth.conf.d/base.conf',
'proxy-server/proxy-noauth.conf.d/pipeline.conf',
)
with temptree(files) as t:
conf_dirs = utils.search_tree(t, 'proxy-server', 'noauth.conf',
dir_ext='noauth.conf.d')
self.assertEqual(len(conf_dirs), 1)
conf_dir = conf_dirs[0]
expected = os.path.join(t, 'proxy-server/proxy-noauth.conf.d')
self.assertEqual(conf_dir, expected)
def test_search_tree_conf_dir_pid_with_named_conf_match(self):
files = (
'proxy-server/proxy-server.pid.d',
'proxy-server/proxy-noauth.pid.d',
)
with temptree(files) as t:
pid_files = utils.search_tree(t, 'proxy-server',
exts=['noauth.pid', 'noauth.pid.d'])
self.assertEqual(len(pid_files), 1)
pid_file = pid_files[0]
expected = os.path.join(t, 'proxy-server/proxy-noauth.pid.d')
self.assertEqual(pid_file, expected)
def test_write_file(self):
with temptree([]) as t:
file_name = os.path.join(t, 'test')
utils.write_file(file_name, 'test')
with open(file_name, 'r') as f:
contents = f.read()
self.assertEqual(contents, 'test')
# and also subdirs
file_name = os.path.join(t, 'subdir/test2')
utils.write_file(file_name, 'test2')
with open(file_name, 'r') as f:
contents = f.read()
self.assertEqual(contents, 'test2')
# but can't over-write files
file_name = os.path.join(t, 'subdir/test2/test3')
self.assertRaises(IOError, utils.write_file, file_name,
'test3')
def test_remove_file(self):
with temptree([]) as t:
file_name = os.path.join(t, 'blah.pid')
# assert no raise
self.assertEqual(os.path.exists(file_name), False)
self.assertIsNone(utils.remove_file(file_name))
with open(file_name, 'w') as f:
f.write('1')
self.assertTrue(os.path.exists(file_name))
self.assertIsNone(utils.remove_file(file_name))
self.assertFalse(os.path.exists(file_name))
def test_remove_directory(self):
with temptree([]) as t:
dir_name = os.path.join(t, 'subdir')
os.mkdir(dir_name)
self.assertTrue(os.path.isdir(dir_name))
self.assertIsNone(utils.remove_directory(dir_name))
self.assertFalse(os.path.exists(dir_name))
# assert no raise when it does not exist, or is not empty
self.assertEqual(os.path.exists(dir_name), False)
self.assertIsNone(utils.remove_directory(dir_name))
_m_rmdir = mock.Mock(
side_effect=OSError(errno.ENOTEMPTY,
os.strerror(errno.ENOTEMPTY)))
with mock.patch('swift.common.utils.os.rmdir', _m_rmdir):
self.assertIsNone(utils.remove_directory(dir_name))
_m_rmdir = mock.Mock(
side_effect=OSError(errno.EPERM, os.strerror(errno.EPERM)))
with mock.patch('swift.common.utils.os.rmdir', _m_rmdir):
self.assertRaises(OSError, utils.remove_directory, dir_name)
@with_tempdir
def test_is_file_older(self, tempdir):
ts = utils.Timestamp(time.time() - 100000)
file_name = os.path.join(tempdir, '%s.data' % ts.internal)
# assert no raise
self.assertFalse(os.path.exists(file_name))
self.assertTrue(utils.is_file_older(file_name, 0))
self.assertFalse(utils.is_file_older(file_name, 1))
with open(file_name, 'w') as f:
f.write('1')
self.assertTrue(os.path.exists(file_name))
self.assertTrue(utils.is_file_older(file_name, 0))
# check that timestamp in file name is not relevant
self.assertFalse(utils.is_file_older(file_name, 50000))
time.sleep(0.01)
self.assertTrue(utils.is_file_older(file_name, 0.009))
def test_human_readable(self):
self.assertEqual(utils.human_readable(0), '0')
self.assertEqual(utils.human_readable(1), '1')
self.assertEqual(utils.human_readable(10), '10')
self.assertEqual(utils.human_readable(100), '100')
self.assertEqual(utils.human_readable(999), '999')
self.assertEqual(utils.human_readable(1024), '1Ki')
self.assertEqual(utils.human_readable(1535), '1Ki')
self.assertEqual(utils.human_readable(1536), '2Ki')
self.assertEqual(utils.human_readable(1047552), '1023Ki')
self.assertEqual(utils.human_readable(1048063), '1023Ki')
self.assertEqual(utils.human_readable(1048064), '1Mi')
self.assertEqual(utils.human_readable(1048576), '1Mi')
self.assertEqual(utils.human_readable(1073741824), '1Gi')
self.assertEqual(utils.human_readable(1099511627776), '1Ti')
self.assertEqual(utils.human_readable(1125899906842624), '1Pi')
self.assertEqual(utils.human_readable(1152921504606846976), '1Ei')
self.assertEqual(utils.human_readable(1180591620717411303424), '1Zi')
self.assertEqual(utils.human_readable(1208925819614629174706176),
'1Yi')
self.assertEqual(utils.human_readable(1237940039285380274899124224),
'1024Yi')
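# As the assertions above show (informal summary), human_readable() renders
# byte counts with binary suffixes Ki/Mi/Gi/Ti/Pi/Ei/Zi/Yi -- 1024 -> '1Ki',
# 1048576 -> '1Mi' -- rounding to whole units (1535 -> '1Ki', 1536 -> '2Ki')
# and never going past Yi, so 2**90 bytes renders as '1024Yi'.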
def test_validate_sync_to(self):
fname = 'container-sync-realms.conf'
fcontents = '''
[US]
key = 9ff3b71c849749dbaec4ccdd3cbab62b
cluster_dfw1 = http://dfw1.host/v1/
'''
with temptree([fname], [fcontents]) as tempdir:
logger = debug_logger()
fpath = os.path.join(tempdir, fname)
csr = ContainerSyncRealms(fpath, logger)
for realms_conf in (None, csr):
for goodurl, result in (
('http://1.1.1.1/v1/a/c',
(None, 'http://1.1.1.1/v1/a/c', None, None)),
('http://1.1.1.1:8080/a/c',
(None, 'http://1.1.1.1:8080/a/c', None, None)),
('http://2.2.2.2/a/c',
(None, 'http://2.2.2.2/a/c', None, None)),
('https://1.1.1.1/v1/a/c',
(None, 'https://1.1.1.1/v1/a/c', None, None)),
('//US/DFW1/a/c',
(None, 'http://dfw1.host/v1/a/c', 'US',
'9ff3b71c849749dbaec4ccdd3cbab62b')),
('//us/DFW1/a/c',
(None, 'http://dfw1.host/v1/a/c', 'US',
'9ff3b71c849749dbaec4ccdd3cbab62b')),
('//us/dfw1/a/c',
(None, 'http://dfw1.host/v1/a/c', 'US',
'9ff3b71c849749dbaec4ccdd3cbab62b')),
('//',
(None, None, None, None)),
('',
(None, None, None, None))):
if goodurl.startswith('//') and not realms_conf:
self.assertEqual(
utils.validate_sync_to(
goodurl, ['1.1.1.1', '2.2.2.2'], realms_conf),
(None, None, None, None))
else:
self.assertEqual(
utils.validate_sync_to(
goodurl, ['1.1.1.1', '2.2.2.2'], realms_conf),
result)
for badurl, result in (
('http://1.1.1.1',
('Path required in X-Container-Sync-To', None, None,
None)),
('httpq://1.1.1.1/v1/a/c',
('Invalid scheme \'httpq\' in X-Container-Sync-To, '
'must be "//", "http", or "https".', None, None,
None)),
('http://1.1.1.1/v1/a/c?query',
('Params, queries, and fragments not allowed in '
'X-Container-Sync-To', None, None, None)),
('http://1.1.1.1/v1/a/c#frag',
('Params, queries, and fragments not allowed in '
'X-Container-Sync-To', None, None, None)),
('http://1.1.1.1/v1/a/c?query#frag',
('Params, queries, and fragments not allowed in '
'X-Container-Sync-To', None, None, None)),
('http://1.1.1.1/v1/a/c?query=param',
('Params, queries, and fragments not allowed in '
'X-Container-Sync-To', None, None, None)),
('http://1.1.1.1/v1/a/c?query=param#frag',
('Params, queries, and fragments not allowed in '
'X-Container-Sync-To', None, None, None)),
('http://1.1.1.2/v1/a/c',
("Invalid host '1.1.1.2' in X-Container-Sync-To",
None, None, None)),
('//us/invalid/a/c',
("No cluster endpoint for 'us' 'invalid'", None,
None, None)),
('//invalid/dfw1/a/c',
("No realm key for 'invalid'", None, None, None)),
('//us/invalid1/a/',
("Invalid X-Container-Sync-To format "
"'//us/invalid1/a/'", None, None, None)),
('//us/invalid1/a',
("Invalid X-Container-Sync-To format "
"'//us/invalid1/a'", None, None, None)),
('//us/invalid1/',
("Invalid X-Container-Sync-To format "
"'//us/invalid1/'", None, None, None)),
('//us/invalid1',
("Invalid X-Container-Sync-To format "
"'//us/invalid1'", None, None, None)),
('//us/',
("Invalid X-Container-Sync-To format "
"'//us/'", None, None, None)),
('//us',
("Invalid X-Container-Sync-To format "
"'//us'", None, None, None))):
if badurl.startswith('//') and not realms_conf:
self.assertEqual(
utils.validate_sync_to(
badurl, ['1.1.1.1', '2.2.2.2'], realms_conf),
(None, None, None, None))
else:
self.assertEqual(
utils.validate_sync_to(
badurl, ['1.1.1.1', '2.2.2.2'], realms_conf),
result)
def test_TRUE_VALUES(self):
for v in utils.TRUE_VALUES:
self.assertEqual(v, v.lower())
def test_config_true_value(self):
orig_trues = utils.TRUE_VALUES
try:
utils.TRUE_VALUES = 'hello world'.split()
for val in 'hello world HELLO WORLD'.split():
self.assertTrue(utils.config_true_value(val) is True)
self.assertTrue(utils.config_true_value(True) is True)
self.assertTrue(utils.config_true_value('foo') is False)
self.assertTrue(utils.config_true_value(False) is False)
finally:
utils.TRUE_VALUES = orig_trues
def test_non_negative_float(self):
self.assertEqual(0, utils.non_negative_float('0.0'))
self.assertEqual(0, utils.non_negative_float(0.0))
self.assertEqual(1.1, utils.non_negative_float(1.1))
self.assertEqual(1.1, utils.non_negative_float('1.1'))
self.assertEqual(1.0, utils.non_negative_float('1'))
self.assertEqual(1, utils.non_negative_float(True))
self.assertEqual(0, utils.non_negative_float(False))
with self.assertRaises(ValueError):
utils.non_negative_float(-1.1)
with self.assertRaises(ValueError):
utils.non_negative_float('-1.1')
with self.assertRaises(ValueError):
utils.non_negative_float('one')
def test_non_negative_int(self):
self.assertEqual(0, utils.non_negative_int('0'))
self.assertEqual(0, utils.non_negative_int(0.0))
self.assertEqual(1, utils.non_negative_int(1))
self.assertEqual(1, utils.non_negative_int('1'))
self.assertEqual(1, utils.non_negative_int(True))
self.assertEqual(0, utils.non_negative_int(False))
with self.assertRaises(ValueError):
utils.non_negative_int(-1)
with self.assertRaises(ValueError):
utils.non_negative_int('-1')
with self.assertRaises(ValueError):
utils.non_negative_int('-1.1')
with self.assertRaises(ValueError):
utils.non_negative_int('1.1')
with self.assertRaises(ValueError):
utils.non_negative_int('1.0')
with self.assertRaises(ValueError):
utils.non_negative_int('one')
def test_config_positive_int_value(self):
expectations = {
# value : expected,
u'1': 1,
b'1': 1,
1: 1,
u'2': 2,
b'2': 2,
u'1024': 1024,
b'1024': 1024,
u'0': ValueError,
b'0': ValueError,
u'-1': ValueError,
b'-1': ValueError,
u'0x01': ValueError,
b'0x01': ValueError,
u'asdf': ValueError,
b'asdf': ValueError,
None: ValueError,
0: ValueError,
-1: ValueError,
u'1.2': ValueError, # a string expressing a float should map to ValueError
b'1.2': ValueError, # a string expressing a float should map to ValueError
}
for value, expected in expectations.items():
try:
rv = utils.config_positive_int_value(value)
except Exception as e:
if e.__class__ is not expected:
raise
else:
self.assertEqual(
'Config option must be an positive int number, '
'not "%s".' % value, e.args[0])
else:
self.assertEqual(expected, rv)
def test_config_float_value(self):
for args, expected in (
((99, None, None), 99.0),
((99.01, None, None), 99.01),
(('99', None, None), 99.0),
(('99.01', None, None), 99.01),
((99, 99, None), 99.0),
((99.01, 99.01, None), 99.01),
(('99', 99, None), 99.0),
(('99.01', 99.01, None), 99.01),
((99, None, 99), 99.0),
((99.01, None, 99.01), 99.01),
(('99', None, 99), 99.0),
(('99.01', None, 99.01), 99.01),
((-99, -99, -99), -99.0),
((-99.01, -99.01, -99.01), -99.01),
(('-99', -99, -99), -99.0),
(('-99.01', -99.01, -99.01), -99.01),):
actual = utils.config_float_value(*args)
self.assertEqual(expected, actual)
for val, minimum in ((99, 100),
('99', 100),
(-99, -98),
('-98.01', -98)):
with self.assertRaises(ValueError) as cm:
utils.config_float_value(val, minimum=minimum)
self.assertIn('greater than %s' % minimum, cm.exception.args[0])
self.assertNotIn('less than', cm.exception.args[0])
for val, maximum in ((99, 98),
('99', 98),
(-99, -100),
('-97.9', -98)):
with self.assertRaises(ValueError) as cm:
utils.config_float_value(val, maximum=maximum)
self.assertIn('less than %s' % maximum, cm.exception.args[0])
self.assertNotIn('greater than', cm.exception.args[0])
for val, minimum, maximum in ((99, 99, 98),
('99', 100, 100),
(99, 98, 98),):
with self.assertRaises(ValueError) as cm:
utils.config_float_value(val, minimum=minimum, maximum=maximum)
self.assertIn('greater than %s' % minimum, cm.exception.args[0])
self.assertIn('less than %s' % maximum, cm.exception.args[0])
def test_config_percent_value(self):
for arg, expected in (
(99, 0.99),
(25.5, 0.255),
('99', 0.99),
('25.5', 0.255),
(0, 0.0),
('0', 0.0),
('100', 1.0),
(100, 1.0),
(1, 0.01),
('1', 0.01),
(25, 0.25)):
actual = utils.config_percent_value(arg)
self.assertEqual(expected, actual)
# bad values
for val in (-1, '-1', 101, '101'):
with self.assertRaises(ValueError) as cm:
utils.config_percent_value(val)
self.assertIn('Config option must be a number, greater than 0, '
'less than 100, not "{}"'.format(val),
cm.exception.args[0])
def test_config_request_node_count_value(self):
def do_test(value, replicas, expected):
self.assertEqual(
expected,
utils.config_request_node_count_value(value)(replicas))
do_test('0', 10, 0)
do_test('1 * replicas', 3, 3)
do_test('1 * replicas', 11, 11)
do_test('2 * replicas', 3, 6)
do_test('2 * replicas', 11, 22)
do_test('11', 11, 11)
do_test('10', 11, 10)
do_test('12', 11, 12)
for bad in ('1.1', 1.1, 'auto', 'bad',
'2.5 * replicas', 'two * replicas'):
with annotate_failure(bad):
with self.assertRaises(ValueError):
utils.config_request_node_count_value(bad)
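# Semantics demonstrated above (informal summary): the config value is either
# a bare non-negative integer or the form '<int> * replicas'; in both cases
# config_request_node_count_value() returns a callable that maps a replica
# count to the absolute node count (e.g. '2 * replicas' with 3 replicas gives
# 6), and floats, 'auto', or any other string raise ValueError up front.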
def test_config_auto_int_value(self):
expectations = {
# (value, default) : expected,
('1', 0): 1,
(1, 0): 1,
('asdf', 0): ValueError,
('auto', 1): 1,
('AutO', 1): 1,
('Aut0', 1): ValueError,
(None, 1): 1,
}
for (value, default), expected in expectations.items():
try:
rv = utils.config_auto_int_value(value, default)
except Exception as e:
if e.__class__ is not expected:
raise
else:
self.assertEqual(expected, rv)
def test_streq_const_time(self):
self.assertTrue(utils.streq_const_time('abc123', 'abc123'))
self.assertFalse(utils.streq_const_time('a', 'aaaaa'))
self.assertFalse(utils.streq_const_time('ABC123', 'abc123'))
def test_quorum_size(self):
expected_sizes = {1: 1,
2: 1,
3: 2,
4: 2,
5: 3}
got_sizes = dict([(n, utils.quorum_size(n))
for n in expected_sizes])
self.assertEqual(expected_sizes, got_sizes)
def test_majority_size(self):
expected_sizes = {1: 1,
2: 2,
3: 2,
4: 3,
5: 3}
got_sizes = dict([(n, utils.majority_size(n))
for n in expected_sizes])
self.assertEqual(expected_sizes, got_sizes)
def test_rsync_ip_ipv4_localhost(self):
self.assertEqual(utils.rsync_ip('127.0.0.1'), '127.0.0.1')
def test_rsync_ip_ipv6_random_ip(self):
self.assertEqual(
utils.rsync_ip('fe80:0000:0000:0000:0202:b3ff:fe1e:8329'),
'[fe80:0000:0000:0000:0202:b3ff:fe1e:8329]')
def test_rsync_ip_ipv6_ipv4_compatible(self):
self.assertEqual(
utils.rsync_ip('::ffff:192.0.2.128'), '[::ffff:192.0.2.128]')
def test_rsync_module_interpolation(self):
fake_device = {'ip': '127.0.0.1', 'port': 11,
'replication_ip': '127.0.0.2', 'replication_port': 12,
'region': '1', 'zone': '2', 'device': 'sda1',
'meta': 'just_a_string'}
self.assertEqual(
utils.rsync_module_interpolation('{ip}', fake_device),
'127.0.0.1')
self.assertEqual(
utils.rsync_module_interpolation('{port}', fake_device),
'11')
self.assertEqual(
utils.rsync_module_interpolation('{replication_ip}', fake_device),
'127.0.0.2')
self.assertEqual(
utils.rsync_module_interpolation('{replication_port}',
fake_device),
'12')
self.assertEqual(
utils.rsync_module_interpolation('{region}', fake_device),
'1')
self.assertEqual(
utils.rsync_module_interpolation('{zone}', fake_device),
'2')
self.assertEqual(
utils.rsync_module_interpolation('{device}', fake_device),
'sda1')
self.assertEqual(
utils.rsync_module_interpolation('{meta}', fake_device),
'just_a_string')
self.assertEqual(
utils.rsync_module_interpolation('{replication_ip}::object',
fake_device),
'127.0.0.2::object')
self.assertEqual(
utils.rsync_module_interpolation('{ip}::container{port}',
fake_device),
'127.0.0.1::container11')
self.assertEqual(
utils.rsync_module_interpolation(
'{replication_ip}::object_{device}', fake_device),
'127.0.0.2::object_sda1')
self.assertEqual(
utils.rsync_module_interpolation(
'127.0.0.3::object_{replication_port}', fake_device),
'127.0.0.3::object_12')
self.assertRaises(ValueError, utils.rsync_module_interpolation,
'{replication_ip}::object_{deivce}', fake_device)
def test_generate_trans_id(self):
fake_time = 1366428370.5163341
with patch.object(utils.time, 'time', return_value=fake_time):
trans_id = utils.generate_trans_id('')
self.assertEqual(len(trans_id), 34)
self.assertEqual(trans_id[:2], 'tx')
self.assertEqual(trans_id[23], '-')
self.assertEqual(int(trans_id[24:], 16), int(fake_time))
with patch.object(utils.time, 'time', return_value=fake_time):
trans_id = utils.generate_trans_id('-suffix')
self.assertEqual(len(trans_id), 41)
self.assertEqual(trans_id[:2], 'tx')
self.assertEqual(trans_id[34:], '-suffix')
self.assertEqual(trans_id[23], '-')
self.assertEqual(int(trans_id[24:34], 16), int(fake_time))
def test_get_trans_id_time(self):
ts = utils.get_trans_id_time('tx8c8bc884cdaf499bb29429aa9c46946e')
self.assertIsNone(ts)
ts = utils.get_trans_id_time('tx1df4ff4f55ea45f7b2ec2-0051720c06')
self.assertEqual(ts, 1366428678)
self.assertEqual(
time.asctime(time.gmtime(ts)) + ' UTC',
'Sat Apr 20 03:31:18 2013 UTC')
ts = utils.get_trans_id_time(
'tx1df4ff4f55ea45f7b2ec2-0051720c06-suffix')
self.assertEqual(ts, 1366428678)
self.assertEqual(
time.asctime(time.gmtime(ts)) + ' UTC',
'Sat Apr 20 03:31:18 2013 UTC')
ts = utils.get_trans_id_time('')
self.assertIsNone(ts)
ts = utils.get_trans_id_time('garbage')
self.assertIsNone(ts)
ts = utils.get_trans_id_time('tx1df4ff4f55ea45f7b2ec2-almostright')
self.assertIsNone(ts)
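# Transaction-id layout implied by the two tests above (informal note):
#   'tx' + 21-character unique part + '-' + 10 hex digits of int(time()) [+ suffix]
# which is why generate_trans_id('') yields 34 characters and
# get_trans_id_time() decodes characters 24:34 as hex epoch seconds, returning
# None for any id that does not fit that shape.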
def test_config_fallocate_value(self):
fallocate_value, is_percent = utils.config_fallocate_value('10%')
self.assertEqual(fallocate_value, 10)
self.assertTrue(is_percent)
fallocate_value, is_percent = utils.config_fallocate_value('10')
self.assertEqual(fallocate_value, 10)
self.assertFalse(is_percent)
try:
fallocate_value, is_percent = utils.config_fallocate_value('ab%')
except ValueError as err:
exc = err
self.assertEqual(str(exc), 'Error: ab% is an invalid value for '
'fallocate_reserve.')
try:
fallocate_value, is_percent = utils.config_fallocate_value('ab')
except ValueError as err:
exc = err
self.assertEqual(str(exc), 'Error: ab is an invalid value for '
'fallocate_reserve.')
try:
fallocate_value, is_percent = utils.config_fallocate_value('1%%')
except ValueError as err:
exc = err
self.assertEqual(str(exc), 'Error: 1%% is an invalid value for '
'fallocate_reserve.')
try:
fallocate_value, is_percent = utils.config_fallocate_value('10.0')
except ValueError as err:
exc = err
self.assertEqual(str(exc), 'Error: 10.0 is an invalid value for '
'fallocate_reserve.')
fallocate_value, is_percent = utils.config_fallocate_value('10.5%')
self.assertEqual(fallocate_value, 10.5)
self.assertTrue(is_percent)
fallocate_value, is_percent = utils.config_fallocate_value('10.000%')
self.assertEqual(fallocate_value, 10.000)
self.assertTrue(is_percent)
def test_lock_file(self):
flags = os.O_CREAT | os.O_RDWR
with NamedTemporaryFile(delete=False) as nt:
nt.write(b"test string")
nt.flush()
nt.close()
with utils.lock_file(nt.name, unlink=False) as f:
self.assertEqual(f.read(), b"test string")
# we have a lock, now let's try to get a newer one
fd = os.open(nt.name, flags)
self.assertRaises(IOError, fcntl.flock, fd,
fcntl.LOCK_EX | fcntl.LOCK_NB)
with utils.lock_file(nt.name, unlink=False, append=True) as f:
f.seek(0)
self.assertEqual(f.read(), b"test string")
f.seek(0)
f.write(b"\nanother string")
f.flush()
f.seek(0)
self.assertEqual(f.read(), b"test string\nanother string")
# we have a lock, now let's try to get a newer one
fd = os.open(nt.name, flags)
self.assertRaises(IOError, fcntl.flock, fd,
fcntl.LOCK_EX | fcntl.LOCK_NB)
with utils.lock_file(nt.name, timeout=3, unlink=False) as f:
try:
with utils.lock_file(
nt.name, timeout=1, unlink=False) as f:
self.assertTrue(
False, "Expected LockTimeout exception")
except LockTimeout:
pass
with utils.lock_file(nt.name, unlink=True) as f:
self.assertEqual(f.read(), b"test string\nanother string")
# we have a lock, now let's try to get a newer one
fd = os.open(nt.name, flags)
self.assertRaises(
IOError, fcntl.flock, fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
self.assertRaises(OSError, os.remove, nt.name)
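# Rough usage sketch for the helper exercised above (hypothetical path, not
# authoritative):
#   with utils.lock_file('/tmp/example.lock', timeout=3, unlink=False) as fp:
#       data = fp.read()          # exclusive flock held for the with-block
# With unlink=True the file is removed when the lock is released, and a second
# locker raises LockTimeout once `timeout` seconds elapse without the lock.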
def test_lock_file_unlinked_after_open(self):
os_open = os.open
first_pass = [True]
def deleting_open(filename, flags):
# unlink the file after it's opened. once.
fd = os_open(filename, flags)
if first_pass[0]:
os.unlink(filename)
first_pass[0] = False
return fd
with NamedTemporaryFile(delete=False) as nt:
with mock.patch('os.open', deleting_open):
with utils.lock_file(nt.name, unlink=True) as f:
self.assertNotEqual(os.fstat(nt.fileno()).st_ino,
os.fstat(f.fileno()).st_ino)
first_pass = [True]
def recreating_open(filename, flags):
# unlink and recreate the file after it's opened
fd = os_open(filename, flags)
if first_pass[0]:
os.unlink(filename)
os.close(os_open(filename, os.O_CREAT | os.O_RDWR))
first_pass[0] = False
return fd
with NamedTemporaryFile(delete=False) as nt:
with mock.patch('os.open', recreating_open):
with utils.lock_file(nt.name, unlink=True) as f:
self.assertNotEqual(os.fstat(nt.fileno()).st_ino,
os.fstat(f.fileno()).st_ino)
def test_lock_file_held_on_unlink(self):
os_unlink = os.unlink
def flocking_unlink(filename):
# make sure the lock is held when we unlink
fd = os.open(filename, os.O_RDWR)
self.assertRaises(
IOError, fcntl.flock, fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
os.close(fd)
os_unlink(filename)
with NamedTemporaryFile(delete=False) as nt:
with mock.patch('os.unlink', flocking_unlink):
with utils.lock_file(nt.name, unlink=True):
pass
def test_lock_file_no_unlink_if_fail(self):
os_open = os.open
with NamedTemporaryFile(delete=True) as nt:
def lock_on_open(filename, flags):
# lock the file on another fd after it's opened.
fd = os_open(filename, flags)
fd2 = os_open(filename, flags)
fcntl.flock(fd2, fcntl.LOCK_EX | fcntl.LOCK_NB)
return fd
try:
timedout = False
with mock.patch('os.open', lock_on_open):
with utils.lock_file(nt.name, unlink=False, timeout=0.01):
pass
except LockTimeout:
timedout = True
self.assertTrue(timedout)
self.assertTrue(os.path.exists(nt.name))
def test_ismount_path_does_not_exist(self):
tmpdir = mkdtemp()
try:
self.assertFalse(utils.ismount(os.path.join(tmpdir, 'bar')))
finally:
shutil.rmtree(tmpdir)
def test_ismount_path_not_mount(self):
tmpdir = mkdtemp()
try:
self.assertFalse(utils.ismount(tmpdir))
finally:
shutil.rmtree(tmpdir)
def test_ismount_path_error(self):
def _mock_os_lstat(path):
raise OSError(13, "foo")
tmpdir = mkdtemp()
try:
with patch("os.lstat", _mock_os_lstat):
# Raises exception with _raw -- see next test.
utils.ismount(tmpdir)
finally:
shutil.rmtree(tmpdir)
def test_ismount_raw_path_error(self):
def _mock_os_lstat(path):
raise OSError(13, "foo")
tmpdir = mkdtemp()
try:
with patch("os.lstat", _mock_os_lstat):
self.assertRaises(OSError, utils.ismount_raw, tmpdir)
finally:
shutil.rmtree(tmpdir)
def test_ismount_path_is_symlink(self):
tmpdir = mkdtemp()
try:
link = os.path.join(tmpdir, "tmp")
rdir = os.path.join(tmpdir, "realtmp")
os.mkdir(rdir)
os.symlink(rdir, link)
self.assertFalse(utils.ismount(link))
# Can add a stubfile to make it pass
with open(os.path.join(link, ".ismount"), "w"):
pass
self.assertTrue(utils.ismount(link))
finally:
shutil.rmtree(tmpdir)
def test_ismount_path_is_root(self):
self.assertTrue(utils.ismount('/'))
def test_ismount_parent_path_error(self):
_os_lstat = os.lstat
def _mock_os_lstat(path):
if path.endswith(".."):
raise OSError(13, "foo")
else:
return _os_lstat(path)
tmpdir = mkdtemp()
try:
with patch("os.lstat", _mock_os_lstat):
# Raises exception with _raw -- see next test.
utils.ismount(tmpdir)
finally:
shutil.rmtree(tmpdir)
def test_ismount_raw_parent_path_error(self):
_os_lstat = os.lstat
def _mock_os_lstat(path):
if path.endswith(".."):
raise OSError(13, "foo")
else:
return _os_lstat(path)
tmpdir = mkdtemp()
try:
with patch("os.lstat", _mock_os_lstat):
self.assertRaises(OSError, utils.ismount_raw, tmpdir)
finally:
shutil.rmtree(tmpdir)
def test_ismount_successes_dev(self):
_os_lstat = os.lstat
class MockStat(object):
def __init__(self, mode, dev, ino):
self.st_mode = mode
self.st_dev = dev
self.st_ino = ino
def _mock_os_lstat(path):
if path.endswith(".."):
parent = _os_lstat(path)
return MockStat(parent.st_mode, parent.st_dev + 1,
parent.st_ino)
else:
return _os_lstat(path)
tmpdir = mkdtemp()
try:
with patch("os.lstat", _mock_os_lstat):
self.assertTrue(utils.ismount(tmpdir))
finally:
shutil.rmtree(tmpdir)
def test_ismount_successes_ino(self):
_os_lstat = os.lstat
class MockStat(object):
def __init__(self, mode, dev, ino):
self.st_mode = mode
self.st_dev = dev
self.st_ino = ino
def _mock_os_lstat(path):
if path.endswith(".."):
return _os_lstat(path)
else:
parent_path = os.path.join(path, "..")
child = _os_lstat(path)
parent = _os_lstat(parent_path)
return MockStat(child.st_mode, parent.st_ino,
child.st_dev)
tmpdir = mkdtemp()
try:
with patch("os.lstat", _mock_os_lstat):
self.assertTrue(utils.ismount(tmpdir))
finally:
shutil.rmtree(tmpdir)
def test_ismount_successes_stubfile(self):
tmpdir = mkdtemp()
fname = os.path.join(tmpdir, ".ismount")
try:
with open(fname, "w") as stubfile:
stubfile.write("")
self.assertTrue(utils.ismount(tmpdir))
finally:
shutil.rmtree(tmpdir)
def test_parse_content_type(self):
self.assertEqual(utils.parse_content_type('text/plain'),
('text/plain', []))
self.assertEqual(utils.parse_content_type('text/plain;charset=utf-8'),
('text/plain', [('charset', 'utf-8')]))
self.assertEqual(
utils.parse_content_type('text/plain;hello="world";charset=utf-8'),
('text/plain', [('hello', '"world"'), ('charset', 'utf-8')]))
self.assertEqual(
utils.parse_content_type('text/plain; hello="world"; a=b'),
('text/plain', [('hello', '"world"'), ('a', 'b')]))
self.assertEqual(
utils.parse_content_type(r'text/plain; x="\""; a=b'),
('text/plain', [('x', r'"\""'), ('a', 'b')]))
self.assertEqual(
utils.parse_content_type(r'text/plain; x; a=b'),
('text/plain', [('x', ''), ('a', 'b')]))
self.assertEqual(
utils.parse_content_type(r'text/plain; x="\""; a'),
('text/plain', [('x', r'"\""'), ('a', '')]))
def test_override_bytes_from_content_type(self):
listing_dict = {
'bytes': 1234, 'hash': 'asdf', 'name': 'zxcv',
'content_type': 'text/plain; hello="world"; swift_bytes=15'}
utils.override_bytes_from_content_type(listing_dict,
logger=debug_logger())
self.assertEqual(listing_dict['bytes'], 15)
self.assertEqual(listing_dict['content_type'],
'text/plain;hello="world"')
listing_dict = {
'bytes': 1234, 'hash': 'asdf', 'name': 'zxcv',
'content_type': 'text/plain; hello="world"; swift_bytes=hey'}
utils.override_bytes_from_content_type(listing_dict,
logger=debug_logger())
self.assertEqual(listing_dict['bytes'], 1234)
self.assertEqual(listing_dict['content_type'],
'text/plain;hello="world"')
def test_extract_swift_bytes(self):
scenarios = {
# maps input value -> expected returned tuple
'': ('', None),
'text/plain': ('text/plain', None),
'text/plain; other=thing': ('text/plain;other=thing', None),
'text/plain; swift_bytes=123': ('text/plain', '123'),
'text/plain; other=thing;swift_bytes=123':
('text/plain;other=thing', '123'),
'text/plain; swift_bytes=123; other=thing':
('text/plain;other=thing', '123'),
'text/plain; swift_bytes=123; swift_bytes=456':
('text/plain', '456'),
'text/plain; swift_bytes=123; other=thing;swift_bytes=456':
('text/plain;other=thing', '456')}
for test_value, expected in scenarios.items():
self.assertEqual(expected, utils.extract_swift_bytes(test_value))
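# As the scenario table above shows (informal summary), extract_swift_bytes()
# strips every swift_bytes parameter from the content-type, returns the last
# swift_bytes value seen (or None when absent), and re-serialises the remaining
# parameters without spaces around the semicolons.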
def test_clean_content_type(self):
subtests = {
'': '', 'text/plain': 'text/plain',
'text/plain; someother=thing': 'text/plain; someother=thing',
'text/plain; swift_bytes=123': 'text/plain',
'text/plain; someother=thing; swift_bytes=123':
'text/plain; someother=thing',
# Since Swift always tacks on the swift_bytes, clean_content_type()
# only strips swift_bytes if it's last. The next item simply shows
# that if for some other odd reason it's not last,
# clean_content_type() will not remove it from the header.
'text/plain; swift_bytes=123; someother=thing':
'text/plain; swift_bytes=123; someother=thing'}
for before, after in subtests.items():
self.assertEqual(utils.clean_content_type(before), after)
def test_get_valid_utf8_str(self):
def do_test(input_value, expected):
actual = utils.get_valid_utf8_str(input_value)
self.assertEqual(expected, actual)
self.assertIsInstance(actual, six.binary_type)
actual.decode('utf-8')
do_test(b'abc', b'abc')
do_test(u'abc', b'abc')
do_test(u'\uc77c\uc601', b'\xec\x9d\xbc\xec\x98\x81')
do_test(b'\xec\x9d\xbc\xec\x98\x81', b'\xec\x9d\xbc\xec\x98\x81')
# test some invalid UTF-8
do_test(b'\xec\x9d\xbc\xec\x98', b'\xec\x9d\xbc\xef\xbf\xbd')
# check surrogate pairs, too
do_test(u'\U0001f0a1', b'\xf0\x9f\x82\xa1')
do_test(u'\uD83C\uDCA1', b'\xf0\x9f\x82\xa1')
do_test(b'\xf0\x9f\x82\xa1', b'\xf0\x9f\x82\xa1')
do_test(b'\xed\xa0\xbc\xed\xb2\xa1', b'\xf0\x9f\x82\xa1')
def test_quote_bytes(self):
self.assertEqual(b'/v1/a/c3/subdirx/',
utils.quote(b'/v1/a/c3/subdirx/'))
self.assertEqual(b'/v1/a%26b/c3/subdirx/',
utils.quote(b'/v1/a&b/c3/subdirx/'))
self.assertEqual(b'%2Fv1%2Fa&b%2Fc3%2Fsubdirx%2F',
utils.quote(b'/v1/a&b/c3/subdirx/', safe='&'))
self.assertEqual(b'abc_%EC%9D%BC%EC%98%81',
utils.quote(u'abc_\uc77c\uc601'.encode('utf8')))
# Invalid utf8 is parsed as latin1, then re-encoded as utf8??
self.assertEqual(b'%EF%BF%BD%EF%BF%BD%EC%BC%9D%EF%BF%BD',
utils.quote(u'\uc77c\uc601'.encode('utf8')[::-1]))
def test_quote_unicode(self):
self.assertEqual(u'/v1/a/c3/subdirx/',
utils.quote(u'/v1/a/c3/subdirx/'))
self.assertEqual(u'/v1/a%26b/c3/subdirx/',
utils.quote(u'/v1/a&b/c3/subdirx/'))
self.assertEqual(u'%2Fv1%2Fa&b%2Fc3%2Fsubdirx%2F',
utils.quote(u'/v1/a&b/c3/subdirx/', safe='&'))
self.assertEqual(u'abc_%EC%9D%BC%EC%98%81',
utils.quote(u'abc_\uc77c\uc601'))
def test_get_hmac(self):
self.assertEqual(
utils.get_hmac('GET', '/path', 1, 'abc'),
'b17f6ff8da0e251737aa9e3ee69a881e3e092e2f')
def test_get_hmac_ip_range(self):
self.assertEqual(
utils.get_hmac('GET', '/path', 1, 'abc', ip_range='127.0.0.1'),
'b30dde4d2b8562b8496466c3b46b2b9ac5054461')
def test_get_hmac_ip_range_non_binary_type(self):
self.assertEqual(
utils.get_hmac(u'GET', u'/path', 1, u'abc', ip_range=u'127.0.0.1'),
'b30dde4d2b8562b8496466c3b46b2b9ac5054461')
def test_parse_override_options(self):
# When override_<thing> is passed in, it takes precedence.
opts = utils.parse_override_options(
override_policies=[0, 1],
override_devices=['sda', 'sdb'],
override_partitions=[100, 200],
policies='0,1,2,3',
devices='sda,sdb,sdc,sdd',
partitions='100,200,300,400')
self.assertEqual(opts.policies, [0, 1])
self.assertEqual(opts.devices, ['sda', 'sdb'])
self.assertEqual(opts.partitions, [100, 200])
# When override_<thing> is passed in, it applies even in run-once
# mode.
opts = utils.parse_override_options(
once=True,
override_policies=[0, 1],
override_devices=['sda', 'sdb'],
override_partitions=[100, 200],
policies='0,1,2,3',
devices='sda,sdb,sdc,sdd',
partitions='100,200,300,400')
self.assertEqual(opts.policies, [0, 1])
self.assertEqual(opts.devices, ['sda', 'sdb'])
self.assertEqual(opts.partitions, [100, 200])
# In run-once mode, we honor the passed-in overrides.
opts = utils.parse_override_options(
once=True,
policies='0,1,2,3',
devices='sda,sdb,sdc,sdd',
partitions='100,200,300,400')
self.assertEqual(opts.policies, [0, 1, 2, 3])
self.assertEqual(opts.devices, ['sda', 'sdb', 'sdc', 'sdd'])
self.assertEqual(opts.partitions, [100, 200, 300, 400])
# In run-forever mode, we ignore the passed-in overrides.
opts = utils.parse_override_options(
policies='0,1,2,3',
devices='sda,sdb,sdc,sdd',
partitions='100,200,300,400')
self.assertEqual(opts.policies, [])
self.assertEqual(opts.devices, [])
self.assertEqual(opts.partitions, [])
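# Precedence rules demonstrated above (informal summary): explicit
# override_policies / override_devices / override_partitions always win; the
# comma-separated policies/devices/partitions strings are honoured only with
# once=True (run-once mode) and are ignored in run-forever mode, where the
# parsed options come back empty.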
def test_get_policy_index(self):
# Account has no information about a policy
req = Request.blank(
'/sda1/p/a',
environ={'REQUEST_METHOD': 'GET'})
res = Response()
self.assertIsNone(utils.get_policy_index(req.headers,
res.headers))
# The policy of a container can be specified by the response header
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'GET'})
res = Response(headers={'X-Backend-Storage-Policy-Index': '1'})
self.assertEqual('1', utils.get_policy_index(req.headers,
res.headers))
# The policy of an object to be created can be specified by the request
# header
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Backend-Storage-Policy-Index': '2'})
res = Response()
self.assertEqual('2', utils.get_policy_index(req.headers,
res.headers))
def test_log_string_formatter(self):
# Plain ASCII
lf = utils.LogStringFormatter()
self.assertEqual(lf.format('{a} {b}', a='Swift is', b='great'),
'Swift is great')
lf = utils.LogStringFormatter()
self.assertEqual(lf.format('{a} {b}', a='', b='great'),
' great')
lf = utils.LogStringFormatter(default='-')
self.assertEqual(lf.format('{a} {b}', a='', b='great'),
'- great')
lf = utils.LogStringFormatter(default='-', quote=True)
self.assertEqual(lf.format('{a} {b}', a='', b='great'),
'- great')
lf = utils.LogStringFormatter(quote=True)
self.assertEqual(lf.format('{a} {b}', a='Swift is', b='great'),
'Swift%20is great')
# Unicode & co
lf = utils.LogStringFormatter()
self.assertEqual(lf.format('{a} {b}', a='Swift est',
b=u'g\u00e9nial ^^'),
u'Swift est g\u00e9nial ^^')
lf = utils.LogStringFormatter(quote=True)
self.assertEqual(lf.format('{a} {b}', a='Swift est',
b=u'g\u00e9nial ^^'),
'Swift%20est g%C3%A9nial%20%5E%5E')
def test_str_anonymizer(self):
anon = utils.StrAnonymizer('Swift is great!', 'md5', '')
self.assertEqual(anon, 'Swift is great!')
self.assertEqual(anon.anonymized,
'{MD5}45e6f00d48fdcf86213602a87df18772')
anon = utils.StrAnonymizer('Swift is great!', 'sha1', '')
self.assertEqual(anon, 'Swift is great!')
self.assertEqual(anon.anonymized,
'{SHA1}0010a3df215495d8bfa0ae4b66acc2afcc8f4c5c')
anon = utils.StrAnonymizer('Swift is great!', 'md5', 'salty_secret')
self.assertEqual(anon, 'Swift is great!')
self.assertEqual(anon.anonymized,
'{SMD5}ef4ce28fe3bdd10b6659458ceb1f3f0c')
anon = utils.StrAnonymizer('Swift is great!', 'sha1', 'salty_secret')
self.assertEqual(anon, 'Swift is great!')
self.assertEqual(anon.anonymized,
'{SSHA1}a4968f76acaddff0eb4069ebe8805d9cab44c9fe')
self.assertRaises(ValueError, utils.StrAnonymizer,
'Swift is great!', 'sha257', '')
def test_str_anonymizer_python_maddness(self):
with mock.patch('swift.common.utils.hashlib') as mocklib:
if six.PY2:
# python <2.7.9 doesn't have algorithms_guaranteed, but
# our if block short-circuits before we explode
mocklib.algorithms = hashlib.algorithms
mocklib.algorithms_guaranteed.sideEffect = AttributeError()
else:
# python 3 doesn't have the algorithms attribute, but our if block
# short-circuits before we explode
mocklib.algorithms.sideEffect.sideEffect = AttributeError()
mocklib.algorithms_guaranteed = hashlib.algorithms_guaranteed
utils.StrAnonymizer('Swift is great!', 'sha1', '')
self.assertRaises(ValueError, utils.StrAnonymizer,
'Swift is great!', 'sha257', '')
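    # StrFormatTime wraps a POSIX timestamp and exposes strftime-style
    # attributes (.a, .A, .b, ...) plus second/ms/us/ns components.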
def test_str_format_time(self):
dt = utils.StrFormatTime(10000.123456789)
self.assertEqual(str(dt), '10000.123456789')
self.assertEqual(dt.datetime, '01/Jan/1970/02/46/40')
self.assertEqual(dt.iso8601, '1970-01-01T02:46:40')
self.assertEqual(dt.asctime, 'Thu Jan 1 02:46:40 1970')
self.assertEqual(dt.s, '10000')
self.assertEqual(dt.ms, '123')
self.assertEqual(dt.us, '123456')
self.assertEqual(dt.ns, '123456789')
self.assertEqual(dt.a, 'Thu')
self.assertEqual(dt.A, 'Thursday')
self.assertEqual(dt.b, 'Jan')
self.assertEqual(dt.B, 'January')
self.assertEqual(dt.c, 'Thu Jan 1 02:46:40 1970')
self.assertEqual(dt.d, '01')
self.assertEqual(dt.H, '02')
self.assertEqual(dt.I, '02')
self.assertEqual(dt.j, '001')
self.assertEqual(dt.m, '01')
self.assertEqual(dt.M, '46')
self.assertEqual(dt.p, 'AM')
self.assertEqual(dt.S, '40')
self.assertEqual(dt.U, '00')
self.assertEqual(dt.w, '4')
self.assertEqual(dt.W, '00')
self.assertEqual(dt.x, '01/01/70')
self.assertEqual(dt.X, '02:46:40')
self.assertEqual(dt.y, '70')
self.assertEqual(dt.Y, '1970')
        self.assertIn(dt.Z, ('GMT', 'UTC'))  # it depends on Python 2/3
self.assertRaises(ValueError, getattr, dt, 'z')
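    # get_log_line() renders the standard access-log line from a request,
    # response, transaction time and additional info.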
def test_get_log_line(self):
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'HEAD', 'REMOTE_ADDR': '1.2.3.4'})
res = Response()
trans_time = 1.2
additional_info = 'some information'
server_pid = 1234
exp_line = '1.2.3.4 - - [01/Jan/1970:02:46:41 +0000] "HEAD ' \
'/sda1/p/a/c/o" 200 - "-" "-" "-" 1.2000 "some information" 1234 -'
with mock.patch('time.time', mock.MagicMock(side_effect=[10001.0])):
with mock.patch(
'os.getpid', mock.MagicMock(return_value=server_pid)):
self.assertEqual(
exp_line,
utils.get_log_line(req, res, trans_time, additional_info,
utils.LOG_LINE_DEFAULT_FORMAT,
'md5', '54LT'))
def test_cache_from_env(self):
        # should never log an error when swift.cache is found
env = {'swift.cache': 42}
logger = debug_logger()
with mock.patch('swift.common.utils.logging', logger):
self.assertEqual(42, utils.cache_from_env(env))
self.assertEqual(0, len(logger.get_lines_for_level('error')))
logger = debug_logger()
with mock.patch('swift.common.utils.logging', logger):
self.assertEqual(42, utils.cache_from_env(env, False))
self.assertEqual(0, len(logger.get_lines_for_level('error')))
logger = debug_logger()
with mock.patch('swift.common.utils.logging', logger):
self.assertEqual(42, utils.cache_from_env(env, True))
self.assertEqual(0, len(logger.get_lines_for_level('error')))
# check allow_none controls logging when swift.cache is not found
err_msg = 'ERROR: swift.cache could not be found in env!'
env = {}
logger = debug_logger()
with mock.patch('swift.common.utils.logging', logger):
self.assertIsNone(utils.cache_from_env(env))
self.assertTrue(err_msg in logger.get_lines_for_level('error'))
logger = debug_logger()
with mock.patch('swift.common.utils.logging', logger):
self.assertIsNone(utils.cache_from_env(env, False))
self.assertTrue(err_msg in logger.get_lines_for_level('error'))
logger = debug_logger()
with mock.patch('swift.common.utils.logging', logger):
self.assertIsNone(utils.cache_from_env(env, True))
self.assertEqual(0, len(logger.get_lines_for_level('error')))
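    # fsync_dir() should fsync and close a directory fd, raise for a
    # non-directory path, and only log a warning on EBADF from fsync.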
def test_fsync_dir(self):
tempdir = None
fd = None
try:
tempdir = mkdtemp()
fd, temppath = tempfile.mkstemp(dir=tempdir)
_mock_fsync = mock.Mock()
_mock_close = mock.Mock()
with patch('swift.common.utils.fsync', _mock_fsync):
with patch('os.close', _mock_close):
utils.fsync_dir(tempdir)
self.assertTrue(_mock_fsync.called)
self.assertTrue(_mock_close.called)
self.assertTrue(isinstance(_mock_fsync.call_args[0][0], int))
self.assertEqual(_mock_fsync.call_args[0][0],
_mock_close.call_args[0][0])
# Not a directory - arg is file path
self.assertRaises(OSError, utils.fsync_dir, temppath)
logger = debug_logger()
def _mock_fsync(fd):
raise OSError(errno.EBADF, os.strerror(errno.EBADF))
with patch('swift.common.utils.fsync', _mock_fsync):
with mock.patch('swift.common.utils.logging', logger):
utils.fsync_dir(tempdir)
self.assertEqual(1, len(logger.get_lines_for_level('warning')))
finally:
if fd is not None:
os.close(fd)
os.unlink(temppath)
if tempdir:
os.rmdir(tempdir)
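    # renamer() should fsync the parent of every directory it had to create,
    # but only the leaf directory when the target dir already exists.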
def test_renamer_with_fsync_dir(self):
tempdir = None
try:
tempdir = mkdtemp()
# Simulate part of object path already existing
part_dir = os.path.join(tempdir, 'objects/1234/')
os.makedirs(part_dir)
obj_dir = os.path.join(part_dir, 'aaa', 'a' * 32)
obj_path = os.path.join(obj_dir, '1425276031.12345.data')
# Object dir had to be created
_m_os_rename = mock.Mock()
_m_fsync_dir = mock.Mock()
with patch('os.rename', _m_os_rename):
with patch('swift.common.utils.fsync_dir', _m_fsync_dir):
utils.renamer("fake_path", obj_path)
_m_os_rename.assert_called_once_with('fake_path', obj_path)
            # fsync_dir on the parents of all newly created dirs
self.assertEqual(_m_fsync_dir.call_count, 3)
# Object dir existed
_m_os_rename.reset_mock()
_m_fsync_dir.reset_mock()
with patch('os.rename', _m_os_rename):
with patch('swift.common.utils.fsync_dir', _m_fsync_dir):
utils.renamer("fake_path", obj_path)
_m_os_rename.assert_called_once_with('fake_path', obj_path)
# fsync_dir only on the leaf dir
self.assertEqual(_m_fsync_dir.call_count, 1)
finally:
if tempdir:
shutil.rmtree(tempdir)
def test_renamer_when_fsync_is_false(self):
_m_os_rename = mock.Mock()
_m_fsync_dir = mock.Mock()
_m_makedirs_count = mock.Mock(return_value=2)
with patch('os.rename', _m_os_rename):
with patch('swift.common.utils.fsync_dir', _m_fsync_dir):
with patch('swift.common.utils.makedirs_count',
_m_makedirs_count):
utils.renamer("fake_path", "/a/b/c.data", fsync=False)
_m_makedirs_count.assert_called_once_with("/a/b")
_m_os_rename.assert_called_once_with('fake_path', "/a/b/c.data")
self.assertFalse(_m_fsync_dir.called)
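    # makedirs_count() returns the number of directories it actually created.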
def test_makedirs_count(self):
tempdir = None
fd = None
try:
tempdir = mkdtemp()
os.makedirs(os.path.join(tempdir, 'a/b'))
# 4 new dirs created
dirpath = os.path.join(tempdir, 'a/b/1/2/3/4')
ret = utils.makedirs_count(dirpath)
self.assertEqual(ret, 4)
# no new dirs created - dir already exists
ret = utils.makedirs_count(dirpath)
self.assertEqual(ret, 0)
# path exists and is a file
fd, temppath = tempfile.mkstemp(dir=dirpath)
os.close(fd)
self.assertRaises(OSError, utils.makedirs_count, temppath)
finally:
if tempdir:
shutil.rmtree(tempdir)
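    # find_shard_range() returns the shard range whose namespace contains the
    # given item, or None when no listed range covers it.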
def test_find_shard_range(self):
ts = utils.Timestamp.now().internal
start = utils.ShardRange('a/-a', ts, '', 'a')
atof = utils.ShardRange('a/a-f', ts, 'a', 'f')
ftol = utils.ShardRange('a/f-l', ts, 'f', 'l')
ltor = utils.ShardRange('a/l-r', ts, 'l', 'r')
rtoz = utils.ShardRange('a/r-z', ts, 'r', 'z')
end = utils.ShardRange('a/z-', ts, 'z', '')
ranges = [start, atof, ftol, ltor, rtoz, end]
found = utils.find_shard_range('', ranges)
self.assertEqual(found, None)
found = utils.find_shard_range(' ', ranges)
self.assertEqual(found, start)
found = utils.find_shard_range(' ', ranges[1:])
self.assertEqual(found, None)
found = utils.find_shard_range('b', ranges)
self.assertEqual(found, atof)
found = utils.find_shard_range('f', ranges)
self.assertEqual(found, atof)
found = utils.find_shard_range('f\x00', ranges)
self.assertEqual(found, ftol)
found = utils.find_shard_range('x', ranges)
self.assertEqual(found, rtoz)
found = utils.find_shard_range('r', ranges)
self.assertEqual(found, ltor)
found = utils.find_shard_range('}', ranges)
self.assertEqual(found, end)
found = utils.find_shard_range('}', ranges[:-1])
self.assertEqual(found, None)
        # remove l-r from the list of ranges and try to find a shard range
        # for an item in that range.
found = utils.find_shard_range('p', ranges[:-3] + ranges[-2:])
self.assertEqual(found, None)
# add some sub-shards; a sub-shard's state is less than its parent
# while the parent is undeleted, so insert these ahead of the
# overlapping parent in the list of ranges
ftoh = utils.ShardRange('a/f-h', ts, 'f', 'h')
htok = utils.ShardRange('a/h-k', ts, 'h', 'k')
overlapping_ranges = ranges[:2] + [ftoh, htok] + ranges[2:]
found = utils.find_shard_range('g', overlapping_ranges)
self.assertEqual(found, ftoh)
found = utils.find_shard_range('h', overlapping_ranges)
self.assertEqual(found, ftoh)
found = utils.find_shard_range('k', overlapping_ranges)
self.assertEqual(found, htok)
found = utils.find_shard_range('l', overlapping_ranges)
self.assertEqual(found, ftol)
found = utils.find_shard_range('m', overlapping_ranges)
self.assertEqual(found, ltor)
ktol = utils.ShardRange('a/k-l', ts, 'k', 'l')
overlapping_ranges = ranges[:2] + [ftoh, htok, ktol] + ranges[2:]
found = utils.find_shard_range('l', overlapping_ranges)
self.assertEqual(found, ktol)
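    # parse_db_filename() splits a DB filename into (hash, epoch, extension).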
def test_parse_db_filename(self):
actual = utils.parse_db_filename('hash.db')
self.assertEqual(('hash', None, '.db'), actual)
actual = utils.parse_db_filename('hash_1234567890.12345.db')
self.assertEqual(('hash', '1234567890.12345', '.db'), actual)
actual = utils.parse_db_filename(
'/dev/containers/part/ash/hash/hash_1234567890.12345.db')
self.assertEqual(('hash', '1234567890.12345', '.db'), actual)
self.assertRaises(ValueError, utils.parse_db_filename, '/path/to/dir/')
# These shouldn't come up in practice; included for completeness
self.assertEqual(utils.parse_db_filename('hashunder_.db'),
('hashunder', '', '.db'))
self.assertEqual(utils.parse_db_filename('lots_of_underscores.db'),
('lots', 'of', '.db'))
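    # make_db_file_path() stamps an epoch into a DB filename (or strips it for
    # None); epochs with offsets are normalized and bad epochs are rejected.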
def test_make_db_file_path(self):
epoch = utils.Timestamp.now()
actual = utils.make_db_file_path('hash.db', epoch)
self.assertEqual('hash_%s.db' % epoch.internal, actual)
actual = utils.make_db_file_path('hash_oldepoch.db', epoch)
self.assertEqual('hash_%s.db' % epoch.internal, actual)
actual = utils.make_db_file_path('/path/to/hash.db', epoch)
self.assertEqual('/path/to/hash_%s.db' % epoch.internal, actual)
epoch = utils.Timestamp.now()
actual = utils.make_db_file_path(actual, epoch)
self.assertEqual('/path/to/hash_%s.db' % epoch.internal, actual)
# None strips epoch
self.assertEqual('hash.db', utils.make_db_file_path('hash.db', None))
self.assertEqual('/path/to/hash.db', utils.make_db_file_path(
'/path/to/hash_withepoch.db', None))
# epochs shouldn't have offsets
epoch = utils.Timestamp.now(offset=10)
actual = utils.make_db_file_path(actual, epoch)
self.assertEqual('/path/to/hash_%s.db' % epoch.normal, actual)
self.assertRaises(ValueError, utils.make_db_file_path,
'/path/to/hash.db', 'bad epoch')
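    # modify_priority() maps nice_priority to setpriority() and the ionice
    # class/priority pair to the ioprio_set syscall (class in the high bits).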
def test_modify_priority(self):
pid = os.getpid()
logger = debug_logger()
called = {}
def _fake_setpriority(*args):
called['setpriority'] = args
def _fake_syscall(*args):
called['syscall'] = args
        # Test whether the current architecture supports changing the I/O
        # priority
try:
utils.NR_ioprio_set()
except OSError as e:
raise unittest.SkipTest(e)
with patch('swift.common.utils._libc_setpriority',
_fake_setpriority), \
patch('swift.common.utils._posix_syscall', _fake_syscall):
called = {}
# not set / default
utils.modify_priority({}, logger)
self.assertEqual(called, {})
called = {}
# just nice
utils.modify_priority({'nice_priority': '1'}, logger)
self.assertEqual(called, {'setpriority': (0, pid, 1)})
called = {}
            # ionice class alone uses the default priority 0
utils.modify_priority({'ionice_class': 'IOPRIO_CLASS_RT'}, logger)
architecture = os.uname()[4]
arch_bits = platform.architecture()[0]
if architecture == 'x86_64' and arch_bits == '64bit':
self.assertEqual(called, {'syscall': (251, 1, pid, 1 << 13)})
elif architecture == 'aarch64' and arch_bits == '64bit':
self.assertEqual(called, {'syscall': (30, 1, pid, 1 << 13)})
else:
self.fail("Unexpected call: %r" % called)
called = {}
            # ionice priority alone is ignored
utils.modify_priority({'ionice_priority': '4'}, logger)
self.assertEqual(called, {})
called = {}
# bad ionice class
utils.modify_priority({'ionice_class': 'class_foo'}, logger)
self.assertEqual(called, {})
called = {}
# ionice class & priority
utils.modify_priority({
'ionice_class': 'IOPRIO_CLASS_BE',
'ionice_priority': '4',
}, logger)
if architecture == 'x86_64' and arch_bits == '64bit':
self.assertEqual(called, {
'syscall': (251, 1, pid, 2 << 13 | 4)
})
elif architecture == 'aarch64' and arch_bits == '64bit':
self.assertEqual(called, {
'syscall': (30, 1, pid, 2 << 13 | 4)
})
else:
self.fail("Unexpected call: %r" % called)
called = {}
# all
utils.modify_priority({
'nice_priority': '-15',
'ionice_class': 'IOPRIO_CLASS_IDLE',
'ionice_priority': '6',
}, logger)
if architecture == 'x86_64' and arch_bits == '64bit':
self.assertEqual(called, {
'setpriority': (0, pid, -15),
'syscall': (251, 1, pid, 3 << 13 | 6),
})
elif architecture == 'aarch64' and arch_bits == '64bit':
self.assertEqual(called, {
'setpriority': (0, pid, -15),
'syscall': (30, 1, pid, 3 << 13 | 6),
})
else:
self.fail("Unexpected call: %r" % called)
def test__NR_ioprio_set(self):
with patch('os.uname', return_value=('', '', '', '', 'x86_64')), \
patch('platform.architecture', return_value=('64bit', '')):
self.assertEqual(251, utils.NR_ioprio_set())
with patch('os.uname', return_value=('', '', '', '', 'x86_64')), \
patch('platform.architecture', return_value=('32bit', '')):
self.assertRaises(OSError, utils.NR_ioprio_set)
with patch('os.uname', return_value=('', '', '', '', 'aarch64')), \
patch('platform.architecture', return_value=('64bit', '')):
self.assertEqual(30, utils.NR_ioprio_set())
with patch('os.uname', return_value=('', '', '', '', 'aarch64')), \
patch('platform.architecture', return_value=('32bit', '')):
self.assertRaises(OSError, utils.NR_ioprio_set)
with patch('os.uname', return_value=('', '', '', '', 'alpha')), \
patch('platform.architecture', return_value=('64bit', '')):
self.assertRaises(OSError, utils.NR_ioprio_set)
@requires_o_tmpfile_support_in_tmp
def test_link_fd_to_path_linkat_success(self):
tempdir = mkdtemp()
fd = os.open(tempdir, utils.O_TMPFILE | os.O_WRONLY)
data = b"I'm whatever Gotham needs me to be"
_m_fsync_dir = mock.Mock()
try:
os.write(fd, data)
# fd is O_WRONLY
self.assertRaises(OSError, os.read, fd, 1)
file_path = os.path.join(tempdir, uuid4().hex)
with mock.patch('swift.common.utils.fsync_dir', _m_fsync_dir):
utils.link_fd_to_path(fd, file_path, 1)
with open(file_path, 'rb') as f:
self.assertEqual(f.read(), data)
self.assertEqual(_m_fsync_dir.call_count, 2)
finally:
os.close(fd)
shutil.rmtree(tempdir)
@requires_o_tmpfile_support_in_tmp
def test_link_fd_to_path_target_exists(self):
tempdir = mkdtemp()
# Create and write to a file
fd, path = tempfile.mkstemp(dir=tempdir)
os.write(fd, b"hello world")
os.fsync(fd)
os.close(fd)
self.assertTrue(os.path.exists(path))
fd = os.open(tempdir, utils.O_TMPFILE | os.O_WRONLY)
try:
os.write(fd, b"bye world")
os.fsync(fd)
utils.link_fd_to_path(fd, path, 0, fsync=False)
            # the original file should now have been overwritten
with open(path, 'rb') as f:
self.assertEqual(f.read(), b"bye world")
finally:
os.close(fd)
shutil.rmtree(tempdir)
def test_link_fd_to_path_errno_not_EEXIST_or_ENOENT(self):
_m_linkat = mock.Mock(
side_effect=IOError(errno.EACCES, os.strerror(errno.EACCES)))
with mock.patch('swift.common.utils.linkat', _m_linkat):
try:
utils.link_fd_to_path(0, '/path', 1)
except IOError as err:
self.assertEqual(err.errno, errno.EACCES)
else:
self.fail("Expecting IOError exception")
self.assertTrue(_m_linkat.called)
@requires_o_tmpfile_support_in_tmp
def test_linkat_race_dir_not_exists(self):
tempdir = mkdtemp()
target_dir = os.path.join(tempdir, uuid4().hex)
target_path = os.path.join(target_dir, uuid4().hex)
os.mkdir(target_dir)
fd = os.open(target_dir, utils.O_TMPFILE | os.O_WRONLY)
        # simulate the directory being deleted by another backend process
os.rmdir(target_dir)
self.assertFalse(os.path.exists(target_dir))
try:
utils.link_fd_to_path(fd, target_path, 1)
self.assertTrue(os.path.exists(target_dir))
self.assertTrue(os.path.exists(target_path))
finally:
os.close(fd)
shutil.rmtree(tempdir)
def test_safe_json_loads(self):
expectations = {
None: None,
'': None,
0: None,
1: None,
'"asdf"': 'asdf',
'[]': [],
'{}': {},
"{'foo': 'bar'}": None,
'{"foo": "bar"}': {'foo': 'bar'},
}
failures = []
for value, expected in expectations.items():
try:
result = utils.safe_json_loads(value)
except Exception as e:
                # it's called "safe"; if it blows up, the test blows up
self.fail('%r caused safe method to throw %r!' % (
value, e))
try:
self.assertEqual(expected, result)
except AssertionError:
failures.append('%r => %r (expected %r)' % (
value, result, expected))
if failures:
self.fail('Invalid results from pure function:\n%s' %
'\n'.join(failures))
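    # strict_b64decode() decodes well-formed base64 (bytes or text) and raises
    # ValueError on bad length, stray characters or embedded whitespace.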
def test_strict_b64decode(self):
expectations = {
None: ValueError,
0: ValueError,
b'': b'',
u'': b'',
b'A': ValueError,
b'AA': ValueError,
b'AAA': ValueError,
b'AAAA': b'\x00\x00\x00',
u'AAAA': b'\x00\x00\x00',
b'////': b'\xff\xff\xff',
u'////': b'\xff\xff\xff',
b'A===': ValueError,
b'AA==': b'\x00',
b'AAA=': b'\x00\x00',
b' AAAA': ValueError,
b'AAAA ': ValueError,
b'AAAA============': b'\x00\x00\x00',
b'AA&AA==': ValueError,
b'====': b'',
}
failures = []
for value, expected in expectations.items():
try:
result = utils.strict_b64decode(value)
except Exception as e:
if inspect.isclass(expected) and issubclass(
expected, Exception):
if not isinstance(e, expected):
failures.append('%r raised %r (expected to raise %r)' %
(value, e, expected))
else:
failures.append('%r raised %r (expected to return %r)' %
(value, e, expected))
else:
if inspect.isclass(expected) and issubclass(
expected, Exception):
failures.append('%r => %r (expected to raise %r)' %
(value, result, expected))
elif result != expected:
failures.append('%r => %r (expected %r)' % (
value, result, expected))
if failures:
self.fail('Invalid results from pure function:\n%s' %
'\n'.join(failures))
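    # get_partition_for_hash() derives the partition for a hex hash from the
    # given part power.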
def test_get_partition_for_hash(self):
hex_hash = 'af088baea4806dcaba30bf07d9e64c77'
self.assertEqual(43, utils.get_partition_for_hash(hex_hash, 6))
self.assertEqual(87, utils.get_partition_for_hash(hex_hash, 7))
self.assertEqual(350, utils.get_partition_for_hash(hex_hash, 9))
self.assertEqual(700, utils.get_partition_for_hash(hex_hash, 10))
self.assertEqual(1400, utils.get_partition_for_hash(hex_hash, 11))
self.assertEqual(0, utils.get_partition_for_hash(hex_hash, 0))
self.assertEqual(0, utils.get_partition_for_hash(hex_hash, -1))
def test_get_partition_from_path(self):
def do_test(path):
self.assertEqual(utils.get_partition_from_path('/s/n', path), 70)
self.assertEqual(utils.get_partition_from_path('/s/n/', path), 70)
path += '/'
self.assertEqual(utils.get_partition_from_path('/s/n', path), 70)
self.assertEqual(utils.get_partition_from_path('/s/n/', path), 70)
do_test('/s/n/d/o/70/c77/af088baea4806dcaba30bf07d9e64c77/f')
# also works with a hashdir
do_test('/s/n/d/o/70/c77/af088baea4806dcaba30bf07d9e64c77')
# or suffix dir
do_test('/s/n/d/o/70/c77')
# or even the part dir itself
do_test('/s/n/d/o/70')
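    # replace_partition_in_path() rewrites the partition component of an
    # object path for a new part power and validates the device dir prefix.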
def test_replace_partition_in_path(self):
# Check for new part = part * 2
old = '/s/n/d/o/700/c77/af088baea4806dcaba30bf07d9e64c77/f'
new = '/s/n/d/o/1400/c77/af088baea4806dcaba30bf07d9e64c77/f'
# Expected outcome
self.assertEqual(utils.replace_partition_in_path('/s/n/', old, 11),
new)
# Make sure there is no change if the part power didn't change
self.assertEqual(utils.replace_partition_in_path('/s/n', old, 10), old)
self.assertEqual(utils.replace_partition_in_path('/s/n/', new, 11),
new)
# Check for new part = part * 2 + 1
old = '/s/n/d/o/693/c77/ad708baea4806dcaba30bf07d9e64c77/f'
new = '/s/n/d/o/1387/c77/ad708baea4806dcaba30bf07d9e64c77/f'
# Expected outcome
self.assertEqual(utils.replace_partition_in_path('/s/n', old, 11), new)
# Make sure there is no change if the part power didn't change
self.assertEqual(utils.replace_partition_in_path('/s/n', old, 10), old)
self.assertEqual(utils.replace_partition_in_path('/s/n/', new, 11),
new)
# check hash_dir
old = '/s/n/d/o/700/c77/af088baea4806dcaba30bf07d9e64c77'
exp = '/s/n/d/o/1400/c77/af088baea4806dcaba30bf07d9e64c77'
actual = utils.replace_partition_in_path('/s/n', old, 11)
self.assertEqual(exp, actual)
actual = utils.replace_partition_in_path('/s/n', exp, 11)
self.assertEqual(exp, actual)
# check longer devices path
old = '/s/n/1/2/d/o/700/c77/af088baea4806dcaba30bf07d9e64c77'
exp = '/s/n/1/2/d/o/1400/c77/af088baea4806dcaba30bf07d9e64c77'
actual = utils.replace_partition_in_path('/s/n/1/2', old, 11)
self.assertEqual(exp, actual)
actual = utils.replace_partition_in_path('/s/n/1/2', exp, 11)
self.assertEqual(exp, actual)
# check empty devices path
old = '/d/o/700/c77/af088baea4806dcaba30bf07d9e64c77'
exp = '/d/o/1400/c77/af088baea4806dcaba30bf07d9e64c77'
actual = utils.replace_partition_in_path('', old, 11)
self.assertEqual(exp, actual)
actual = utils.replace_partition_in_path('', exp, 11)
self.assertEqual(exp, actual)
# check path validation
path = '/s/n/d/o/693/c77/ad708baea4806dcaba30bf07d9e64c77/f'
with self.assertRaises(ValueError) as cm:
utils.replace_partition_in_path('/s/n1', path, 11)
self.assertEqual(
"Path '/s/n/d/o/693/c77/ad708baea4806dcaba30bf07d9e64c77/f' "
"is not under device dir '/s/n1'", str(cm.exception))
# check path validation - path lacks leading /
path = 's/n/d/o/693/c77/ad708baea4806dcaba30bf07d9e64c77/f'
with self.assertRaises(ValueError) as cm:
utils.replace_partition_in_path('/s/n', path, 11)
self.assertEqual(
"Path 's/n/d/o/693/c77/ad708baea4806dcaba30bf07d9e64c77/f' "
"is not under device dir '/s/n'", str(cm.exception))
def test_round_robin_iter(self):
it1 = iter([1, 2, 3])
it2 = iter([4, 5])
it3 = iter([6, 7, 8, 9])
it4 = iter([])
rr_its = utils.round_robin_iter([it1, it2, it3, it4])
got = list(rr_its)
# Expect that items get fetched in a round-robin fashion from the
# iterators
self.assertListEqual([1, 4, 6, 2, 5, 7, 3, 8, 9], got)
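    # get_db_files() lists the existing DB files that share the given file's
    # hash prefix, ignoring other hashes and .pending files.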
@with_tempdir
def test_get_db_files(self, tempdir):
dbdir = os.path.join(tempdir, 'dbdir')
self.assertEqual([], utils.get_db_files(dbdir))
path_1 = os.path.join(dbdir, 'dbfile.db')
self.assertEqual([], utils.get_db_files(path_1))
os.mkdir(dbdir)
self.assertEqual([], utils.get_db_files(path_1))
with open(path_1, 'wb'):
pass
self.assertEqual([path_1], utils.get_db_files(path_1))
path_2 = os.path.join(dbdir, 'dbfile_2.db')
self.assertEqual([path_1], utils.get_db_files(path_2))
with open(path_2, 'wb'):
pass
self.assertEqual([path_1, path_2], utils.get_db_files(path_1))
self.assertEqual([path_1, path_2], utils.get_db_files(path_2))
path_3 = os.path.join(dbdir, 'dbfile_3.db')
self.assertEqual([path_1, path_2], utils.get_db_files(path_3))
with open(path_3, 'wb'):
pass
self.assertEqual([path_1, path_2, path_3], utils.get_db_files(path_1))
self.assertEqual([path_1, path_2, path_3], utils.get_db_files(path_2))
self.assertEqual([path_1, path_2, path_3], utils.get_db_files(path_3))
other_hash = os.path.join(dbdir, 'other.db')
self.assertEqual([], utils.get_db_files(other_hash))
other_hash = os.path.join(dbdir, 'other_1.db')
self.assertEqual([], utils.get_db_files(other_hash))
pending = os.path.join(dbdir, 'dbfile.pending')
self.assertEqual([path_1, path_2, path_3], utils.get_db_files(pending))
with open(pending, 'wb'):
pass
self.assertEqual([path_1, path_2, path_3], utils.get_db_files(pending))
self.assertEqual([path_1, path_2, path_3], utils.get_db_files(path_1))
self.assertEqual([path_1, path_2, path_3], utils.get_db_files(path_2))
self.assertEqual([path_1, path_2, path_3], utils.get_db_files(path_3))
self.assertEqual([], utils.get_db_files(dbdir))
os.unlink(path_1)
self.assertEqual([path_2, path_3], utils.get_db_files(path_1))
self.assertEqual([path_2, path_3], utils.get_db_files(path_2))
self.assertEqual([path_2, path_3], utils.get_db_files(path_3))
os.unlink(path_2)
self.assertEqual([path_3], utils.get_db_files(path_1))
self.assertEqual([path_3], utils.get_db_files(path_2))
self.assertEqual([path_3], utils.get_db_files(path_3))
os.unlink(path_3)
self.assertEqual([], utils.get_db_files(path_1))
self.assertEqual([], utils.get_db_files(path_2))
self.assertEqual([], utils.get_db_files(path_3))
self.assertEqual([], utils.get_db_files('/path/to/nowhere'))
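    # get_redirect_data() extracts a (container path, timestamp) redirect hint
    # from backend response headers, returns None when Location is absent, and
    # raises ValueError on invalid paths or timestamps.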
def test_get_redirect_data(self):
ts_now = utils.Timestamp.now()
headers = {'X-Backend-Redirect-Timestamp': ts_now.internal}
response = FakeResponse(200, headers, b'')
self.assertIsNone(utils.get_redirect_data(response))
headers = {'Location': '/a/c/o',
'X-Backend-Redirect-Timestamp': ts_now.internal}
response = FakeResponse(200, headers, b'')
path, ts = utils.get_redirect_data(response)
self.assertEqual('a/c', path)
self.assertEqual(ts_now, ts)
headers = {'Location': '/a/c',
'X-Backend-Redirect-Timestamp': ts_now.internal}
response = FakeResponse(200, headers, b'')
path, ts = utils.get_redirect_data(response)
self.assertEqual('a/c', path)
self.assertEqual(ts_now, ts)
def do_test(headers):
response = FakeResponse(200, headers, b'')
with self.assertRaises(ValueError) as cm:
utils.get_redirect_data(response)
return cm.exception
exc = do_test({'Location': '/a',
'X-Backend-Redirect-Timestamp': ts_now.internal})
self.assertIn('Invalid path', str(exc))
exc = do_test({'Location': '',
'X-Backend-Redirect-Timestamp': ts_now.internal})
self.assertIn('Invalid path', str(exc))
exc = do_test({'Location': '/a/c',
'X-Backend-Redirect-Timestamp': 'bad'})
self.assertIn('Invalid timestamp', str(exc))
exc = do_test({'Location': '/a/c'})
self.assertIn('Invalid timestamp', str(exc))
exc = do_test({'Location': '/a/c',
'X-Backend-Redirect-Timestamp': '-1'})
self.assertIn('Invalid timestamp', str(exc))
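    # load_pkg_resource() resolves 'egg:dist#name' (or bare) entry-point
    # specifiers via pkg_resources and rejects unknown URI schemes.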
@mock.patch('pkg_resources.load_entry_point')
def test_load_pkg_resource(self, mock_driver):
tests = {
('swift.diskfile', 'egg:swift#replication.fs'):
('swift', 'swift.diskfile', 'replication.fs'),
('swift.diskfile', 'egg:swift#erasure_coding.fs'):
('swift', 'swift.diskfile', 'erasure_coding.fs'),
('swift.section', 'egg:swift#thing.other'):
('swift', 'swift.section', 'thing.other'),
('swift.section', 'swift#thing.other'):
('swift', 'swift.section', 'thing.other'),
('swift.section', 'thing.other'):
('swift', 'swift.section', 'thing.other'),
}
for args, expected in tests.items():
utils.load_pkg_resource(*args)
mock_driver.assert_called_with(*expected)
with self.assertRaises(TypeError) as cm:
args = ('swift.diskfile', 'nog:swift#replication.fs')
utils.load_pkg_resource(*args)
self.assertEqual("Unhandled URI scheme: 'nog'", str(cm.exception))
@with_tempdir
def test_systemd_notify(self, tempdir):
m_sock = mock.Mock(connect=mock.Mock(), sendall=mock.Mock())
with mock.patch('swift.common.utils.socket.socket',
return_value=m_sock) as m_socket:
# No notification socket
m_socket.reset_mock()
m_sock.reset_mock()
utils.systemd_notify()
self.assertEqual(m_socket.call_count, 0)
self.assertEqual(m_sock.connect.call_count, 0)
self.assertEqual(m_sock.sendall.call_count, 0)
# File notification socket
m_socket.reset_mock()
m_sock.reset_mock()
os.environ['NOTIFY_SOCKET'] = 'foobar'
utils.systemd_notify()
m_socket.assert_called_once_with(socket.AF_UNIX, socket.SOCK_DGRAM)
m_sock.connect.assert_called_once_with('foobar')
m_sock.sendall.assert_called_once_with(b'READY=1')
self.assertNotIn('NOTIFY_SOCKET', os.environ)
# Abstract notification socket
m_socket.reset_mock()
m_sock.reset_mock()
os.environ['NOTIFY_SOCKET'] = '@foobar'
utils.systemd_notify()
m_socket.assert_called_once_with(socket.AF_UNIX, socket.SOCK_DGRAM)
m_sock.connect.assert_called_once_with('\0foobar')
m_sock.sendall.assert_called_once_with(b'READY=1')
self.assertNotIn('NOTIFY_SOCKET', os.environ)
# Test logger with connection error
m_sock = mock.Mock(connect=mock.Mock(side_effect=EnvironmentError),
sendall=mock.Mock())
m_logger = mock.Mock(debug=mock.Mock())
with mock.patch('swift.common.utils.socket.socket',
return_value=m_sock) as m_socket:
os.environ['NOTIFY_SOCKET'] = '@foobar'
m_sock.reset_mock()
m_logger.reset_mock()
utils.systemd_notify()
self.assertEqual(0, m_sock.sendall.call_count)
self.assertEqual(0, m_logger.debug.call_count)
m_sock.reset_mock()
m_logger.reset_mock()
utils.systemd_notify(logger=m_logger)
self.assertEqual(0, m_sock.sendall.call_count)
m_logger.debug.assert_called_once_with(
"Systemd notification failed", exc_info=True)
# Test it for real
def do_test_real_socket(socket_address, notify_socket):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
sock.settimeout(5)
sock.bind(socket_address)
os.environ['NOTIFY_SOCKET'] = notify_socket
utils.systemd_notify()
msg = sock.recv(512)
sock.close()
self.assertEqual(msg, b'READY=1')
self.assertNotIn('NOTIFY_SOCKET', os.environ)
# test file socket address
socket_path = os.path.join(tempdir, 'foobar')
do_test_real_socket(socket_path, socket_path)
if sys.platform.startswith('linux'):
# test abstract socket address
do_test_real_socket('\0foobar', '@foobar')
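    # On FIPS-enabled systems md5() must raise ValueError unless called with
    # usedforsecurity=False; otherwise it produces the expected digest.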
def test_md5_with_data(self):
if not self.fips_enabled:
digest = md5(self.md5_test_data).hexdigest()
self.assertEqual(digest, self.md5_digest)
else:
# on a FIPS enabled system, this throws a ValueError:
# [digital envelope routines: EVP_DigestInit_ex] disabled for FIPS
self.assertRaises(ValueError, md5, self.md5_test_data)
if not self.fips_enabled:
digest = md5(self.md5_test_data, usedforsecurity=True).hexdigest()
self.assertEqual(digest, self.md5_digest)
else:
self.assertRaises(
ValueError, md5, self.md5_test_data, usedforsecurity=True)
digest = md5(self.md5_test_data, usedforsecurity=False).hexdigest()
self.assertEqual(digest, self.md5_digest)
def test_md5_without_data(self):
if not self.fips_enabled:
test_md5 = md5()
test_md5.update(self.md5_test_data)
digest = test_md5.hexdigest()
self.assertEqual(digest, self.md5_digest)
else:
self.assertRaises(ValueError, md5)
if not self.fips_enabled:
test_md5 = md5(usedforsecurity=True)
test_md5.update(self.md5_test_data)
digest = test_md5.hexdigest()
self.assertEqual(digest, self.md5_digest)
else:
self.assertRaises(ValueError, md5, usedforsecurity=True)
test_md5 = md5(usedforsecurity=False)
test_md5.update(self.md5_test_data)
digest = test_md5.hexdigest()
self.assertEqual(digest, self.md5_digest)
@unittest.skipIf(sys.version_info.major == 2,
"hashlib.md5 does not raise TypeError here in py2")
def test_string_data_raises_type_error(self):
if not self.fips_enabled:
self.assertRaises(TypeError, hashlib.md5, u'foo')
self.assertRaises(TypeError, md5, u'foo')
self.assertRaises(
TypeError, md5, u'foo', usedforsecurity=True)
else:
self.assertRaises(ValueError, hashlib.md5, u'foo')
self.assertRaises(ValueError, md5, u'foo')
self.assertRaises(
ValueError, md5, u'foo', usedforsecurity=True)
self.assertRaises(
TypeError, md5, u'foo', usedforsecurity=False)
def test_none_data_raises_type_error(self):
if not self.fips_enabled:
self.assertRaises(TypeError, hashlib.md5, None)
self.assertRaises(TypeError, md5, None)
self.assertRaises(
TypeError, md5, None, usedforsecurity=True)
else:
self.assertRaises(ValueError, hashlib.md5, None)
self.assertRaises(ValueError, md5, None)
self.assertRaises(
ValueError, md5, None, usedforsecurity=True)
self.assertRaises(
TypeError, md5, None, usedforsecurity=False)
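# Tests for config_read_reseller_options(): parsing reseller_prefix lists and
# their per-prefix operator/service role and require_group options.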
class ResellerConfReader(unittest.TestCase):
def setUp(self):
self.default_rules = {'operator_roles': ['admin', 'swiftoperator'],
'service_roles': [],
'require_group': ''}
def test_defaults(self):
conf = {}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['AUTH_'])
self.assertEqual(options['AUTH_'], self.default_rules)
def test_same_as_default(self):
conf = {'reseller_prefix': 'AUTH',
'operator_roles': 'admin, swiftoperator'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['AUTH_'])
self.assertEqual(options['AUTH_'], self.default_rules)
def test_single_blank_reseller(self):
conf = {'reseller_prefix': ''}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, [''])
self.assertEqual(options[''], self.default_rules)
def test_single_blank_reseller_with_conf(self):
conf = {'reseller_prefix': '',
"''operator_roles": 'role1, role2'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, [''])
self.assertEqual(options[''].get('operator_roles'),
['role1', 'role2'])
self.assertEqual(options[''].get('service_roles'),
self.default_rules.get('service_roles'))
self.assertEqual(options[''].get('require_group'),
self.default_rules.get('require_group'))
def test_multiple_same_resellers(self):
conf = {'reseller_prefix': " '' , '' "}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, [''])
conf = {'reseller_prefix': '_, _'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['_'])
conf = {'reseller_prefix': 'AUTH, PRE2, AUTH, PRE2'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['AUTH_', 'PRE2_'])
def test_several_resellers_with_conf(self):
conf = {'reseller_prefix': 'PRE1, PRE2',
'PRE1_operator_roles': 'role1, role2',
'PRE1_service_roles': 'role3, role4',
'PRE2_operator_roles': 'role5',
'PRE2_service_roles': 'role6',
'PRE2_require_group': 'pre2_group'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['PRE1_', 'PRE2_'])
self.assertEqual(set(['role1', 'role2']),
set(options['PRE1_'].get('operator_roles')))
self.assertEqual(['role5'],
options['PRE2_'].get('operator_roles'))
self.assertEqual(set(['role3', 'role4']),
set(options['PRE1_'].get('service_roles')))
self.assertEqual(['role6'], options['PRE2_'].get('service_roles'))
self.assertEqual('', options['PRE1_'].get('require_group'))
self.assertEqual('pre2_group', options['PRE2_'].get('require_group'))
def test_several_resellers_first_blank(self):
conf = {'reseller_prefix': " '' , PRE2",
"''operator_roles": 'role1, role2',
"''service_roles": 'role3, role4',
'PRE2_operator_roles': 'role5',
'PRE2_service_roles': 'role6',
'PRE2_require_group': 'pre2_group'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['', 'PRE2_'])
self.assertEqual(set(['role1', 'role2']),
set(options[''].get('operator_roles')))
self.assertEqual(['role5'],
options['PRE2_'].get('operator_roles'))
self.assertEqual(set(['role3', 'role4']),
set(options[''].get('service_roles')))
self.assertEqual(['role6'], options['PRE2_'].get('service_roles'))
self.assertEqual('', options[''].get('require_group'))
self.assertEqual('pre2_group', options['PRE2_'].get('require_group'))
def test_several_resellers_with_blank_comma(self):
conf = {'reseller_prefix': "AUTH , '', PRE2",
"''operator_roles": 'role1, role2',
"''service_roles": 'role3, role4',
'PRE2_operator_roles': 'role5',
'PRE2_service_roles': 'role6',
'PRE2_require_group': 'pre2_group'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['AUTH_', '', 'PRE2_'])
self.assertEqual(set(['admin', 'swiftoperator']),
set(options['AUTH_'].get('operator_roles')))
self.assertEqual(set(['role1', 'role2']),
set(options[''].get('operator_roles')))
self.assertEqual(['role5'],
options['PRE2_'].get('operator_roles'))
self.assertEqual([],
options['AUTH_'].get('service_roles'))
self.assertEqual(set(['role3', 'role4']),
set(options[''].get('service_roles')))
self.assertEqual(['role6'], options['PRE2_'].get('service_roles'))
self.assertEqual('', options['AUTH_'].get('require_group'))
self.assertEqual('', options[''].get('require_group'))
self.assertEqual('pre2_group', options['PRE2_'].get('require_group'))
def test_stray_comma(self):
conf = {'reseller_prefix': "AUTH ,, PRE2",
"''operator_roles": 'role1, role2',
"''service_roles": 'role3, role4',
'PRE2_operator_roles': 'role5',
'PRE2_service_roles': 'role6',
'PRE2_require_group': 'pre2_group'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['AUTH_', 'PRE2_'])
self.assertEqual(set(['admin', 'swiftoperator']),
set(options['AUTH_'].get('operator_roles')))
self.assertEqual(['role5'],
options['PRE2_'].get('operator_roles'))
self.assertEqual([],
options['AUTH_'].get('service_roles'))
self.assertEqual(['role6'], options['PRE2_'].get('service_roles'))
self.assertEqual('', options['AUTH_'].get('require_group'))
self.assertEqual('pre2_group', options['PRE2_'].get('require_group'))
def test_multiple_stray_commas_resellers(self):
conf = {'reseller_prefix': ' , , ,'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, [''])
self.assertEqual(options[''], self.default_rules)
def test_unprefixed_options(self):
conf = {'reseller_prefix': "AUTH , '', PRE2",
"operator_roles": 'role1, role2',
"service_roles": 'role3, role4',
'require_group': 'auth_blank_group',
'PRE2_operator_roles': 'role5',
'PRE2_service_roles': 'role6',
'PRE2_require_group': 'pre2_group'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['AUTH_', '', 'PRE2_'])
self.assertEqual(set(['role1', 'role2']),
set(options['AUTH_'].get('operator_roles')))
self.assertEqual(set(['role1', 'role2']),
set(options[''].get('operator_roles')))
self.assertEqual(['role5'],
options['PRE2_'].get('operator_roles'))
self.assertEqual(set(['role3', 'role4']),
set(options['AUTH_'].get('service_roles')))
self.assertEqual(set(['role3', 'role4']),
set(options[''].get('service_roles')))
self.assertEqual(['role6'], options['PRE2_'].get('service_roles'))
self.assertEqual('auth_blank_group',
options['AUTH_'].get('require_group'))
self.assertEqual('auth_blank_group', options[''].get('require_group'))
self.assertEqual('pre2_group', options['PRE2_'].get('require_group'))
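# Tests for unlink_older_than()/unlink_paths_older_than(), which remove files
# whose mtime is older than a given timestamp.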
class TestUnlinkOlder(unittest.TestCase):
def setUp(self):
self.tempdir = mkdtemp()
self.mtime = {}
self.ts = make_timestamp_iter()
def tearDown(self):
rmtree(self.tempdir, ignore_errors=True)
def touch(self, fpath, mtime=None):
self.mtime[fpath] = mtime or next(self.ts)
open(fpath, 'w')
@contextlib.contextmanager
def high_resolution_getmtime(self):
orig_getmtime = os.path.getmtime
def mock_getmtime(fpath):
mtime = self.mtime.get(fpath)
if mtime is None:
mtime = orig_getmtime(fpath)
return mtime
with mock.patch('os.path.getmtime', mock_getmtime):
yield
def test_unlink_older_than_path_not_exists(self):
path = os.path.join(self.tempdir, 'does-not-exist')
# just make sure it doesn't blow up
utils.unlink_older_than(path, next(self.ts))
def test_unlink_older_than_file(self):
path = os.path.join(self.tempdir, 'some-file')
self.touch(path)
with self.assertRaises(OSError) as ctx:
utils.unlink_older_than(path, next(self.ts))
self.assertEqual(ctx.exception.errno, errno.ENOTDIR)
def test_unlink_older_than_now(self):
self.touch(os.path.join(self.tempdir, 'test'))
with self.high_resolution_getmtime():
utils.unlink_older_than(self.tempdir, next(self.ts))
self.assertEqual([], os.listdir(self.tempdir))
def test_unlink_not_old_enough(self):
start = next(self.ts)
self.touch(os.path.join(self.tempdir, 'test'))
with self.high_resolution_getmtime():
utils.unlink_older_than(self.tempdir, start)
self.assertEqual(['test'], os.listdir(self.tempdir))
def test_unlink_mixed(self):
self.touch(os.path.join(self.tempdir, 'first'))
cutoff = next(self.ts)
self.touch(os.path.join(self.tempdir, 'second'))
with self.high_resolution_getmtime():
utils.unlink_older_than(self.tempdir, cutoff)
self.assertEqual(['second'], os.listdir(self.tempdir))
def test_unlink_paths(self):
paths = []
for item in ('first', 'second', 'third'):
path = os.path.join(self.tempdir, item)
self.touch(path)
paths.append(path)
        # don't unlink everything
with self.high_resolution_getmtime():
utils.unlink_paths_older_than(paths[:2], next(self.ts))
self.assertEqual(['third'], os.listdir(self.tempdir))
def test_unlink_empty_paths(self):
# just make sure it doesn't blow up
utils.unlink_paths_older_than([], next(self.ts))
def test_unlink_not_exists_paths(self):
path = os.path.join(self.tempdir, 'does-not-exist')
# just make sure it doesn't blow up
utils.unlink_paths_older_than([path], next(self.ts))
class TestFileLikeIter(unittest.TestCase):
def test_iter_file_iter(self):
in_iter = [b'abc', b'de', b'fghijk', b'l']
chunks = []
for chunk in utils.FileLikeIter(in_iter):
chunks.append(chunk)
self.assertEqual(chunks, in_iter)
def test_next(self):
in_iter = [b'abc', b'de', b'fghijk', b'l']
chunks = []
iter_file = utils.FileLikeIter(in_iter)
while True:
try:
chunk = next(iter_file)
except StopIteration:
break
chunks.append(chunk)
self.assertEqual(chunks, in_iter)
def test_read(self):
in_iter = [b'abc', b'de', b'fghijk', b'l']
iter_file = utils.FileLikeIter(in_iter)
self.assertEqual(iter_file.read(), b''.join(in_iter))
def test_read_with_size(self):
in_iter = [b'abc', b'de', b'fghijk', b'l']
chunks = []
iter_file = utils.FileLikeIter(in_iter)
while True:
chunk = iter_file.read(2)
if not chunk:
break
self.assertTrue(len(chunk) <= 2)
chunks.append(chunk)
self.assertEqual(b''.join(chunks), b''.join(in_iter))
def test_read_with_size_zero(self):
# makes little sense, but file supports it, so...
self.assertEqual(utils.FileLikeIter(b'abc').read(0), b'')
def test_readline(self):
in_iter = [b'abc\n', b'd', b'\nef', b'g\nh', b'\nij\n\nk\n',
b'trailing.']
lines = []
iter_file = utils.FileLikeIter(in_iter)
while True:
line = iter_file.readline()
if not line:
break
lines.append(line)
self.assertEqual(
lines,
[v if v == b'trailing.' else v + b'\n'
for v in b''.join(in_iter).split(b'\n')])
def test_readline2(self):
self.assertEqual(
utils.FileLikeIter([b'abc', b'def\n']).readline(4),
b'abcd')
def test_readline3(self):
self.assertEqual(
utils.FileLikeIter([b'a' * 1111, b'bc\ndef']).readline(),
(b'a' * 1111) + b'bc\n')
def test_readline_with_size(self):
in_iter = [b'abc\n', b'd', b'\nef', b'g\nh', b'\nij\n\nk\n',
b'trailing.']
lines = []
iter_file = utils.FileLikeIter(in_iter)
while True:
line = iter_file.readline(2)
if not line:
break
lines.append(line)
self.assertEqual(
lines,
[b'ab', b'c\n', b'd\n', b'ef', b'g\n', b'h\n', b'ij', b'\n', b'\n',
b'k\n', b'tr', b'ai', b'li', b'ng', b'.'])
def test_readlines(self):
in_iter = [b'abc\n', b'd', b'\nef', b'g\nh', b'\nij\n\nk\n',
b'trailing.']
lines = utils.FileLikeIter(in_iter).readlines()
self.assertEqual(
lines,
[v if v == b'trailing.' else v + b'\n'
for v in b''.join(in_iter).split(b'\n')])
def test_readlines_with_size(self):
in_iter = [b'abc\n', b'd', b'\nef', b'g\nh', b'\nij\n\nk\n',
b'trailing.']
iter_file = utils.FileLikeIter(in_iter)
lists_of_lines = []
while True:
lines = iter_file.readlines(2)
if not lines:
break
lists_of_lines.append(lines)
self.assertEqual(
lists_of_lines,
[[b'ab'], [b'c\n'], [b'd\n'], [b'ef'], [b'g\n'], [b'h\n'], [b'ij'],
[b'\n', b'\n'], [b'k\n'], [b'tr'], [b'ai'], [b'li'], [b'ng'],
[b'.']])
def test_close(self):
iter_file = utils.FileLikeIter([b'a', b'b', b'c'])
self.assertEqual(next(iter_file), b'a')
iter_file.close()
self.assertTrue(iter_file.closed)
self.assertRaises(ValueError, iter_file.next)
self.assertRaises(ValueError, iter_file.read)
self.assertRaises(ValueError, iter_file.readline)
self.assertRaises(ValueError, iter_file.readlines)
# Just make sure repeated close calls don't raise an Exception
iter_file.close()
self.assertTrue(iter_file.closed)
def test_get_hub(self):
        # This test mocks the eventlet.green.select module without poll,
        # as in eventlet > 0.20:
        # https://github.com/eventlet/eventlet/commit/614a20462
        # We add __original_module_select to sys.modules to mock usage
        # of eventlet.patcher.original.
class SelectWithPoll(object):
def poll():
pass
class SelectWithoutPoll(object):
pass
        # Platform with poll() that calls get_hub before eventlet patching
with mock.patch.dict('sys.modules',
{'select': SelectWithPoll,
'__original_module_select': SelectWithPoll}):
self.assertEqual(utils.get_hub(), 'poll')
        # Platform with poll() that calls get_hub after eventlet patching
with mock.patch.dict('sys.modules',
{'select': SelectWithoutPoll,
'__original_module_select': SelectWithPoll}):
self.assertEqual(utils.get_hub(), 'poll')
# Platform without poll() -- before or after patching doesn't matter
with mock.patch.dict('sys.modules',
{'select': SelectWithoutPoll,
'__original_module_select': SelectWithoutPoll}):
self.assertEqual(utils.get_hub(), 'selects')
class TestStatsdLogging(unittest.TestCase):
def setUp(self):
def fake_getaddrinfo(host, port, *args):
# this is what a real getaddrinfo('localhost', port,
# socket.AF_INET) returned once
return [(socket.AF_INET, # address family
socket.SOCK_STREAM, # socket type
socket.IPPROTO_TCP, # socket protocol
'', # canonical name,
('127.0.0.1', port)), # socket address
(socket.AF_INET,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP,
'',
('127.0.0.1', port))]
self.real_getaddrinfo = utils.socket.getaddrinfo
self.getaddrinfo_patcher = mock.patch.object(
utils.socket, 'getaddrinfo', fake_getaddrinfo)
self.mock_getaddrinfo = self.getaddrinfo_patcher.start()
self.addCleanup(self.getaddrinfo_patcher.stop)
def test_get_logger_statsd_client_not_specified(self):
logger = utils.get_logger({}, 'some-name', log_route='some-route')
# white-box construction validation
self.assertIsNone(logger.logger.statsd_client)
def test_get_logger_statsd_client_defaults(self):
logger = utils.get_logger({'log_statsd_host': 'some.host.com'},
'some-name', log_route='some-route')
# white-box construction validation
self.assertTrue(isinstance(logger.logger.statsd_client,
utils.StatsdClient))
self.assertEqual(logger.logger.statsd_client._host, 'some.host.com')
self.assertEqual(logger.logger.statsd_client._port, 8125)
self.assertEqual(logger.logger.statsd_client._prefix, 'some-name.')
self.assertEqual(logger.logger.statsd_client._default_sample_rate, 1)
logger2 = utils.get_logger(
{'log_statsd_host': 'some.host.com'},
'other-name', log_route='some-route',
statsd_tail_prefix='some-name.more-specific')
self.assertEqual(logger.logger.statsd_client._prefix,
'some-name.more-specific.')
self.assertEqual(logger2.logger.statsd_client._prefix,
'some-name.more-specific.')
# note: set_statsd_prefix is deprecated
logger2 = utils.get_logger({'log_statsd_host': 'some.host.com'},
'other-name', log_route='some-route')
logger.set_statsd_prefix('some-name.more-specific')
self.assertEqual(logger.logger.statsd_client._prefix,
'some-name.more-specific.')
self.assertEqual(logger2.logger.statsd_client._prefix,
'some-name.more-specific.')
logger.set_statsd_prefix('')
self.assertEqual(logger.logger.statsd_client._prefix, '')
self.assertEqual(logger2.logger.statsd_client._prefix, '')
def test_get_logger_statsd_client_non_defaults(self):
conf = {
'log_statsd_host': 'another.host.com',
'log_statsd_port': '9876',
'log_statsd_default_sample_rate': '0.75',
'log_statsd_sample_rate_factor': '0.81',
'log_statsd_metric_prefix': 'tomato.sauce',
}
logger = utils.get_logger(conf, 'some-name', log_route='some-route')
self.assertEqual(logger.logger.statsd_client._prefix,
'tomato.sauce.some-name.')
logger = utils.get_logger(conf, 'other-name', log_route='some-route',
statsd_tail_prefix='some-name.more-specific')
self.assertEqual(logger.logger.statsd_client._prefix,
'tomato.sauce.some-name.more-specific.')
# note: set_statsd_prefix is deprecated
logger.set_statsd_prefix('some-name.more-specific')
self.assertEqual(logger.logger.statsd_client._prefix,
'tomato.sauce.some-name.more-specific.')
logger.set_statsd_prefix('')
self.assertEqual(logger.logger.statsd_client._prefix, 'tomato.sauce.')
self.assertEqual(logger.logger.statsd_client._host, 'another.host.com')
self.assertEqual(logger.logger.statsd_client._port, 9876)
self.assertEqual(logger.logger.statsd_client._default_sample_rate,
0.75)
self.assertEqual(logger.logger.statsd_client._sample_rate_factor,
0.81)
def test_statsd_set_prefix_deprecation(self):
conf = {'log_statsd_host': 'another.host.com'}
with warnings.catch_warnings(record=True) as cm:
warnings.resetwarnings()
warnings.simplefilter('always', DeprecationWarning)
logger = utils.get_logger(
conf, 'some-name', log_route='some-route')
logger.logger.statsd_client.set_prefix('some-name.more-specific')
msgs = [str(warning.message)
for warning in cm
if str(warning.message).startswith('set_prefix')]
self.assertEqual(
['set_prefix() is deprecated; use the ``tail_prefix`` argument of '
'the constructor when instantiating the class instead.'],
msgs)
with warnings.catch_warnings(record=True) as cm:
warnings.resetwarnings()
warnings.simplefilter('always', DeprecationWarning)
logger = utils.get_logger(
conf, 'some-name', log_route='some-route')
logger.set_statsd_prefix('some-name.more-specific')
msgs = [str(warning.message)
for warning in cm
if str(warning.message).startswith('set_prefix')]
self.assertEqual(
['set_prefix() is deprecated; use the ``tail_prefix`` argument of '
'the constructor when instantiating the class instead.'],
msgs)
def test_ipv4_or_ipv6_hostname_defaults_to_ipv4(self):
def stub_getaddrinfo_both_ipv4_and_ipv6(host, port, family, *rest):
if family == socket.AF_INET:
return [(socket.AF_INET, 'blah', 'blah', 'blah',
('127.0.0.1', int(port)))]
elif family == socket.AF_INET6:
# Implemented so an incorrectly ordered implementation (IPv6
# then IPv4) would realistically fail.
return [(socket.AF_INET6, 'blah', 'blah', 'blah',
('::1', int(port), 0, 0))]
with mock.patch.object(utils.socket, 'getaddrinfo',
new=stub_getaddrinfo_both_ipv4_and_ipv6):
logger = utils.get_logger({
'log_statsd_host': 'localhost',
'log_statsd_port': '9876',
}, 'some-name', log_route='some-route')
statsd_client = logger.logger.statsd_client
self.assertEqual(statsd_client._sock_family, socket.AF_INET)
self.assertEqual(statsd_client._target, ('localhost', 9876))
got_sock = statsd_client._open_socket()
self.assertEqual(got_sock.family, socket.AF_INET)
def test_ipv4_instantiation_and_socket_creation(self):
logger = utils.get_logger({
'log_statsd_host': '127.0.0.1',
'log_statsd_port': '9876',
}, 'some-name', log_route='some-route')
statsd_client = logger.logger.statsd_client
self.assertEqual(statsd_client._sock_family, socket.AF_INET)
self.assertEqual(statsd_client._target, ('127.0.0.1', 9876))
got_sock = statsd_client._open_socket()
self.assertEqual(got_sock.family, socket.AF_INET)
def test_ipv6_instantiation_and_socket_creation(self):
# We have to check the given hostname or IP for IPv4/IPv6 on logger
# instantiation so we don't call getaddrinfo() too often and don't have
# to call bind() on our socket to detect IPv4/IPv6 on every send.
#
# This test patches over the existing mock. If we just stop the
# existing mock, then unittest.exit() blows up, but stacking
# real-fake-fake works okay.
calls = []
def fake_getaddrinfo(host, port, family, *args):
calls.append(family)
if len(calls) == 1:
raise socket.gaierror
# this is what a real getaddrinfo('::1', port,
# socket.AF_INET6) returned once
return [(socket.AF_INET6,
socket.SOCK_STREAM,
socket.IPPROTO_TCP,
'', ('::1', port, 0, 0)),
(socket.AF_INET6,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP,
'',
('::1', port, 0, 0))]
with mock.patch.object(utils.socket, 'getaddrinfo', fake_getaddrinfo):
logger = utils.get_logger({
'log_statsd_host': '::1',
'log_statsd_port': '9876',
}, 'some-name', log_route='some-route')
statsd_client = logger.logger.statsd_client
self.assertEqual([socket.AF_INET, socket.AF_INET6], calls)
self.assertEqual(statsd_client._sock_family, socket.AF_INET6)
self.assertEqual(statsd_client._target, ('::1', 9876, 0, 0))
got_sock = statsd_client._open_socket()
self.assertEqual(got_sock.family, socket.AF_INET6)
def test_bad_hostname_instantiation(self):
with mock.patch.object(utils.socket, 'getaddrinfo',
side_effect=utils.socket.gaierror("whoops")):
logger = utils.get_logger({
'log_statsd_host': 'i-am-not-a-hostname-or-ip',
'log_statsd_port': '9876',
}, 'some-name', log_route='some-route')
statsd_client = logger.logger.statsd_client
self.assertEqual(statsd_client._sock_family, socket.AF_INET)
self.assertEqual(statsd_client._target,
('i-am-not-a-hostname-or-ip', 9876))
got_sock = statsd_client._open_socket()
self.assertEqual(got_sock.family, socket.AF_INET)
# Maybe the DNS server gets fixed in a bit and it starts working... or
# maybe the DNS record hadn't propagated yet. In any case, failed
# statsd sends will warn in the logs until the DNS failure or invalid
# IP address in the configuration is fixed.
def test_sending_ipv6(self):
def fake_getaddrinfo(host, port, *args):
# this is what a real getaddrinfo('::1', port,
# socket.AF_INET6) returned once
return [(socket.AF_INET6,
socket.SOCK_STREAM,
socket.IPPROTO_TCP,
'', ('::1', port, 0, 0)),
(socket.AF_INET6,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP,
'',
('::1', port, 0, 0))]
with mock.patch.object(utils.socket, 'getaddrinfo', fake_getaddrinfo):
logger = utils.get_logger({
'log_statsd_host': '::1',
'log_statsd_port': '9876',
}, 'some-name', log_route='some-route')
statsd_client = logger.logger.statsd_client
fl = debug_logger()
statsd_client.logger = fl
mock_socket = MockUdpSocket()
statsd_client._open_socket = lambda *_: mock_socket
logger.increment('tunafish')
self.assertEqual(fl.get_lines_for_level('warning'), [])
self.assertEqual(mock_socket.sent,
[(b'some-name.tunafish:1|c', ('::1', 9876, 0, 0))])
def test_no_exception_when_cant_send_udp_packet(self):
logger = utils.get_logger({'log_statsd_host': 'some.host.com'})
statsd_client = logger.logger.statsd_client
fl = debug_logger()
statsd_client.logger = fl
mock_socket = MockUdpSocket(sendto_errno=errno.EPERM)
statsd_client._open_socket = lambda *_: mock_socket
logger.increment('tunafish')
expected = ["Error sending UDP message to ('some.host.com', 8125): "
"[Errno 1] test errno 1"]
self.assertEqual(fl.get_lines_for_level('warning'), expected)
def test_sample_rates(self):
logger = utils.get_logger({'log_statsd_host': 'some.host.com'})
mock_socket = MockUdpSocket()
# encapsulation? what's that?
statsd_client = logger.logger.statsd_client
self.assertTrue(statsd_client.random is random.random)
statsd_client._open_socket = lambda *_: mock_socket
statsd_client.random = lambda: 0.50001
logger.increment('tribbles', sample_rate=0.5)
self.assertEqual(len(mock_socket.sent), 0)
statsd_client.random = lambda: 0.49999
logger.increment('tribbles', sample_rate=0.5)
self.assertEqual(len(mock_socket.sent), 1)
payload = mock_socket.sent[0][0]
self.assertTrue(payload.endswith(b"|@0.5"))
def test_sample_rates_with_sample_rate_factor(self):
logger = utils.get_logger({
'log_statsd_host': 'some.host.com',
'log_statsd_default_sample_rate': '0.82',
'log_statsd_sample_rate_factor': '0.91',
})
effective_sample_rate = 0.82 * 0.91
mock_socket = MockUdpSocket()
# encapsulation? what's that?
statsd_client = logger.logger.statsd_client
self.assertTrue(statsd_client.random is random.random)
statsd_client._open_socket = lambda *_: mock_socket
statsd_client.random = lambda: effective_sample_rate + 0.001
logger.increment('tribbles')
self.assertEqual(len(mock_socket.sent), 0)
statsd_client.random = lambda: effective_sample_rate - 0.001
logger.increment('tribbles')
self.assertEqual(len(mock_socket.sent), 1)
payload = mock_socket.sent[0][0]
suffix = "|@%s" % effective_sample_rate
if six.PY3:
suffix = suffix.encode('utf-8')
self.assertTrue(payload.endswith(suffix), payload)
effective_sample_rate = 0.587 * 0.91
statsd_client.random = lambda: effective_sample_rate - 0.001
logger.increment('tribbles', sample_rate=0.587)
self.assertEqual(len(mock_socket.sent), 2)
payload = mock_socket.sent[1][0]
suffix = "|@%s" % effective_sample_rate
if six.PY3:
suffix = suffix.encode('utf-8')
self.assertTrue(payload.endswith(suffix), payload)
def test_timing_stats(self):
class MockController(object):
def __init__(self, status):
self.status = status
self.logger = self
self.args = ()
self.called = 'UNKNOWN'
def timing_since(self, *args):
self.called = 'timing'
self.args = args
@utils.timing_stats()
def METHOD(controller):
return Response(status=controller.status)
mock_controller = MockController(200)
METHOD(mock_controller)
self.assertEqual(mock_controller.called, 'timing')
self.assertEqual(len(mock_controller.args), 2)
self.assertEqual(mock_controller.args[0], 'METHOD.timing')
self.assertTrue(mock_controller.args[1] > 0)
mock_controller = MockController(400)
METHOD(mock_controller)
self.assertEqual(len(mock_controller.args), 2)
self.assertEqual(mock_controller.called, 'timing')
self.assertEqual(mock_controller.args[0], 'METHOD.timing')
self.assertTrue(mock_controller.args[1] > 0)
mock_controller = MockController(404)
METHOD(mock_controller)
self.assertEqual(len(mock_controller.args), 2)
self.assertEqual(mock_controller.called, 'timing')
self.assertEqual(mock_controller.args[0], 'METHOD.timing')
self.assertTrue(mock_controller.args[1] > 0)
mock_controller = MockController(412)
METHOD(mock_controller)
self.assertEqual(len(mock_controller.args), 2)
self.assertEqual(mock_controller.called, 'timing')
self.assertEqual(mock_controller.args[0], 'METHOD.timing')
self.assertTrue(mock_controller.args[1] > 0)
mock_controller = MockController(416)
METHOD(mock_controller)
self.assertEqual(len(mock_controller.args), 2)
self.assertEqual(mock_controller.called, 'timing')
self.assertEqual(mock_controller.args[0], 'METHOD.timing')
self.assertTrue(mock_controller.args[1] > 0)
mock_controller = MockController(500)
METHOD(mock_controller)
self.assertEqual(len(mock_controller.args), 2)
self.assertEqual(mock_controller.called, 'timing')
self.assertEqual(mock_controller.args[0], 'METHOD.errors.timing')
self.assertTrue(mock_controller.args[1] > 0)
mock_controller = MockController(507)
METHOD(mock_controller)
self.assertEqual(len(mock_controller.args), 2)
self.assertEqual(mock_controller.called, 'timing')
self.assertEqual(mock_controller.args[0], 'METHOD.errors.timing')
self.assertTrue(mock_controller.args[1] > 0)
class UnsafeXrange(object):
"""
Like range(limit), but with extra context switching to screw things up.
"""
def __init__(self, upper_bound):
self.current = 0
self.concurrent_calls = 0
self.upper_bound = upper_bound
self.concurrent_call = False
def __iter__(self):
return self
def next(self):
if self.concurrent_calls > 0:
self.concurrent_call = True
self.concurrent_calls += 1
try:
if self.current >= self.upper_bound:
raise StopIteration
else:
val = self.current
self.current += 1
eventlet.sleep() # yield control
return val
finally:
self.concurrent_calls -= 1
__next__ = next
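# affinity_key_function("r1z2=<priority>") yields a sort key: matching nodes
# sort by their priority value (lower first), non-matching nodes sort last.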
class TestAffinityKeyFunction(unittest.TestCase):
def setUp(self):
self.nodes = [dict(id=0, region=1, zone=1),
dict(id=1, region=1, zone=2),
dict(id=2, region=2, zone=1),
dict(id=3, region=2, zone=2),
dict(id=4, region=3, zone=1),
dict(id=5, region=3, zone=2),
dict(id=6, region=4, zone=0),
dict(id=7, region=4, zone=1)]
def test_single_region(self):
keyfn = utils.affinity_key_function("r3=1")
ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
self.assertEqual([4, 5, 0, 1, 2, 3, 6, 7], ids)
def test_bogus_value(self):
self.assertRaises(ValueError,
utils.affinity_key_function, "r3")
self.assertRaises(ValueError,
utils.affinity_key_function, "r3=elephant")
def test_empty_value(self):
# Empty's okay, it just means no preference
keyfn = utils.affinity_key_function("")
self.assertTrue(callable(keyfn))
ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
self.assertEqual([0, 1, 2, 3, 4, 5, 6, 7], ids)
def test_all_whitespace_value(self):
# Empty's okay, it just means no preference
keyfn = utils.affinity_key_function(" \n")
self.assertTrue(callable(keyfn))
ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
self.assertEqual([0, 1, 2, 3, 4, 5, 6, 7], ids)
def test_with_zone_zero(self):
keyfn = utils.affinity_key_function("r4z0=1")
ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
self.assertEqual([6, 0, 1, 2, 3, 4, 5, 7], ids)
def test_multiple(self):
keyfn = utils.affinity_key_function("r1=100, r4=200, r3z1=1")
ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
self.assertEqual([4, 0, 1, 6, 7, 2, 3, 5], ids)
def test_more_specific_after_less_specific(self):
keyfn = utils.affinity_key_function("r2=100, r2z2=50")
ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
self.assertEqual([3, 2, 0, 1, 4, 5, 6, 7], ids)
class TestAffinityLocalityPredicate(unittest.TestCase):
def setUp(self):
self.nodes = [dict(id=0, region=1, zone=1),
dict(id=1, region=1, zone=2),
dict(id=2, region=2, zone=1),
dict(id=3, region=2, zone=2),
dict(id=4, region=3, zone=1),
dict(id=5, region=3, zone=2),
dict(id=6, region=4, zone=0),
dict(id=7, region=4, zone=1)]
def test_empty(self):
pred = utils.affinity_locality_predicate('')
self.assertIsNone(pred)
def test_region(self):
pred = utils.affinity_locality_predicate('r1')
self.assertTrue(callable(pred))
ids = [n['id'] for n in self.nodes if pred(n)]
self.assertEqual([0, 1], ids)
def test_zone(self):
pred = utils.affinity_locality_predicate('r1z1')
self.assertTrue(callable(pred))
ids = [n['id'] for n in self.nodes if pred(n)]
self.assertEqual([0], ids)
def test_multiple(self):
pred = utils.affinity_locality_predicate('r1, r3, r4z0')
self.assertTrue(callable(pred))
ids = [n['id'] for n in self.nodes if pred(n)]
self.assertEqual([0, 1, 4, 5, 6], ids)
def test_invalid(self):
self.assertRaises(ValueError,
utils.affinity_locality_predicate, 'falafel')
self.assertRaises(ValueError,
utils.affinity_locality_predicate, 'r8zQ')
self.assertRaises(ValueError,
utils.affinity_locality_predicate, 'r2d2')
self.assertRaises(ValueError,
utils.affinity_locality_predicate, 'r1z1=1')
class TestRateLimitedIterator(unittest.TestCase):
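# Run the test body under patched time.time()/eventlet.sleep() so rate
# limiting can be exercised without real wall-clock delays.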
def run_under_pseudo_time(
self, func, *args, **kwargs):
curr_time = [42.0]
def my_time():
curr_time[0] += 0.001
return curr_time[0]
def my_sleep(duration):
curr_time[0] += 0.001
curr_time[0] += duration
with patch('time.time', my_time), \
patch('eventlet.sleep', my_sleep):
return func(*args, **kwargs)
def test_rate_limiting(self):
def testfunc():
limited_iterator = utils.RateLimitedIterator(range(9999), 100)
got = []
started_at = time.time()
try:
while time.time() - started_at < 0.1:
got.append(next(limited_iterator))
except StopIteration:
pass
return got
got = self.run_under_pseudo_time(testfunc)
# it's 11, not 10, because ratelimiting doesn't apply to the very
# first element.
self.assertEqual(len(got), 11)
def test_rate_limiting_sometimes(self):
def testfunc():
limited_iterator = utils.RateLimitedIterator(
range(9999), 100,
ratelimit_if=lambda item: item % 23 != 0)
got = []
started_at = time.time()
try:
while time.time() - started_at < 0.5:
got.append(next(limited_iterator))
except StopIteration:
pass
return got
got = self.run_under_pseudo_time(testfunc)
# we'd get 51 without the ratelimit_if, but because 0, 23 and 46
# weren't subject to ratelimiting, we get 54 instead
self.assertEqual(len(got), 54)
def test_limit_after(self):
def testfunc():
limited_iterator = utils.RateLimitedIterator(
range(9999), 100, limit_after=5)
got = []
started_at = time.time()
try:
while time.time() - started_at < 0.1:
got.append(next(limited_iterator))
except StopIteration:
pass
return got
got = self.run_under_pseudo_time(testfunc)
# it's 16, not 15, because ratelimiting doesn't apply to the very
# first element.
self.assertEqual(len(got), 16)
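# GreenthreadSafeIterator must serialize access so concurrent greenthreads
# never re-enter the wrapped (unsafe) iterator.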
class TestGreenthreadSafeIterator(unittest.TestCase):
def increment(self, iterable):
plus_ones = []
for n in iterable:
plus_ones.append(n + 1)
return plus_ones
def test_setup_works(self):
# it should work without concurrent access
self.assertEqual([0, 1, 2, 3], list(UnsafeXrange(4)))
iterable = UnsafeXrange(10)
pile = eventlet.GreenPile(2)
for _ in range(2):
pile.spawn(self.increment, iterable)
sorted([resp for resp in pile])
self.assertTrue(
iterable.concurrent_call, 'test setup is insufficiently crazy')
def test_access_is_serialized(self):
pile = eventlet.GreenPile(2)
unsafe_iterable = UnsafeXrange(10)
iterable = utils.GreenthreadSafeIterator(unsafe_iterable)
for _ in range(2):
pile.spawn(self.increment, iterable)
response = sorted(sum([resp for resp in pile], []))
self.assertEqual(list(range(1, 11)), response)
self.assertTrue(
not unsafe_iterable.concurrent_call, 'concurrent call occurred')
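# End-to-end statsd delegation tests: a real UDP socket plus a reader thread
# capture exactly what the logger sends over the wire.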
class TestStatsdLoggingDelegation(unittest.TestCase):
def setUp(self):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.sock.bind(('localhost', 0))
self.port = self.sock.getsockname()[1]
self.queue = Queue()
self.reader_thread = threading.Thread(target=self.statsd_reader)
self.reader_thread.daemon = True
self.reader_thread.start()
def tearDown(self):
# The "no-op when disabled" test doesn't set up a real logger, so
# create one here so we can tell the reader thread to stop.
if not getattr(self, 'logger', None):
self.logger = utils.get_logger({
'log_statsd_host': 'localhost',
'log_statsd_port': str(self.port),
}, 'some-name')
self.logger.increment('STOP')
self.reader_thread.join(timeout=4)
self.sock.close()
del self.logger
def statsd_reader(self):
while True:
try:
payload = self.sock.recv(4096)
if payload and b'STOP' in payload:
return 42
self.queue.put(payload)
except Exception as e:
sys.stderr.write('statsd_reader thread: %r' % (e,))
break
def _send_and_get(self, sender_fn, *args, **kwargs):
"""
Because the client library may not actually send a packet with
sample_rate < 1, we keep trying until we get one through.
"""
got = None
while not got:
sender_fn(*args, **kwargs)
try:
got = self.queue.get(timeout=0.5)
except Empty:
pass
return got
def assertStat(self, expected, sender_fn, *args, **kwargs):
got = self._send_and_get(sender_fn, *args, **kwargs)
if six.PY3:
got = got.decode('utf-8')
return self.assertEqual(expected, got)
def assertStatMatches(self, expected_regexp, sender_fn, *args, **kwargs):
got = self._send_and_get(sender_fn, *args, **kwargs)
if six.PY3:
got = got.decode('utf-8')
return self.assertTrue(re.search(expected_regexp, got),
[got, expected_regexp])
def test_methods_are_no_ops_when_not_enabled(self):
logger = utils.get_logger({
# No "log_statsd_host" means "disabled"
'log_statsd_port': str(self.port),
}, 'some-name')
# Delegate methods are no-ops
self.assertIsNone(logger.update_stats('foo', 88))
self.assertIsNone(logger.update_stats('foo', 88, 0.57))
self.assertIsNone(logger.update_stats('foo', 88,
sample_rate=0.61))
self.assertIsNone(logger.increment('foo'))
self.assertIsNone(logger.increment('foo', 0.57))
self.assertIsNone(logger.increment('foo', sample_rate=0.61))
self.assertIsNone(logger.decrement('foo'))
self.assertIsNone(logger.decrement('foo', 0.57))
self.assertIsNone(logger.decrement('foo', sample_rate=0.61))
self.assertIsNone(logger.timing('foo', 88.048))
self.assertIsNone(logger.timing('foo', 88.57, 0.34))
self.assertIsNone(logger.timing('foo', 88.998, sample_rate=0.82))
self.assertIsNone(logger.timing_since('foo', 8938))
self.assertIsNone(logger.timing_since('foo', 8948, 0.57))
self.assertIsNone(logger.timing_since('foo', 849398,
sample_rate=0.61))
# Now, the queue should be empty (no UDP packets sent)
self.assertRaises(Empty, self.queue.get_nowait)
def test_delegate_methods_with_no_default_sample_rate(self):
self.logger = utils.get_logger({
'log_statsd_host': 'localhost',
'log_statsd_port': str(self.port),
}, 'some-name')
self.assertStat('some-name.some.counter:1|c', self.logger.increment,
'some.counter')
self.assertStat('some-name.some.counter:-1|c', self.logger.decrement,
'some.counter')
self.assertStat('some-name.some.operation:4900.0|ms',
self.logger.timing, 'some.operation', 4.9 * 1000)
self.assertStatMatches(r'some-name\.another\.operation:\d+\.\d+\|ms',
self.logger.timing_since, 'another.operation',
time.time())
self.assertStat('some-name.another.counter:42|c',
self.logger.update_stats, 'another.counter', 42)
# Each call can override the sample_rate (also, bonus prefix test)
self.logger.set_statsd_prefix('pfx')
self.assertStat('pfx.some.counter:1|c|@0.972', self.logger.increment,
'some.counter', sample_rate=0.972)
self.assertStat('pfx.some.counter:-1|c|@0.972', self.logger.decrement,
'some.counter', sample_rate=0.972)
self.assertStat('pfx.some.operation:4900.0|ms|@0.972',
self.logger.timing, 'some.operation', 4.9 * 1000,
sample_rate=0.972)
self.assertStatMatches(r'pfx\.another\.op:\d+\.\d+\|ms\|@0\.972',
self.logger.timing_since, 'another.op',
time.time(), sample_rate=0.972)
self.assertStat('pfx.another.counter:3|c|@0.972',
self.logger.update_stats, 'another.counter', 3,
sample_rate=0.972)
# Can override sample_rate with non-keyword arg
self.logger.set_statsd_prefix('')
self.assertStat('some.counter:1|c|@0.939', self.logger.increment,
'some.counter', 0.939)
self.assertStat('some.counter:-1|c|@0.939', self.logger.decrement,
'some.counter', 0.939)
self.assertStat('some.operation:4900.0|ms|@0.939',
self.logger.timing, 'some.operation',
4.9 * 1000, 0.939)
self.assertStatMatches(r'another\.op:\d+\.\d+\|ms\|@0\.939',
self.logger.timing_since, 'another.op',
time.time(), 0.939)
self.assertStat('another.counter:3|c|@0.939',
self.logger.update_stats, 'another.counter', 3, 0.939)
def test_delegate_methods_with_default_sample_rate(self):
self.logger = utils.get_logger({
'log_statsd_host': 'localhost',
'log_statsd_port': str(self.port),
'log_statsd_default_sample_rate': '0.93',
}, 'pfx')
self.assertStat('pfx.some.counter:1|c|@0.93', self.logger.increment,
'some.counter')
self.assertStat('pfx.some.counter:-1|c|@0.93', self.logger.decrement,
'some.counter')
self.assertStat('pfx.some.operation:4760.0|ms|@0.93',
self.logger.timing, 'some.operation', 4.76 * 1000)
self.assertStatMatches(r'pfx\.another\.op:\d+\.\d+\|ms\|@0\.93',
self.logger.timing_since, 'another.op',
time.time())
self.assertStat('pfx.another.counter:3|c|@0.93',
self.logger.update_stats, 'another.counter', 3)
# Each call can override the sample_rate
self.assertStat('pfx.some.counter:1|c|@0.9912', self.logger.increment,
'some.counter', sample_rate=0.9912)
self.assertStat('pfx.some.counter:-1|c|@0.9912', self.logger.decrement,
'some.counter', sample_rate=0.9912)
self.assertStat('pfx.some.operation:4900.0|ms|@0.9912',
self.logger.timing, 'some.operation', 4.9 * 1000,
sample_rate=0.9912)
self.assertStatMatches(r'pfx\.another\.op:\d+\.\d+\|ms\|@0\.9912',
self.logger.timing_since, 'another.op',
time.time(), sample_rate=0.9912)
self.assertStat('pfx.another.counter:3|c|@0.9912',
self.logger.update_stats, 'another.counter', 3,
sample_rate=0.9912)
# Can override sample_rate with non-keyword arg
self.logger.set_statsd_prefix('')
self.assertStat('some.counter:1|c|@0.987654', self.logger.increment,
'some.counter', 0.987654)
self.assertStat('some.counter:-1|c|@0.987654', self.logger.decrement,
'some.counter', 0.987654)
self.assertStat('some.operation:4900.0|ms|@0.987654',
self.logger.timing, 'some.operation',
4.9 * 1000, 0.987654)
self.assertStatMatches(r'another\.op:\d+\.\d+\|ms\|@0\.987654',
self.logger.timing_since, 'another.op',
time.time(), 0.987654)
self.assertStat('another.counter:3|c|@0.987654',
self.logger.update_stats, 'another.counter',
3, 0.987654)
def test_delegate_methods_with_metric_prefix(self):
self.logger = utils.get_logger({
'log_statsd_host': 'localhost',
'log_statsd_port': str(self.port),
'log_statsd_metric_prefix': 'alpha.beta',
}, 'pfx')
self.assertStat('alpha.beta.pfx.some.counter:1|c',
self.logger.increment, 'some.counter')
self.assertStat('alpha.beta.pfx.some.counter:-1|c',
self.logger.decrement, 'some.counter')
self.assertStat('alpha.beta.pfx.some.operation:4760.0|ms',
self.logger.timing, 'some.operation', 4.76 * 1000)
self.assertStatMatches(
r'alpha\.beta\.pfx\.another\.op:\d+\.\d+\|ms',
self.logger.timing_since, 'another.op', time.time())
self.assertStat('alpha.beta.pfx.another.counter:3|c',
self.logger.update_stats, 'another.counter', 3)
self.logger.set_statsd_prefix('')
self.assertStat('alpha.beta.some.counter:1|c|@0.9912',
self.logger.increment, 'some.counter',
sample_rate=0.9912)
self.assertStat('alpha.beta.some.counter:-1|c|@0.9912',
self.logger.decrement, 'some.counter', 0.9912)
self.assertStat('alpha.beta.some.operation:4900.0|ms|@0.9912',
self.logger.timing, 'some.operation', 4.9 * 1000,
sample_rate=0.9912)
self.assertStatMatches(
r'alpha\.beta\.another\.op:\d+\.\d+\|ms\|@0\.9912',
self.logger.timing_since, 'another.op',
time.time(), sample_rate=0.9912)
self.assertStat('alpha.beta.another.counter:3|c|@0.9912',
self.logger.update_stats, 'another.counter', 3,
sample_rate=0.9912)
@reset_logger_state
def test_thread_locals(self):
logger = utils.get_logger(None)
# test the setter
logger.thread_locals = ('id', 'ip')
self.assertEqual(logger.thread_locals, ('id', 'ip'))
# reset
logger.thread_locals = (None, None)
self.assertEqual(logger.thread_locals, (None, None))
logger.txn_id = '1234'
logger.client_ip = '1.2.3.4'
self.assertEqual(logger.thread_locals, ('1234', '1.2.3.4'))
logger.txn_id = '5678'
logger.client_ip = '5.6.7.8'
self.assertEqual(logger.thread_locals, ('5678', '5.6.7.8'))
def test_no_fdatasync(self):
called = []
class NoFdatasync(object):
pass
def fsync(fd):
called.append(fd)
with patch('swift.common.utils.os', NoFdatasync()):
with patch('swift.common.utils.fsync', fsync):
utils.fdatasync(12345)
self.assertEqual(called, [12345])
def test_yes_fdatasync(self):
called = []
class YesFdatasync(object):
def fdatasync(self, fd):
called.append(fd)
with patch('swift.common.utils.os', YesFdatasync()):
utils.fdatasync(12345)
self.assertEqual(called, [12345])
def test_fsync_bad_fullsync(self):
class FCNTL(object):
F_FULLSYNC = 123
def fcntl(self, fd, op):
raise IOError(18)
with patch('swift.common.utils.fcntl', FCNTL()):
self.assertRaises(OSError, lambda: utils.fsync(12345))
def test_fsync_f_fullsync(self):
called = []
class FCNTL(object):
F_FULLSYNC = 123
def fcntl(self, fd, op):
called[:] = [fd, op]
return 0
with patch('swift.common.utils.fcntl', FCNTL()):
utils.fsync(12345)
self.assertEqual(called, [12345, 123])
def test_fsync_no_fullsync(self):
called = []
class FCNTL(object):
pass
def fsync(fd):
called.append(fd)
with patch('swift.common.utils.fcntl', FCNTL()):
with patch('os.fsync', fsync):
utils.fsync(12345)
self.assertEqual(called, [12345])
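# Logger adapters share thread_locals with the wrapped logger, so an update
# made through any adapter is visible everywhere.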
class TestSwiftLoggerAdapter(unittest.TestCase):
@reset_logger_state
def test_thread_locals(self):
logger = utils.get_logger({}, 'foo')
adapter1 = utils.SwiftLoggerAdapter(logger, {})
adapter2 = utils.SwiftLoggerAdapter(logger, {})
locals1 = ('tx_123', '1.2.3.4')
adapter1.thread_locals = locals1
self.assertEqual(adapter1.thread_locals, locals1)
self.assertEqual(adapter2.thread_locals, locals1)
self.assertEqual(logger.thread_locals, locals1)
locals2 = ('tx_456', '1.2.3.456')
logger.thread_locals = locals2
self.assertEqual(adapter1.thread_locals, locals2)
self.assertEqual(adapter2.thread_locals, locals2)
self.assertEqual(logger.thread_locals, locals2)
logger.thread_locals = (None, None)
def test_exception(self):
# verify that the adapter routes exception calls to utils.LogAdapter
# for special case handling
logger = utils.get_logger({})
adapter = utils.SwiftLoggerAdapter(logger, {})
try:
raise OSError(errno.ECONNREFUSED, 'oserror')
except OSError:
with mock.patch('logging.LoggerAdapter.error') as mocked:
adapter.exception('Caught')
mocked.assert_called_with('Caught: Connection refused')
class TestMetricsPrefixLoggerAdapter(unittest.TestCase):
def test_metric_prefix(self):
logger = utils.get_logger({}, 'logger_name')
adapter1 = utils.MetricsPrefixLoggerAdapter(logger, {}, 'one')
adapter2 = utils.MetricsPrefixLoggerAdapter(logger, {}, 'two')
adapter3 = utils.SwiftLoggerAdapter(logger, {})
self.assertEqual('logger_name', logger.name)
self.assertEqual('logger_name', adapter1.logger.name)
self.assertEqual('logger_name', adapter2.logger.name)
self.assertEqual('logger_name', adapter3.logger.name)
with mock.patch.object(logger, 'increment') as mock_increment:
adapter1.increment('test1')
adapter2.increment('test2')
adapter3.increment('test3')
logger.increment('test')
self.assertEqual(
[mock.call('one.test1'), mock.call('two.test2'),
mock.call('test3'), mock.call('test')],
mock_increment.call_args_list)
adapter1.metric_prefix = 'not one'
with mock.patch.object(logger, 'increment') as mock_increment:
adapter1.increment('test1')
adapter2.increment('test2')
adapter3.increment('test3')
logger.increment('test')
self.assertEqual(
[mock.call('not one.test1'), mock.call('two.test2'),
mock.call('test3'), mock.call('test')],
mock_increment.call_args_list)
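# audit_location_generator walks <devices>/<drive>/<datadir>/<partition>/<suffix>/<hash>
# and yields (object_path, device, partition) tuples.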
class TestAuditLocationGenerator(unittest.TestCase):
def test_drive_tree_access(self):
orig_listdir = utils.listdir
def _mock_utils_listdir(path):
if 'bad_part' in path:
raise OSError(errno.EACCES)
elif 'bad_suffix' in path:
raise OSError(errno.EACCES)
elif 'bad_hash' in path:
raise OSError(errno.EACCES)
else:
return orig_listdir(path)
# Check Raise on Bad partition
tmpdir = mkdtemp()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
obj_path = os.path.join(data, "bad_part")
with open(obj_path, "w"):
pass
part1 = os.path.join(data, "partition1")
os.makedirs(part1)
part2 = os.path.join(data, "partition2")
os.makedirs(part2)
with patch('swift.common.utils.listdir', _mock_utils_listdir):
audit = lambda: list(utils.audit_location_generator(
tmpdir, "data", mount_check=False))
self.assertRaises(OSError, audit)
rmtree(tmpdir)
# Check Raise on Bad Suffix
tmpdir = mkdtemp()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
part1 = os.path.join(data, "partition1")
os.makedirs(part1)
part2 = os.path.join(data, "partition2")
os.makedirs(part2)
obj_path = os.path.join(part1, "bad_suffix")
with open(obj_path, 'w'):
pass
suffix = os.path.join(part2, "suffix")
os.makedirs(suffix)
with patch('swift.common.utils.listdir', _mock_utils_listdir):
audit = lambda: list(utils.audit_location_generator(
tmpdir, "data", mount_check=False))
self.assertRaises(OSError, audit)
rmtree(tmpdir)
# Check Raise on Bad Hash
tmpdir = mkdtemp()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
part1 = os.path.join(data, "partition1")
os.makedirs(part1)
suffix = os.path.join(part1, "suffix")
os.makedirs(suffix)
hash1 = os.path.join(suffix, "hash1")
os.makedirs(hash1)
obj_path = os.path.join(suffix, "bad_hash")
with open(obj_path, 'w'):
pass
with patch('swift.common.utils.listdir', _mock_utils_listdir):
audit = lambda: list(utils.audit_location_generator(
tmpdir, "data", mount_check=False))
self.assertRaises(OSError, audit)
rmtree(tmpdir)
def test_non_dir_drive(self):
with temptree([]) as tmpdir:
logger = debug_logger()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
# Create a file that represents a non-dir drive
with open(os.path.join(tmpdir, 'asdf'), 'w'):
pass
locations = utils.audit_location_generator(
tmpdir, "data", mount_check=False, logger=logger
)
self.assertEqual(list(locations), [])
self.assertEqual(1, len(logger.get_lines_for_level('warning')))
# Test without the logger
locations = utils.audit_location_generator(
tmpdir, "data", mount_check=False
)
self.assertEqual(list(locations), [])
def test_mount_check_drive(self):
with temptree([]) as tmpdir:
logger = debug_logger()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
# Create a file that represents a non-dir drive
with open(os.path.join(tmpdir, 'asdf'), 'w'):
pass
locations = utils.audit_location_generator(
tmpdir, "data", mount_check=True, logger=logger
)
self.assertEqual(list(locations), [])
self.assertEqual(2, len(logger.get_lines_for_level('warning')))
# Test without the logger
locations = utils.audit_location_generator(
tmpdir, "data", mount_check=True
)
self.assertEqual(list(locations), [])
def test_non_dir_contents(self):
with temptree([]) as tmpdir:
logger = debug_logger()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
with open(os.path.join(data, "partition1"), "w"):
pass
partition = os.path.join(data, "partition2")
os.makedirs(partition)
with open(os.path.join(partition, "suffix1"), "w"):
pass
suffix = os.path.join(partition, "suffix2")
os.makedirs(suffix)
with open(os.path.join(suffix, "hash1"), "w"):
pass
locations = utils.audit_location_generator(
tmpdir, "data", mount_check=False, logger=logger
)
self.assertEqual(list(locations), [])
def test_find_objects(self):
with temptree([]) as tmpdir:
expected_objs = list()
expected_dirs = list()
logger = debug_logger()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
# Create a file that represents a non-dir drive
with open(os.path.join(tmpdir, 'asdf'), 'w'):
pass
partition = os.path.join(data, "partition1")
os.makedirs(partition)
suffix = os.path.join(partition, "suffix")
os.makedirs(suffix)
hash_path = os.path.join(suffix, "hash")
os.makedirs(hash_path)
expected_dirs.append((hash_path, 'drive', 'partition1'))
obj_path = os.path.join(hash_path, "obj1.db")
with open(obj_path, "w"):
pass
expected_objs.append((obj_path, 'drive', 'partition1'))
partition = os.path.join(data, "partition2")
os.makedirs(partition)
suffix = os.path.join(partition, "suffix2")
os.makedirs(suffix)
hash_path = os.path.join(suffix, "hash2")
os.makedirs(hash_path)
expected_dirs.append((hash_path, 'drive', 'partition2'))
obj_path = os.path.join(hash_path, "obj2.db")
with open(obj_path, "w"):
pass
expected_objs.append((obj_path, 'drive', 'partition2'))
locations = utils.audit_location_generator(
tmpdir, "data", mount_check=False, logger=logger
)
got_objs = list(locations)
self.assertEqual(len(got_objs), len(expected_objs))
self.assertEqual(sorted(got_objs), sorted(expected_objs))
self.assertEqual(1, len(logger.get_lines_for_level('warning')))
# check yield_hash_dirs option
locations = utils.audit_location_generator(
tmpdir, "data", mount_check=False, logger=logger,
yield_hash_dirs=True,
)
got_dirs = list(locations)
self.assertEqual(sorted(got_dirs), sorted(expected_dirs))
def test_ignore_metadata(self):
with temptree([]) as tmpdir:
logger = debug_logger()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
partition = os.path.join(data, "partition2")
os.makedirs(partition)
suffix = os.path.join(partition, "suffix2")
os.makedirs(suffix)
hash_path = os.path.join(suffix, "hash2")
os.makedirs(hash_path)
obj_path = os.path.join(hash_path, "obj1.dat")
with open(obj_path, "w"):
pass
meta_path = os.path.join(hash_path, "obj1.meta")
with open(meta_path, "w"):
pass
locations = utils.audit_location_generator(
tmpdir, "data", ".dat", mount_check=False, logger=logger
)
self.assertEqual(list(locations),
[(obj_path, "drive", "partition2")])
def test_hooks(self):
with temptree([]) as tmpdir:
logger = debug_logger()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
partition = os.path.join(data, "partition1")
os.makedirs(partition)
suffix = os.path.join(partition, "suffix1")
os.makedirs(suffix)
hash_path = os.path.join(suffix, "hash1")
os.makedirs(hash_path)
obj_path = os.path.join(hash_path, "obj1.dat")
with open(obj_path, "w"):
pass
meta_path = os.path.join(hash_path, "obj1.meta")
with open(meta_path, "w"):
pass
hook_pre_device = MagicMock()
hook_post_device = MagicMock()
hook_pre_partition = MagicMock()
hook_post_partition = MagicMock()
hook_pre_suffix = MagicMock()
hook_post_suffix = MagicMock()
hook_pre_hash = MagicMock()
hook_post_hash = MagicMock()
locations = utils.audit_location_generator(
tmpdir, "data", ".dat", mount_check=False, logger=logger,
hook_pre_device=hook_pre_device,
hook_post_device=hook_post_device,
hook_pre_partition=hook_pre_partition,
hook_post_partition=hook_post_partition,
hook_pre_suffix=hook_pre_suffix,
hook_post_suffix=hook_post_suffix,
hook_pre_hash=hook_pre_hash,
hook_post_hash=hook_post_hash
)
list(locations)
hook_pre_device.assert_called_once_with(os.path.join(tmpdir,
"drive"))
hook_post_device.assert_called_once_with(os.path.join(tmpdir,
"drive"))
hook_pre_partition.assert_called_once_with(partition)
hook_post_partition.assert_called_once_with(partition)
hook_pre_suffix.assert_called_once_with(suffix)
hook_post_suffix.assert_called_once_with(suffix)
hook_pre_hash.assert_called_once_with(hash_path)
hook_post_hash.assert_called_once_with(hash_path)
def test_filters(self):
with temptree([]) as tmpdir:
logger = debug_logger()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
partition = os.path.join(data, "partition1")
os.makedirs(partition)
suffix = os.path.join(partition, "suffix1")
os.makedirs(suffix)
hash_path = os.path.join(suffix, "hash1")
os.makedirs(hash_path)
obj_path = os.path.join(hash_path, "obj1.dat")
with open(obj_path, "w"):
pass
meta_path = os.path.join(hash_path, "obj1.meta")
with open(meta_path, "w"):
pass
def audit_location_generator(**kwargs):
return utils.audit_location_generator(
tmpdir, "data", ".dat", mount_check=False, logger=logger,
**kwargs)
# Spy on os.listdir (pass-through side_effect) so we can check which
# directories actually get listed
with patch('os.listdir', side_effect=os.listdir) as m_listdir:
# devices_filter
m_listdir.reset_mock()
devices_filter = MagicMock(return_value=["drive"])
list(audit_location_generator(devices_filter=devices_filter))
devices_filter.assert_called_once_with(tmpdir, ["drive"])
self.assertIn(((data,),), m_listdir.call_args_list)
m_listdir.reset_mock()
devices_filter = MagicMock(return_value=[])
list(audit_location_generator(devices_filter=devices_filter))
devices_filter.assert_called_once_with(tmpdir, ["drive"])
self.assertNotIn(((data,),), m_listdir.call_args_list)
# partitions_filter
m_listdir.reset_mock()
partitions_filter = MagicMock(return_value=["partition1"])
list(audit_location_generator(
partitions_filter=partitions_filter))
partitions_filter.assert_called_once_with(data,
["partition1"])
self.assertIn(((partition,),), m_listdir.call_args_list)
m_listdir.reset_mock()
partitions_filter = MagicMock(return_value=[])
list(audit_location_generator(
partitions_filter=partitions_filter))
partitions_filter.assert_called_once_with(data,
["partition1"])
self.assertNotIn(((partition,),), m_listdir.call_args_list)
# suffixes_filter
m_listdir.reset_mock()
suffixes_filter = MagicMock(return_value=["suffix1"])
list(audit_location_generator(suffixes_filter=suffixes_filter))
suffixes_filter.assert_called_once_with(partition, ["suffix1"])
self.assertIn(((suffix,),), m_listdir.call_args_list)
m_listdir.reset_mock()
suffixes_filter = MagicMock(return_value=[])
list(audit_location_generator(suffixes_filter=suffixes_filter))
suffixes_filter.assert_called_once_with(partition, ["suffix1"])
self.assertNotIn(((suffix,),), m_listdir.call_args_list)
# hashes_filter
m_listdir.reset_mock()
hashes_filter = MagicMock(return_value=["hash1"])
list(audit_location_generator(hashes_filter=hashes_filter))
hashes_filter.assert_called_once_with(suffix, ["hash1"])
self.assertIn(((hash_path,),), m_listdir.call_args_list)
m_listdir.reset_mock()
hashes_filter = MagicMock(return_value=[])
list(audit_location_generator(hashes_filter=hashes_filter))
hashes_filter.assert_called_once_with(suffix, ["hash1"])
self.assertNotIn(((hash_path,),), m_listdir.call_args_list)
@with_tempdir
def test_error_counter(self, tmpdir):
def assert_no_errors(devices, mount_check=False):
logger = debug_logger()
error_counter = {}
locations = utils.audit_location_generator(
devices, "data", mount_check=mount_check, logger=logger,
error_counter=error_counter
)
self.assertEqual([], list(locations))
self.assertEqual([], logger.get_lines_for_level('warning'))
self.assertEqual([], logger.get_lines_for_level('error'))
self.assertEqual({}, error_counter)
# no devices, no problem
devices = os.path.join(tmpdir, 'devices1')
os.makedirs(devices)
assert_no_errors(devices)
# empty dir under devices/
devices = os.path.join(tmpdir, 'devices2')
os.makedirs(devices)
dev_dir = os.path.join(devices, 'device_is_empty_dir')
os.makedirs(dev_dir)
def assert_listdir_error(devices, expected):
logger = debug_logger()
error_counter = {}
locations = utils.audit_location_generator(
devices, "data", mount_check=False, logger=logger,
error_counter=error_counter
)
self.assertEqual([], list(locations))
self.assertEqual(1, len(logger.get_lines_for_level('warning')))
self.assertEqual({'unlistable_partitions': expected},
error_counter)
# file under devices/
devices = os.path.join(tmpdir, 'devices3')
os.makedirs(devices)
with open(os.path.join(devices, 'device_is_file'), 'w'):
pass
listdir_error_data_dir = os.path.join(devices, 'device_is_file',
'data')
assert_listdir_error(devices, [listdir_error_data_dir])
# dir under devices/
devices = os.path.join(tmpdir, 'devices4')
device = os.path.join(devices, 'device')
os.makedirs(device)
expected_datadir = os.path.join(devices, 'device', 'data')
assert_no_errors(devices)
# error for dir under devices/
orig_listdir = utils.listdir
def mocked(path):
if path.endswith('data'):
raise OSError
return orig_listdir(path)
with mock.patch('swift.common.utils.listdir', mocked):
assert_listdir_error(devices, [expected_datadir])
# mount check error
devices = os.path.join(tmpdir, 'devices5')
device = os.path.join(devices, 'device')
os.makedirs(device)
# no check
with mock.patch('swift.common.utils.ismount', return_value=False):
assert_no_errors(devices, mount_check=False)
# check passes
with mock.patch('swift.common.utils.ismount', return_value=True):
assert_no_errors(devices, mount_check=True)
# check fails
logger = debug_logger()
error_counter = {}
with mock.patch('swift.common.utils.ismount', return_value=False):
locations = utils.audit_location_generator(
devices, "data", mount_check=True, logger=logger,
error_counter=error_counter
)
self.assertEqual([], list(locations))
self.assertEqual(1, len(logger.get_lines_for_level('warning')))
self.assertEqual({'unmounted': ['device']}, error_counter)
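# GreenAsyncPile runs spawned callables concurrently and yields results as
# they complete; waitall()/waitfirst() bound how long we wait for them.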
class TestGreenAsyncPile(unittest.TestCase):
def setUp(self):
self.timeout = Timeout(5.0)
def tearDown(self):
self.timeout.cancel()
def test_runs_everything(self):
def run_test():
tests_ran[0] += 1
return tests_ran[0]
tests_ran = [0]
pile = utils.GreenAsyncPile(3)
for x in range(3):
pile.spawn(run_test)
self.assertEqual(sorted(x for x in pile), [1, 2, 3])
def test_is_asynchronous(self):
def run_test(index):
events[index].wait()
return index
pile = utils.GreenAsyncPile(3)
for order in ((1, 2, 0), (0, 1, 2), (2, 1, 0), (0, 2, 1)):
events = [eventlet.event.Event(), eventlet.event.Event(),
eventlet.event.Event()]
for x in range(3):
pile.spawn(run_test, x)
for x in order:
events[x].send()
self.assertEqual(next(pile), x)
def test_next_when_empty(self):
def run_test():
pass
pile = utils.GreenAsyncPile(3)
pile.spawn(run_test)
self.assertIsNone(next(pile))
self.assertRaises(StopIteration, lambda: next(pile))
def test_waitall_timeout_timesout(self):
def run_test(sleep_duration):
eventlet.sleep(sleep_duration)
completed[0] += 1
return sleep_duration
completed = [0]
pile = utils.GreenAsyncPile(3)
pile.spawn(run_test, 0.1)
pile.spawn(run_test, 1.0)
self.assertEqual(pile.waitall(0.5), [0.1])
self.assertEqual(completed[0], 1)
def test_waitall_timeout_completes(self):
def run_test(sleep_duration):
eventlet.sleep(sleep_duration)
completed[0] += 1
return sleep_duration
completed = [0]
pile = utils.GreenAsyncPile(3)
pile.spawn(run_test, 0.1)
pile.spawn(run_test, 0.1)
self.assertEqual(pile.waitall(0.5), [0.1, 0.1])
self.assertEqual(completed[0], 2)
def test_waitfirst_only_returns_first(self):
def run_test(name):
eventlet.sleep(0)
completed.append(name)
return name
completed = []
pile = utils.GreenAsyncPile(3)
pile.spawn(run_test, 'first')
pile.spawn(run_test, 'second')
pile.spawn(run_test, 'third')
self.assertEqual(pile.waitfirst(0.5), completed[0])
# 3 still completed, but only the first was returned.
self.assertEqual(3, len(completed))
def test_wait_with_firstn(self):
def run_test(name):
eventlet.sleep(0)
completed.append(name)
return name
for first_n in [None] + list(range(6)):
completed = []
pile = utils.GreenAsyncPile(10)
for i in range(10):
pile.spawn(run_test, i)
actual = pile._wait(1, first_n)
expected_n = first_n if first_n else 10
self.assertEqual(completed[:expected_n], actual)
self.assertEqual(10, len(completed))
def test_pending(self):
pile = utils.GreenAsyncPile(3)
self.assertEqual(0, pile._pending)
for repeats in range(2):
# repeat to verify that pending goes back up after going down
for i in range(4):
pile.spawn(lambda: i)
self.assertEqual(4, pile._pending)
for i in range(3, -1, -1):
next(pile)
self.assertEqual(i, pile._pending)
# sanity check - the pile is empty
self.assertRaises(StopIteration, pile.next)
# pending remains 0
self.assertEqual(0, pile._pending)
def _exploder(self, arg):
if isinstance(arg, Exception):
raise arg
else:
return arg
def test_blocking_last_next_explodes(self):
pile = utils.GreenAsyncPile(2)
pile.spawn(self._exploder, 1)
pile.spawn(self._exploder, 2)
pile.spawn(self._exploder, Exception('kaboom'))
self.assertEqual(1, next(pile))
self.assertEqual(2, next(pile))
with self.assertRaises(StopIteration):
next(pile)
self.assertEqual(pile.inflight, 0)
self.assertEqual(pile._pending, 0)
def test_no_blocking_last_next_explodes(self):
pile = utils.GreenAsyncPile(10)
pile.spawn(self._exploder, 1)
self.assertEqual(1, next(pile))
pile.spawn(self._exploder, 2)
self.assertEqual(2, next(pile))
pile.spawn(self._exploder, Exception('kaboom'))
with self.assertRaises(StopIteration):
next(pile)
self.assertEqual(pile.inflight, 0)
self.assertEqual(pile._pending, 0)
def test_exceptions_in_streaming_pile(self):
with utils.StreamingPile(2) as pile:
results = list(pile.asyncstarmap(self._exploder, [
(1,),
(Exception('kaboom'),),
(3,),
]))
self.assertEqual(results, [1, 3])
self.assertEqual(pile.inflight, 0)
self.assertEqual(pile._pending, 0)
def test_exceptions_at_end_of_streaming_pile(self):
with utils.StreamingPile(2) as pile:
results = list(pile.asyncstarmap(self._exploder, [
(1,),
(2,),
(Exception('kaboom'),),
]))
self.assertEqual(results, [1, 2])
self.assertEqual(pile.inflight, 0)
self.assertEqual(pile._pending, 0)
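# LRUCache memoizes by call arguments, bounded by maxsize entries and maxtime
# seconds; patching math.sqrt proves that cache hits never call through.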
class TestLRUCache(unittest.TestCase):
def test_maxsize(self):
@utils.LRUCache(maxsize=10)
def f(*args):
return math.sqrt(*args)
_orig_math_sqrt = math.sqrt
# setup cache [0-10)
for i in range(10):
self.assertEqual(math.sqrt(i), f(i))
self.assertEqual(f.size(), 10)
# validate cache [0-10)
with patch('math.sqrt'):
for i in range(10):
self.assertEqual(_orig_math_sqrt(i), f(i))
self.assertEqual(f.size(), 10)
# update cache [10-20)
for i in range(10, 20):
self.assertEqual(math.sqrt(i), f(i))
# cache size is fixed
self.assertEqual(f.size(), 10)
# validate cache [10-20)
with patch('math.sqrt'):
for i in range(10, 20):
self.assertEqual(_orig_math_sqrt(i), f(i))
# validate un-cached [0-10)
with patch('math.sqrt', new=None):
for i in range(10):
self.assertRaises(TypeError, f, i)
# cache unchanged
self.assertEqual(f.size(), 10)
with patch('math.sqrt'):
for i in range(10, 20):
self.assertEqual(_orig_math_sqrt(i), f(i))
self.assertEqual(f.size(), 10)
def test_maxtime(self):
@utils.LRUCache(maxtime=30)
def f(*args):
return math.sqrt(*args)
self.assertEqual(30, f.maxtime)
_orig_math_sqrt = math.sqrt
now = time.time()
the_future = now + 31
# setup cache [0-10)
with patch('time.time', lambda: now):
for i in range(10):
self.assertEqual(math.sqrt(i), f(i))
self.assertEqual(f.size(), 10)
# validate cache [0-10)
with patch('math.sqrt'):
for i in range(10):
self.assertEqual(_orig_math_sqrt(i), f(i))
self.assertEqual(f.size(), 10)
# validate expired [0-10)
with patch('math.sqrt', new=None):
with patch('time.time', lambda: the_future):
for i in range(10):
self.assertRaises(TypeError, f, i)
# validate repopulates [0-10)
with patch('time.time', lambda: the_future):
for i in range(10):
self.assertEqual(math.sqrt(i), f(i))
# reuses cache space
self.assertEqual(f.size(), 10)
def test_set_maxtime(self):
@utils.LRUCache(maxtime=30)
def f(*args):
return math.sqrt(*args)
self.assertEqual(30, f.maxtime)
self.assertEqual(2, f(4))
self.assertEqual(1, f.size())
# expire everything
f.maxtime = -1
# validate un-cached [0-10)
with patch('math.sqrt', new=None):
self.assertRaises(TypeError, f, 4)
def test_set_maxsize(self):
@utils.LRUCache(maxsize=10)
def f(*args):
return math.sqrt(*args)
for i in range(12):
f(i)
self.assertEqual(f.size(), 10)
f.maxsize = 4
for i in range(12):
f(i)
self.assertEqual(f.size(), 4)
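# Spliterator re-slices an iterable of string chunks into arbitrary-length
# take(n) pieces without ever emitting empty strings.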
class TestSpliterator(unittest.TestCase):
def test_string(self):
input_chunks = ["coun", "ter-", "b", "ra", "nch-mater",
"nit", "y-fungusy", "-nummular"]
si = utils.Spliterator(input_chunks)
self.assertEqual(''.join(si.take(8)), "counter-")
self.assertEqual(''.join(si.take(7)), "branch-")
self.assertEqual(''.join(si.take(10)), "maternity-")
self.assertEqual(''.join(si.take(8)), "fungusy-")
self.assertEqual(''.join(si.take(8)), "nummular")
def test_big_input_string(self):
input_chunks = ["iridium"]
si = utils.Spliterator(input_chunks)
self.assertEqual(''.join(si.take(2)), "ir")
self.assertEqual(''.join(si.take(1)), "i")
self.assertEqual(''.join(si.take(2)), "di")
self.assertEqual(''.join(si.take(1)), "u")
self.assertEqual(''.join(si.take(1)), "m")
def test_chunk_boundaries(self):
input_chunks = ["soylent", "green", "is", "people"]
si = utils.Spliterator(input_chunks)
self.assertEqual(''.join(si.take(7)), "soylent")
self.assertEqual(''.join(si.take(5)), "green")
self.assertEqual(''.join(si.take(2)), "is")
self.assertEqual(''.join(si.take(6)), "people")
def test_no_empty_strings(self):
input_chunks = ["soylent", "green", "is", "people"]
si = utils.Spliterator(input_chunks)
outputs = (list(si.take(7)) # starts and ends on chunk boundary
+ list(si.take(2)) # spans two chunks
+ list(si.take(3)) # begins but does not end chunk
+ list(si.take(2)) # ends but does not begin chunk
+ list(si.take(6))) # whole chunk + EOF
self.assertNotIn('', outputs)
def test_running_out(self):
input_chunks = ["not much"]
si = utils.Spliterator(input_chunks)
self.assertEqual(''.join(si.take(4)), "not ")
self.assertEqual(''.join(si.take(99)), "much") # short
self.assertEqual(''.join(si.take(4)), "")
self.assertEqual(''.join(si.take(4)), "")
def test_overlap(self):
input_chunks = ["one fish", "two fish", "red fish", "blue fish"]
si = utils.Spliterator(input_chunks)
t1 = si.take(20) # longer than first chunk
self.assertLess(len(next(t1)), 20) # it's not exhausted
t2 = si.take(20)
self.assertRaises(ValueError, next, t2)
def test_closing(self):
input_chunks = ["abcd", "efg", "hij"]
si = utils.Spliterator(input_chunks)
it = si.take(3) # shorter than first chunk
self.assertEqual(next(it), 'abc')
it.close()
self.assertEqual(list(si.take(20)), ['d', 'efg', 'hij'])
si = utils.Spliterator(input_chunks)
self.assertEqual(list(si.take(1)), ['a'])
it = si.take(1) # still shorter than first chunk
self.assertEqual(next(it), 'b')
it.close()
self.assertEqual(list(si.take(20)), ['cd', 'efg', 'hij'])
si = utils.Spliterator(input_chunks)
it = si.take(6) # longer than first chunk, shorter than first + second
self.assertEqual(next(it), 'abcd')
self.assertEqual(next(it), 'ef')
it.close()
self.assertEqual(list(si.take(20)), ['g', 'hij'])
si = utils.Spliterator(input_chunks)
self.assertEqual(list(si.take(2)), ['ab'])
it = si.take(3) # longer than rest of chunk
self.assertEqual(next(it), 'cd')
it.close()
self.assertEqual(list(si.take(20)), ['efg', 'hij'])
class TestParseContentRange(unittest.TestCase):
def test_good(self):
start, end, total = utils.parse_content_range("bytes 100-200/300")
self.assertEqual(start, 100)
self.assertEqual(end, 200)
self.assertEqual(total, 300)
def test_bad(self):
self.assertRaises(ValueError, utils.parse_content_range,
"100-300/500")
self.assertRaises(ValueError, utils.parse_content_range,
"bytes 100-200/aardvark")
self.assertRaises(ValueError, utils.parse_content_range,
"bytes bulbous-bouffant/4994801")
class TestParseContentDisposition(unittest.TestCase):
def test_basic_content_type(self):
name, attrs = utils.parse_content_disposition('text/plain')
self.assertEqual(name, 'text/plain')
self.assertEqual(attrs, {})
def test_content_type_with_charset(self):
name, attrs = utils.parse_content_disposition(
'text/plain; charset=UTF8')
self.assertEqual(name, 'text/plain')
self.assertEqual(attrs, {'charset': 'UTF8'})
def test_content_disposition(self):
name, attrs = utils.parse_content_disposition(
'form-data; name="somefile"; filename="test.html"')
self.assertEqual(name, 'form-data')
self.assertEqual(attrs, {'name': 'somefile', 'filename': 'test.html'})
def test_content_disposition_without_white_space(self):
name, attrs = utils.parse_content_disposition(
'form-data;name="somefile";filename="test.html"')
self.assertEqual(name, 'form-data')
self.assertEqual(attrs, {'name': 'somefile', 'filename': 'test.html'})
class TestGetExpirerContainer(unittest.TestCase):
@mock.patch.object(utils, 'hash_path', return_value=hex(101)[2:])
def test_get_expirer_container(self, mock_hash_path):
container = utils.get_expirer_container(1234, 20, 'a', 'c', 'o')
self.assertEqual(container, '0000001219')
container = utils.get_expirer_container(1234, 200, 'a', 'c', 'o')
self.assertEqual(container, '0000001199')
class TestIterMultipartMimeDocuments(unittest.TestCase):
def test_bad_start(self):
it = utils.iter_multipart_mime_documents(BytesIO(b'blah'), b'unique')
exc = None
try:
next(it)
except MimeInvalid as err:
exc = err
self.assertTrue('invalid starting boundary' in str(exc))
self.assertTrue('--unique' in str(exc))
def test_empty(self):
it = utils.iter_multipart_mime_documents(BytesIO(b'--unique'),
b'unique')
fp = next(it)
self.assertEqual(fp.read(), b'')
self.assertRaises(StopIteration, next, it)
def test_basic(self):
it = utils.iter_multipart_mime_documents(
BytesIO(b'--unique\r\nabcdefg\r\n--unique--'), b'unique')
fp = next(it)
self.assertEqual(fp.read(), b'abcdefg')
self.assertRaises(StopIteration, next, it)
def test_basic2(self):
it = utils.iter_multipart_mime_documents(
BytesIO(b'--unique\r\nabcdefg\r\n--unique\r\nhijkl\r\n--unique--'),
b'unique')
fp = next(it)
self.assertEqual(fp.read(), b'abcdefg')
fp = next(it)
self.assertEqual(fp.read(), b'hijkl')
self.assertRaises(StopIteration, next, it)
def test_tiny_reads(self):
it = utils.iter_multipart_mime_documents(
BytesIO(b'--unique\r\nabcdefg\r\n--unique\r\nhijkl\r\n--unique--'),
b'unique')
fp = next(it)
self.assertEqual(fp.read(2), b'ab')
self.assertEqual(fp.read(2), b'cd')
self.assertEqual(fp.read(2), b'ef')
self.assertEqual(fp.read(2), b'g')
self.assertEqual(fp.read(2), b'')
fp = next(it)
self.assertEqual(fp.read(), b'hijkl')
self.assertRaises(StopIteration, next, it)
def test_big_reads(self):
it = utils.iter_multipart_mime_documents(
BytesIO(b'--unique\r\nabcdefg\r\n--unique\r\nhijkl\r\n--unique--'),
b'unique')
fp = next(it)
self.assertEqual(fp.read(65536), b'abcdefg')
self.assertEqual(fp.read(), b'')
fp = next(it)
self.assertEqual(fp.read(), b'hijkl')
self.assertRaises(StopIteration, next, it)
def test_leading_crlfs(self):
it = utils.iter_multipart_mime_documents(
BytesIO(b'\r\n\r\n\r\n--unique\r\nabcdefg\r\n'
b'--unique\r\nhijkl\r\n--unique--'),
b'unique')
fp = next(it)
self.assertEqual(fp.read(65536), b'abcdefg')
self.assertEqual(fp.read(), b'')
fp = next(it)
self.assertEqual(fp.read(), b'hijkl')
self.assertRaises(StopIteration, next, it)
def test_broken_mid_stream(self):
# We go ahead and accept whatever is sent instead of rejecting the
# whole request, in case the partial form is still useful.
it = utils.iter_multipart_mime_documents(
BytesIO(b'--unique\r\nabc'), b'unique')
fp = next(it)
self.assertEqual(fp.read(), b'abc')
self.assertRaises(StopIteration, next, it)
def test_readline(self):
it = utils.iter_multipart_mime_documents(
BytesIO(b'--unique\r\nab\r\ncd\ref\ng\r\n--unique\r\nhi\r\n\r\n'
b'jkl\r\n\r\n--unique--'), b'unique')
fp = next(it)
self.assertEqual(fp.readline(), b'ab\r\n')
self.assertEqual(fp.readline(), b'cd\ref\ng')
self.assertEqual(fp.readline(), b'')
fp = next(it)
self.assertEqual(fp.readline(), b'hi\r\n')
self.assertEqual(fp.readline(), b'\r\n')
self.assertEqual(fp.readline(), b'jkl\r\n')
self.assertRaises(StopIteration, next, it)
def test_readline_with_tiny_chunks(self):
it = utils.iter_multipart_mime_documents(
BytesIO(b'--unique\r\nab\r\ncd\ref\ng\r\n--unique\r\nhi\r\n'
b'\r\njkl\r\n\r\n--unique--'),
b'unique',
read_chunk_size=2)
fp = next(it)
self.assertEqual(fp.readline(), b'ab\r\n')
self.assertEqual(fp.readline(), b'cd\ref\ng')
self.assertEqual(fp.readline(), b'')
fp = next(it)
self.assertEqual(fp.readline(), b'hi\r\n')
self.assertEqual(fp.readline(), b'\r\n')
self.assertEqual(fp.readline(), b'jkl\r\n')
self.assertRaises(StopIteration, next, it)
class TestParseMimeHeaders(unittest.TestCase):
def test_parse_mime_headers(self):
doc_file = BytesIO(b"""Content-Disposition: form-data; name="file_size"
Foo: Bar
NOT-title-cAsED: quux
Connexion: =?iso8859-1?q?r=E9initialis=E9e_par_l=27homologue?=
Status: =?utf-8?b?5byA5aeL6YCa6L+H5a+56LGh5aSN5Yi2?=
Latin-1: Resincronizaci\xf3n realizada con \xe9xito
Utf-8: \xd0\xba\xd0\xbe\xd0\xbd\xd1\x82\xd0\xb5\xd0\xb9\xd0\xbd\xd0\xb5\xd1\x80
This is the body
""")
headers = utils.parse_mime_headers(doc_file)
utf8 = u'\u043a\u043e\u043d\u0442\u0435\u0439\u043d\u0435\u0440'
if six.PY2:
utf8 = utf8.encode('utf-8')
expected_headers = {
'Content-Disposition': 'form-data; name="file_size"',
'Foo': "Bar",
'Not-Title-Cased': "quux",
# Encoded-word or non-ASCII values are treated just like any other
# bytestring (at least for now)
'Connexion': "=?iso8859-1?q?r=E9initialis=E9e_par_l=27homologue?=",
'Status': "=?utf-8?b?5byA5aeL6YCa6L+H5a+56LGh5aSN5Yi2?=",
'Latin-1': "Resincronizaci\xf3n realizada con \xe9xito",
'Utf-8': utf8,
}
self.assertEqual(expected_headers, headers)
self.assertEqual(b"This is the body\n", doc_file.read())
class FakeResponse(object):
def __init__(self, status, headers, body):
self.status = status
self.headers = HeaderKeyDict(headers)
self.body = BytesIO(body)
def getheader(self, header_name):
return str(self.headers.get(header_name, ''))
def getheaders(self):
return self.headers.items()
def read(self, length=None):
return self.body.read(length)
def readline(self, length=None):
return self.body.readline(length)
class TestDocumentItersToHTTPResponseBody(unittest.TestCase):
def test_no_parts(self):
body = utils.document_iters_to_http_response_body(
iter([]), 'dontcare',
multipart=False, logger=debug_logger())
self.assertEqual(body, '')
def test_single_part(self):
body = b"time flies like an arrow; fruit flies like a banana"
doc_iters = [{'part_iter': iter(BytesIO(body).read, b'')}]
resp_body = b''.join(
utils.document_iters_to_http_response_body(
iter(doc_iters), b'dontcare',
multipart=False, logger=debug_logger()))
self.assertEqual(resp_body, body)
def test_multiple_parts(self):
part1 = b"two peanuts were walking down a railroad track"
part2 = b"and one was a salted. ... peanut."
doc_iters = [{
'start_byte': 88,
'end_byte': 133,
'content_type': 'application/peanut',
'entity_length': 1024,
'part_iter': iter(BytesIO(part1).read, b''),
}, {
'start_byte': 500,
'end_byte': 532,
'content_type': 'application/salted',
'entity_length': 1024,
'part_iter': iter(BytesIO(part2).read, b''),
}]
resp_body = b''.join(
utils.document_iters_to_http_response_body(
iter(doc_iters), b'boundaryboundary',
multipart=True, logger=debug_logger()))
self.assertEqual(resp_body, (
b"--boundaryboundary\r\n" +
# This is a little too strict; we don't actually care that the
# headers are in this order, but the test is much more legible
# this way.
b"Content-Type: application/peanut\r\n" +
b"Content-Range: bytes 88-133/1024\r\n" +
b"\r\n" +
part1 + b"\r\n" +
b"--boundaryboundary\r\n"
b"Content-Type: application/salted\r\n" +
b"Content-Range: bytes 500-532/1024\r\n" +
b"\r\n" +
part2 + b"\r\n" +
b"--boundaryboundary--"))
def test_closed_part_iterator(self):
useful_iter_mock = mock.MagicMock()
useful_iter_mock.__iter__.return_value = ['']
body_iter = utils.document_iters_to_http_response_body(
iter([{'part_iter': useful_iter_mock}]), 'dontcare',
multipart=False, logger=debug_logger())
body = ''
for s in body_iter:
body += s
self.assertEqual(body, '')
useful_iter_mock.close.assert_called_once_with()
# Calling "close" on the mock will now raise an AttributeError
del useful_iter_mock.close
body_iter = utils.document_iters_to_http_response_body(
iter([{'part_iter': useful_iter_mock}]), 'dontcare',
multipart=False, logger=debug_logger())
body = ''
for s in body_iter:
body += s
class TestPairs(unittest.TestCase):
def test_pairs(self):
items = [10, 20, 30, 40, 50, 60]
got_pairs = set(utils.pairs(items))
self.assertEqual(got_pairs,
set([(10, 20), (10, 30), (10, 40), (10, 50), (10, 60),
(20, 30), (20, 40), (20, 50), (20, 60),
(30, 40), (30, 50), (30, 60),
(40, 50), (40, 60),
(50, 60)]))
class TestSocketStringParser(unittest.TestCase):
def test_socket_string_parser(self):
default = 1337
addrs = [('1.2.3.4', '1.2.3.4', default),
('1.2.3.4:5000', '1.2.3.4', 5000),
('[dead:beef::1]', 'dead:beef::1', default),
('[dead:beef::1]:5000', 'dead:beef::1', 5000),
('example.com', 'example.com', default),
('example.com:5000', 'example.com', 5000),
('foo.1-2-3.bar.com:5000', 'foo.1-2-3.bar.com', 5000),
('1.2.3.4:10:20', None, None),
('dead:beef::1:5000', None, None)]
for addr, expected_host, expected_port in addrs:
if expected_host:
host, port = utils.parse_socket_string(addr, default)
self.assertEqual(expected_host, host)
self.assertEqual(expected_port, int(port))
else:
with self.assertRaises(ValueError):
utils.parse_socket_string(addr, default)
class TestHashForFileFunction(unittest.TestCase):
def setUp(self):
self.tempfilename = tempfile.mktemp()
def tearDown(self):
try:
os.unlink(self.tempfilename)
except OSError:
pass
def test_hash_for_file_smallish(self):
stub_data = b'some data'
with open(self.tempfilename, 'wb') as fd:
fd.write(stub_data)
with mock.patch('swift.common.utils.md5') as mock_md5:
mock_hasher = mock_md5.return_value
rv = utils.md5_hash_for_file(self.tempfilename)
self.assertTrue(mock_hasher.hexdigest.called)
self.assertEqual(rv, mock_hasher.hexdigest.return_value)
self.assertEqual([mock.call(stub_data)],
mock_hasher.update.call_args_list)
def test_hash_for_file_big(self):
num_blocks = 10
block_size = utils.MD5_BLOCK_READ_BYTES
truncate = 523
start_char = ord('a')
expected_blocks = [chr(i).encode('utf8') * block_size
for i in range(start_char, start_char + num_blocks)]
full_data = b''.join(expected_blocks)
trimmed_data = full_data[:-truncate]
# sanity
self.assertEqual(len(trimmed_data), block_size * num_blocks - truncate)
with open(self.tempfilename, 'wb') as fd:
fd.write(trimmed_data)
with mock.patch('swift.common.utils.md5') as mock_md5:
mock_hasher = mock_md5.return_value
rv = utils.md5_hash_for_file(self.tempfilename)
self.assertTrue(mock_hasher.hexdigest.called)
self.assertEqual(rv, mock_hasher.hexdigest.return_value)
self.assertEqual(num_blocks, len(mock_hasher.update.call_args_list))
found_blocks = []
for i, (expected_block, call) in enumerate(zip(
expected_blocks, mock_hasher.update.call_args_list)):
args, kwargs = call
self.assertEqual(kwargs, {})
self.assertEqual(1, len(args))
block = args[0]
if i < num_blocks - 1:
self.assertEqual(block, expected_block)
else:
self.assertEqual(block, expected_block[:-truncate])
found_blocks.append(block)
self.assertEqual(b''.join(found_blocks), trimmed_data)
def test_hash_for_file_empty(self):
with open(self.tempfilename, 'wb'):
pass
with mock.patch('swift.common.utils.md5') as mock_md5:
mock_hasher = mock_md5.return_value
rv = utils.md5_hash_for_file(self.tempfilename)
self.assertTrue(mock_hasher.hexdigest.called)
self.assertIs(rv, mock_hasher.hexdigest.return_value)
self.assertEqual([], mock_hasher.update.call_args_list)
def test_hash_for_file_brittle(self):
data_to_expected_hash = {
b'': 'd41d8cd98f00b204e9800998ecf8427e',
b'some data': '1e50210a0202497fb79bc38b6ade6c34',
(b'a' * 4096 * 10)[:-523]: '06a41551609656c85f14f659055dc6d3',
}
# Unlike some other places, where the concrete implementation really
# matters for backwards compatibility, these brittle tests are probably
# not needed or justified; if a future maintainer rips them out later,
# they're probably doing the right thing.
failures = []
for stub_data, expected_hash in data_to_expected_hash.items():
with open(self.tempfilename, 'wb') as fd:
fd.write(stub_data)
rv = utils.md5_hash_for_file(self.tempfilename)
try:
self.assertEqual(expected_hash, rv)
except AssertionError:
trim_cap = 80
if len(stub_data) > trim_cap:
stub_data = '%s...<truncated>' % stub_data[:trim_cap]
failures.append('hash for %r was %s instead of expected %s' % (
stub_data, rv, expected_hash))
if failures:
self.fail('Some data did not compute expected hash:\n' +
'\n'.join(failures))
class TestFsHasFreeSpace(unittest.TestCase):
def test_bytes(self):
fake_result = posix.statvfs_result([
4096, # f_bsize
4096, # f_frsize
2854907, # f_blocks
1984802, # f_bfree (free blocks for root)
1728089, # f_bavail (free blocks for non-root)
1280000, # f_files
1266040, # f_ffree,
1266040, # f_favail,
4096, # f_flag
255, # f_namemax
])
with mock.patch('os.statvfs', return_value=fake_result):
self.assertTrue(utils.fs_has_free_space("/", 0, False))
self.assertTrue(utils.fs_has_free_space("/", 1, False))
# free space left = f_bavail * f_bsize = 7078252544
self.assertTrue(utils.fs_has_free_space("/", 7078252544, False))
self.assertFalse(utils.fs_has_free_space("/", 7078252545, False))
self.assertFalse(utils.fs_has_free_space("/", 2 ** 64, False))
def test_percent(self):
fake_result = posix.statvfs_result([
4096, # f_bsize
4096, # f_frsize
2854907, # f_blocks
1984802, # f_bfree (free blocks for root)
1728089, # f_bavail (free blocks for non-root)
1280000, # f_files
1266040, # f_ffree,
1266040, # f_favail,
4096, # f_flag
255, # f_namemax
])
with mock.patch('os.statvfs', return_value=fake_result):
self.assertTrue(utils.fs_has_free_space("/", 0, True))
self.assertTrue(utils.fs_has_free_space("/", 1, True))
# free space for the faked statvfs is ~60.5%, so 60 passes and 61 fails
self.assertTrue(utils.fs_has_free_space("/", 60, True))
self.assertFalse(utils.fs_has_free_space("/", 61, True))
self.assertFalse(utils.fs_has_free_space("/", 100, True))
self.assertFalse(utils.fs_has_free_space("/", 110, True))
class TestSetSwiftDir(unittest.TestCase):
def setUp(self):
self.swift_dir = tempfile.mkdtemp()
self.swift_conf = os.path.join(self.swift_dir, 'swift.conf')
self.policy_name = ''.join(random.sample(string.ascii_letters, 20))
with open(self.swift_conf, "wt") as sc:
sc.write('''
[swift-hash]
swift_hash_path_suffix = changeme
[storage-policy:0]
name = default
default = yes
[storage-policy:1]
name = %s
''' % self.policy_name)
def tearDown(self):
shutil.rmtree(self.swift_dir, ignore_errors=True)
def test_set_swift_dir(self):
set_swift_dir(None)
reload_storage_policies()
self.assertIsNone(POLICIES.get_by_name(self.policy_name))
set_swift_dir(self.swift_dir)
reload_storage_policies()
self.assertIsNotNone(POLICIES.get_by_name(self.policy_name))
class TestPipeMutex(unittest.TestCase):
def setUp(self):
self.mutex = utils.PipeMutex()
def tearDown(self):
self.mutex.close()
def test_nonblocking(self):
evt_lock1 = eventlet.event.Event()
evt_lock2 = eventlet.event.Event()
evt_unlock = eventlet.event.Event()
def get_the_lock():
self.mutex.acquire()
evt_lock1.send('got the lock')
evt_lock2.wait()
self.mutex.release()
evt_unlock.send('released the lock')
eventlet.spawn(get_the_lock)
evt_lock1.wait() # Now, the other greenthread has the lock.
self.assertFalse(self.mutex.acquire(blocking=False))
evt_lock2.send('please release the lock')
evt_unlock.wait() # The other greenthread has released the lock.
self.assertTrue(self.mutex.acquire(blocking=False))
def test_recursive(self):
self.assertTrue(self.mutex.acquire(blocking=False))
self.assertTrue(self.mutex.acquire(blocking=False))
def try_acquire_lock():
return self.mutex.acquire(blocking=False)
self.assertFalse(eventlet.spawn(try_acquire_lock).wait())
self.mutex.release()
self.assertFalse(eventlet.spawn(try_acquire_lock).wait())
self.mutex.release()
self.assertTrue(eventlet.spawn(try_acquire_lock).wait())
def test_release_without_acquire(self):
self.assertRaises(RuntimeError, self.mutex.release)
def test_too_many_releases(self):
self.mutex.acquire()
self.mutex.release()
self.assertRaises(RuntimeError, self.mutex.release)
def test_wrong_releaser(self):
self.mutex.acquire()
with quiet_eventlet_exceptions():
self.assertRaises(RuntimeError,
eventlet.spawn(self.mutex.release).wait)
def test_blocking(self):
evt = eventlet.event.Event()
sequence = []
def coro1():
eventlet.sleep(0) # let coro2 go
self.mutex.acquire()
sequence.append('coro1 acquire')
evt.send('go')
self.mutex.release()
sequence.append('coro1 release')
def coro2():
evt.wait() # wait for coro1 to start us
self.mutex.acquire()
sequence.append('coro2 acquire')
self.mutex.release()
sequence.append('coro2 release')
c1 = eventlet.spawn(coro1)
c2 = eventlet.spawn(coro2)
c1.wait()
c2.wait()
self.assertEqual(sequence, [
'coro1 acquire',
'coro1 release',
'coro2 acquire',
'coro2 release'])
def test_blocking_tpool(self):
# Note: this test's success isn't a guarantee that the mutex is
# working. However, this test's failure means that the mutex is
# definitely broken.
sequence = []
def do_stuff():
n = 10
while n > 0:
self.mutex.acquire()
sequence.append("<")
eventlet.sleep(0.0001)
sequence.append(">")
self.mutex.release()
n -= 1
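        # Four workers (two greenthreads plus two real threads) each append
        # a locked "<" / ">" pair ten times, so a correctly serializing
        # mutex should produce exactly 40 uninterrupted "<>" pairs.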
greenthread1 = eventlet.spawn(do_stuff)
greenthread2 = eventlet.spawn(do_stuff)
real_thread1 = eventlet.patcher.original('threading').Thread(
target=do_stuff)
real_thread1.start()
real_thread2 = eventlet.patcher.original('threading').Thread(
target=do_stuff)
real_thread2.start()
greenthread1.wait()
greenthread2.wait()
real_thread1.join()
real_thread2.join()
self.assertEqual(''.join(sequence), "<>" * 40)
def test_blocking_preserves_ownership(self):
pthread1_event = eventlet.patcher.original('threading').Event()
pthread2_event1 = eventlet.patcher.original('threading').Event()
pthread2_event2 = eventlet.patcher.original('threading').Event()
thread_id = []
owner = []
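        # pthread1 acquires the mutex and its release is stalled inside
        # os.write until pthread2 starts acquiring; the recorded owners
        # should match the acquiring greenthread ids, ending with no owner.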
def pthread1():
thread_id.append(id(eventlet.greenthread.getcurrent()))
self.mutex.acquire()
owner.append(self.mutex.owner)
pthread2_event1.set()
orig_os_write = utils.os.write
def patched_os_write(*a, **kw):
try:
return orig_os_write(*a, **kw)
finally:
pthread1_event.wait()
with mock.patch.object(utils.os, 'write', patched_os_write):
self.mutex.release()
pthread2_event2.set()
def pthread2():
pthread2_event1.wait() # ensure pthread1 acquires lock first
thread_id.append(id(eventlet.greenthread.getcurrent()))
self.mutex.acquire()
pthread1_event.set()
pthread2_event2.wait()
owner.append(self.mutex.owner)
self.mutex.release()
real_thread1 = eventlet.patcher.original('threading').Thread(
target=pthread1)
real_thread1.start()
real_thread2 = eventlet.patcher.original('threading').Thread(
target=pthread2)
real_thread2.start()
real_thread1.join()
real_thread2.join()
self.assertEqual(thread_id, owner)
self.assertIsNone(self.mutex.owner)
@classmethod
def tearDownClass(cls):
# PipeMutex turns this off when you instantiate one
eventlet.debug.hub_prevent_multiple_readers(True)
class TestDistributeEvenly(unittest.TestCase):
def test_evenly_divided(self):
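        # distribute_evenly() appears to deal items out round-robin, so
        # bucket i collects items i, i + n, i + 2n, ... for n buckets.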
out = utils.distribute_evenly(range(12), 3)
self.assertEqual(out, [
[0, 3, 6, 9],
[1, 4, 7, 10],
[2, 5, 8, 11],
])
out = utils.distribute_evenly(range(12), 4)
self.assertEqual(out, [
[0, 4, 8],
[1, 5, 9],
[2, 6, 10],
[3, 7, 11],
])
def test_uneven(self):
out = utils.distribute_evenly(range(11), 3)
self.assertEqual(out, [
[0, 3, 6, 9],
[1, 4, 7, 10],
[2, 5, 8],
])
def test_just_one(self):
out = utils.distribute_evenly(range(5), 1)
self.assertEqual(out, [[0, 1, 2, 3, 4]])
def test_more_buckets_than_items(self):
out = utils.distribute_evenly(range(5), 7)
self.assertEqual(out, [[0], [1], [2], [3], [4], [], []])
class TestShardRange(unittest.TestCase):
def setUp(self):
self.ts_iter = make_timestamp_iter()
def test_min_max_bounds(self):
with self.assertRaises(TypeError):
utils.ShardRangeOuterBound()
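        # MaxBound()/MinBound() act as singleton sentinels: MAX compares
        # greater than any string, MIN less, and both stringify to ''.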
# max
self.assertEqual(utils.ShardRange.MAX, utils.ShardRange.MAX)
self.assertFalse(utils.ShardRange.MAX > utils.ShardRange.MAX)
self.assertFalse(utils.ShardRange.MAX < utils.ShardRange.MAX)
for val in 'z', u'\u00e4':
self.assertFalse(utils.ShardRange.MAX == val)
self.assertFalse(val > utils.ShardRange.MAX)
self.assertTrue(val < utils.ShardRange.MAX)
self.assertTrue(utils.ShardRange.MAX > val)
self.assertFalse(utils.ShardRange.MAX < val)
self.assertEqual('', str(utils.ShardRange.MAX))
self.assertFalse(utils.ShardRange.MAX)
self.assertTrue(utils.ShardRange.MAX == utils.ShardRange.MAX)
self.assertFalse(utils.ShardRange.MAX != utils.ShardRange.MAX)
self.assertTrue(
utils.ShardRange.MaxBound() == utils.ShardRange.MaxBound())
self.assertTrue(
utils.ShardRange.MaxBound() is utils.ShardRange.MaxBound())
self.assertTrue(
utils.ShardRange.MaxBound() is utils.ShardRange.MAX)
self.assertFalse(
utils.ShardRange.MaxBound() != utils.ShardRange.MaxBound())
# min
self.assertEqual(utils.ShardRange.MIN, utils.ShardRange.MIN)
self.assertFalse(utils.ShardRange.MIN > utils.ShardRange.MIN)
self.assertFalse(utils.ShardRange.MIN < utils.ShardRange.MIN)
for val in 'z', u'\u00e4':
self.assertFalse(utils.ShardRange.MIN == val)
self.assertFalse(val < utils.ShardRange.MIN)
self.assertTrue(val > utils.ShardRange.MIN)
self.assertTrue(utils.ShardRange.MIN < val)
self.assertFalse(utils.ShardRange.MIN > val)
self.assertFalse(utils.ShardRange.MIN)
self.assertEqual('', str(utils.ShardRange.MIN))
self.assertFalse(utils.ShardRange.MIN)
self.assertTrue(utils.ShardRange.MIN == utils.ShardRange.MIN)
self.assertFalse(utils.ShardRange.MIN != utils.ShardRange.MIN)
self.assertTrue(
utils.ShardRange.MinBound() == utils.ShardRange.MinBound())
self.assertTrue(
utils.ShardRange.MinBound() is utils.ShardRange.MinBound())
self.assertTrue(
utils.ShardRange.MinBound() is utils.ShardRange.MIN)
self.assertFalse(
utils.ShardRange.MinBound() != utils.ShardRange.MinBound())
self.assertFalse(utils.ShardRange.MAX == utils.ShardRange.MIN)
self.assertFalse(utils.ShardRange.MIN == utils.ShardRange.MAX)
self.assertTrue(utils.ShardRange.MAX != utils.ShardRange.MIN)
self.assertTrue(utils.ShardRange.MIN != utils.ShardRange.MAX)
self.assertFalse(utils.ShardRange.MAX is utils.ShardRange.MIN)
self.assertEqual(utils.ShardRange.MAX,
max(utils.ShardRange.MIN, utils.ShardRange.MAX))
self.assertEqual(utils.ShardRange.MIN,
min(utils.ShardRange.MIN, utils.ShardRange.MAX))
# check the outer bounds are hashable
hashmap = {utils.ShardRange.MIN: 'min',
utils.ShardRange.MAX: 'max'}
self.assertEqual(hashmap[utils.ShardRange.MIN], 'min')
self.assertEqual(hashmap[utils.ShardRange.MinBound()], 'min')
self.assertEqual(hashmap[utils.ShardRange.MAX], 'max')
self.assertEqual(hashmap[utils.ShardRange.MaxBound()], 'max')
def test_shard_range_initialisation(self):
def assert_initialisation_ok(params, expected):
pr = utils.ShardRange(**params)
self.assertDictEqual(dict(pr), expected)
def assert_initialisation_fails(params, err_type=ValueError):
with self.assertRaises(err_type):
utils.ShardRange(**params)
ts_1 = next(self.ts_iter)
ts_2 = next(self.ts_iter)
ts_3 = next(self.ts_iter)
ts_4 = next(self.ts_iter)
empty_run = dict(name=None, timestamp=None, lower=None,
upper=None, object_count=0, bytes_used=0,
meta_timestamp=None, deleted=0,
state=utils.ShardRange.FOUND, state_timestamp=None,
epoch=None)
# name, timestamp must be given
assert_initialisation_fails(empty_run.copy())
assert_initialisation_fails(dict(empty_run, name='a/c'), TypeError)
assert_initialisation_fails(dict(empty_run, timestamp=ts_1))
        # name must be of the form a/c
assert_initialisation_fails(dict(empty_run, name='c', timestamp=ts_1))
assert_initialisation_fails(dict(empty_run, name='', timestamp=ts_1))
assert_initialisation_fails(dict(empty_run, name='/a/c',
timestamp=ts_1))
assert_initialisation_fails(dict(empty_run, name='/c',
timestamp=ts_1))
# lower, upper can be None
expect = dict(name='a/c', timestamp=ts_1.internal, lower='',
upper='', object_count=0, bytes_used=0,
meta_timestamp=ts_1.internal, deleted=0,
state=utils.ShardRange.FOUND,
state_timestamp=ts_1.internal, epoch=None,
reported=0, tombstones=-1)
assert_initialisation_ok(dict(empty_run, name='a/c', timestamp=ts_1),
expect)
assert_initialisation_ok(dict(name='a/c', timestamp=ts_1), expect)
good_run = dict(name='a/c', timestamp=ts_1, lower='l',
upper='u', object_count=2, bytes_used=10,
meta_timestamp=ts_2, deleted=0,
state=utils.ShardRange.CREATED,
state_timestamp=ts_3.internal, epoch=ts_4,
reported=0, tombstones=11)
expect.update({'lower': 'l', 'upper': 'u', 'object_count': 2,
'bytes_used': 10, 'meta_timestamp': ts_2.internal,
'state': utils.ShardRange.CREATED,
'state_timestamp': ts_3.internal, 'epoch': ts_4,
'reported': 0, 'tombstones': 11})
assert_initialisation_ok(good_run.copy(), expect)
# obj count, tombstones and bytes used as int strings
good_str_run = good_run.copy()
good_str_run.update({'object_count': '2', 'bytes_used': '10',
'tombstones': '11'})
assert_initialisation_ok(good_str_run, expect)
good_no_meta = good_run.copy()
good_no_meta.pop('meta_timestamp')
assert_initialisation_ok(good_no_meta,
dict(expect, meta_timestamp=ts_1.internal))
good_deleted = good_run.copy()
good_deleted['deleted'] = 1
assert_initialisation_ok(good_deleted,
dict(expect, deleted=1))
good_reported = good_run.copy()
good_reported['reported'] = 1
assert_initialisation_ok(good_reported,
dict(expect, reported=1))
assert_initialisation_fails(dict(good_run, timestamp='water balloon'))
assert_initialisation_fails(
dict(good_run, meta_timestamp='water balloon'))
assert_initialisation_fails(dict(good_run, lower='water balloon'))
assert_initialisation_fails(dict(good_run, upper='balloon'))
assert_initialisation_fails(
dict(good_run, object_count='water balloon'))
        assert_initialisation_fails(
            dict(good_run, bytes_used='water balloon'))
assert_initialisation_fails(dict(good_run, object_count=-1))
assert_initialisation_fails(dict(good_run, bytes_used=-1))
assert_initialisation_fails(dict(good_run, state=-1))
assert_initialisation_fails(dict(good_run, state_timestamp='not a ts'))
assert_initialisation_fails(dict(good_run, name='/a/c'))
assert_initialisation_fails(dict(good_run, name='/a/c/'))
assert_initialisation_fails(dict(good_run, name='a/c/'))
assert_initialisation_fails(dict(good_run, name='a'))
assert_initialisation_fails(dict(good_run, name=''))
def _check_to_from_dict(self, lower, upper):
ts_1 = next(self.ts_iter)
ts_2 = next(self.ts_iter)
ts_3 = next(self.ts_iter)
ts_4 = next(self.ts_iter)
sr = utils.ShardRange('a/test', ts_1, lower, upper, 10, 100, ts_2,
state=None, state_timestamp=ts_3, epoch=ts_4)
sr_dict = dict(sr)
expected = {
'name': 'a/test', 'timestamp': ts_1.internal, 'lower': lower,
'upper': upper, 'object_count': 10, 'bytes_used': 100,
'meta_timestamp': ts_2.internal, 'deleted': 0,
'state': utils.ShardRange.FOUND, 'state_timestamp': ts_3.internal,
'epoch': ts_4, 'reported': 0, 'tombstones': -1}
self.assertEqual(expected, sr_dict)
self.assertIsInstance(sr_dict['lower'], six.string_types)
self.assertIsInstance(sr_dict['upper'], six.string_types)
sr_new = utils.ShardRange.from_dict(sr_dict)
self.assertEqual(sr, sr_new)
self.assertEqual(sr_dict, dict(sr_new))
sr_new = utils.ShardRange(**sr_dict)
self.assertEqual(sr, sr_new)
self.assertEqual(sr_dict, dict(sr_new))
for key in sr_dict:
bad_dict = dict(sr_dict)
bad_dict.pop(key)
if key in ('reported', 'tombstones'):
# These were added after the fact, and we need to be able to
# eat data from old servers
utils.ShardRange.from_dict(bad_dict)
utils.ShardRange(**bad_dict)
continue
# The rest were present from the beginning
with self.assertRaises(KeyError):
utils.ShardRange.from_dict(bad_dict)
# But __init__ still (generally) works!
if key not in ('name', 'timestamp'):
utils.ShardRange(**bad_dict)
else:
with self.assertRaises(TypeError):
utils.ShardRange(**bad_dict)
def test_to_from_dict(self):
self._check_to_from_dict('l', 'u')
self._check_to_from_dict('', '')
def test_timestamp_setter(self):
ts_1 = next(self.ts_iter)
sr = utils.ShardRange('a/test', ts_1, 'l', 'u', 0, 0, None)
self.assertEqual(ts_1, sr.timestamp)
ts_2 = next(self.ts_iter)
sr.timestamp = ts_2
self.assertEqual(ts_2, sr.timestamp)
sr.timestamp = 0
self.assertEqual(utils.Timestamp(0), sr.timestamp)
with self.assertRaises(TypeError):
sr.timestamp = None
def test_meta_timestamp_setter(self):
ts_1 = next(self.ts_iter)
sr = utils.ShardRange('a/test', ts_1, 'l', 'u', 0, 0, None)
self.assertEqual(ts_1, sr.timestamp)
self.assertEqual(ts_1, sr.meta_timestamp)
ts_2 = next(self.ts_iter)
sr.meta_timestamp = ts_2
self.assertEqual(ts_1, sr.timestamp)
self.assertEqual(ts_2, sr.meta_timestamp)
ts_3 = next(self.ts_iter)
sr.timestamp = ts_3
self.assertEqual(ts_3, sr.timestamp)
self.assertEqual(ts_2, sr.meta_timestamp)
# meta_timestamp defaults to tracking timestamp
sr.meta_timestamp = None
self.assertEqual(ts_3, sr.timestamp)
self.assertEqual(ts_3, sr.meta_timestamp)
ts_4 = next(self.ts_iter)
sr.timestamp = ts_4
self.assertEqual(ts_4, sr.timestamp)
self.assertEqual(ts_4, sr.meta_timestamp)
sr.meta_timestamp = 0
self.assertEqual(ts_4, sr.timestamp)
self.assertEqual(utils.Timestamp(0), sr.meta_timestamp)
def test_update_meta(self):
ts_1 = next(self.ts_iter)
sr = utils.ShardRange('a/test', ts_1, 'l', 'u', 0, 0, None)
with mock_timestamp_now(next(self.ts_iter)) as now:
sr.update_meta(9, 99)
self.assertEqual(9, sr.object_count)
self.assertEqual(99, sr.bytes_used)
self.assertEqual(now, sr.meta_timestamp)
with mock_timestamp_now(next(self.ts_iter)) as now:
sr.update_meta(99, 999, None)
self.assertEqual(99, sr.object_count)
self.assertEqual(999, sr.bytes_used)
self.assertEqual(now, sr.meta_timestamp)
ts_2 = next(self.ts_iter)
sr.update_meta(21, 2112, ts_2)
self.assertEqual(21, sr.object_count)
self.assertEqual(2112, sr.bytes_used)
self.assertEqual(ts_2, sr.meta_timestamp)
sr.update_meta('11', '12')
self.assertEqual(11, sr.object_count)
self.assertEqual(12, sr.bytes_used)
def check_bad_args(*args):
with self.assertRaises(ValueError):
sr.update_meta(*args)
check_bad_args('bad', 10)
check_bad_args(10, 'bad')
check_bad_args(10, 11, 'bad')
def test_increment_meta(self):
ts_1 = next(self.ts_iter)
sr = utils.ShardRange('a/test', ts_1, 'l', 'u', 1, 2, None)
with mock_timestamp_now(next(self.ts_iter)) as now:
sr.increment_meta(9, 99)
self.assertEqual(10, sr.object_count)
self.assertEqual(101, sr.bytes_used)
self.assertEqual(now, sr.meta_timestamp)
sr.increment_meta('11', '12')
self.assertEqual(21, sr.object_count)
self.assertEqual(113, sr.bytes_used)
def check_bad_args(*args):
with self.assertRaises(ValueError):
sr.increment_meta(*args)
check_bad_args('bad', 10)
check_bad_args(10, 'bad')
def test_update_tombstones(self):
ts_1 = next(self.ts_iter)
sr = utils.ShardRange('a/test', ts_1, 'l', 'u', 0, 0, None)
self.assertEqual(-1, sr.tombstones)
self.assertFalse(sr.reported)
with mock_timestamp_now(next(self.ts_iter)) as now:
sr.update_tombstones(1)
self.assertEqual(1, sr.tombstones)
self.assertEqual(now, sr.meta_timestamp)
self.assertFalse(sr.reported)
sr.reported = True
with mock_timestamp_now(next(self.ts_iter)) as now:
sr.update_tombstones(3, None)
self.assertEqual(3, sr.tombstones)
self.assertEqual(now, sr.meta_timestamp)
self.assertFalse(sr.reported)
sr.reported = True
ts_2 = next(self.ts_iter)
sr.update_tombstones(5, ts_2)
self.assertEqual(5, sr.tombstones)
self.assertEqual(ts_2, sr.meta_timestamp)
self.assertFalse(sr.reported)
# no change in value -> no change in reported
sr.reported = True
ts_3 = next(self.ts_iter)
sr.update_tombstones(5, ts_3)
self.assertEqual(5, sr.tombstones)
self.assertEqual(ts_3, sr.meta_timestamp)
self.assertTrue(sr.reported)
sr.update_meta('11', '12')
self.assertEqual(11, sr.object_count)
self.assertEqual(12, sr.bytes_used)
def check_bad_args(*args):
with self.assertRaises(ValueError):
sr.update_tombstones(*args)
check_bad_args('bad')
check_bad_args(10, 'bad')
def test_row_count(self):
ts_1 = next(self.ts_iter)
sr = utils.ShardRange('a/test', ts_1, 'l', 'u', 0, 0, None)
self.assertEqual(0, sr.row_count)
sr.update_meta(11, 123)
self.assertEqual(11, sr.row_count)
sr.update_tombstones(13)
self.assertEqual(24, sr.row_count)
sr.update_meta(0, 0)
self.assertEqual(13, sr.row_count)
def test_state_timestamp_setter(self):
ts_1 = next(self.ts_iter)
sr = utils.ShardRange('a/test', ts_1, 'l', 'u', 0, 0, None)
self.assertEqual(ts_1, sr.timestamp)
self.assertEqual(ts_1, sr.state_timestamp)
ts_2 = next(self.ts_iter)
sr.state_timestamp = ts_2
self.assertEqual(ts_1, sr.timestamp)
self.assertEqual(ts_2, sr.state_timestamp)
ts_3 = next(self.ts_iter)
sr.timestamp = ts_3
self.assertEqual(ts_3, sr.timestamp)
self.assertEqual(ts_2, sr.state_timestamp)
# state_timestamp defaults to tracking timestamp
sr.state_timestamp = None
self.assertEqual(ts_3, sr.timestamp)
self.assertEqual(ts_3, sr.state_timestamp)
ts_4 = next(self.ts_iter)
sr.timestamp = ts_4
self.assertEqual(ts_4, sr.timestamp)
self.assertEqual(ts_4, sr.state_timestamp)
sr.state_timestamp = 0
self.assertEqual(ts_4, sr.timestamp)
self.assertEqual(utils.Timestamp(0), sr.state_timestamp)
def test_state_setter(self):
for state, state_name in utils.ShardRange.STATES.items():
for test_value in (
state, str(state), state_name, state_name.upper()):
sr = utils.ShardRange('a/test', next(self.ts_iter), 'l', 'u')
sr.state = test_value
actual = sr.state
self.assertEqual(
state, actual,
'Expected %s but got %s for %s' %
(state, actual, test_value)
)
for bad_state in (max(utils.ShardRange.STATES) + 1,
-1, 99, None, 'stringy', 1.1):
sr = utils.ShardRange('a/test', next(self.ts_iter), 'l', 'u')
with self.assertRaises(ValueError) as cm:
sr.state = bad_state
self.assertIn('Invalid state', str(cm.exception))
def test_update_state(self):
sr = utils.ShardRange('a/c', next(self.ts_iter))
old_sr = sr.copy()
self.assertEqual(utils.ShardRange.FOUND, sr.state)
self.assertEqual(dict(sr), dict(old_sr)) # sanity check
for state in utils.ShardRange.STATES:
if state == utils.ShardRange.FOUND:
continue
self.assertTrue(sr.update_state(state))
self.assertEqual(dict(old_sr, state=state), dict(sr))
self.assertFalse(sr.update_state(state))
self.assertEqual(dict(old_sr, state=state), dict(sr))
sr = utils.ShardRange('a/c', next(self.ts_iter))
old_sr = sr.copy()
for state in utils.ShardRange.STATES:
ts = next(self.ts_iter)
self.assertTrue(sr.update_state(state, state_timestamp=ts))
self.assertEqual(dict(old_sr, state=state, state_timestamp=ts),
dict(sr))
def test_resolve_state(self):
for name, number in utils.ShardRange.STATES_BY_NAME.items():
self.assertEqual(
(number, name), utils.ShardRange.resolve_state(name))
self.assertEqual(
(number, name), utils.ShardRange.resolve_state(name.upper()))
self.assertEqual(
(number, name), utils.ShardRange.resolve_state(name.title()))
self.assertEqual(
(number, name), utils.ShardRange.resolve_state(number))
self.assertEqual(
(number, name), utils.ShardRange.resolve_state(str(number)))
def check_bad_value(value):
with self.assertRaises(ValueError) as cm:
utils.ShardRange.resolve_state(value)
self.assertIn('Invalid state %r' % value, str(cm.exception))
check_bad_value(min(utils.ShardRange.STATES) - 1)
check_bad_value(max(utils.ShardRange.STATES) + 1)
check_bad_value('badstate')
def test_epoch_setter(self):
sr = utils.ShardRange('a/c', next(self.ts_iter))
self.assertIsNone(sr.epoch)
ts = next(self.ts_iter)
sr.epoch = ts
self.assertEqual(ts, sr.epoch)
ts = next(self.ts_iter)
sr.epoch = ts.internal
self.assertEqual(ts, sr.epoch)
sr.epoch = None
self.assertIsNone(sr.epoch)
with self.assertRaises(ValueError):
sr.epoch = 'bad'
def test_deleted_setter(self):
sr = utils.ShardRange('a/c', next(self.ts_iter))
for val in (True, 1):
sr.deleted = val
self.assertIs(True, sr.deleted)
for val in (False, 0, None):
sr.deleted = val
self.assertIs(False, sr.deleted)
def test_set_deleted(self):
sr = utils.ShardRange('a/c', next(self.ts_iter))
# initialise other timestamps
sr.update_state(utils.ShardRange.ACTIVE,
state_timestamp=utils.Timestamp.now())
sr.update_meta(1, 2)
old_sr = sr.copy()
self.assertIs(False, sr.deleted) # sanity check
self.assertEqual(dict(sr), dict(old_sr)) # sanity check
with mock_timestamp_now(next(self.ts_iter)) as now:
self.assertTrue(sr.set_deleted())
self.assertEqual(now, sr.timestamp)
self.assertIs(True, sr.deleted)
old_sr_dict = dict(old_sr)
old_sr_dict.pop('deleted')
old_sr_dict.pop('timestamp')
sr_dict = dict(sr)
sr_dict.pop('deleted')
sr_dict.pop('timestamp')
self.assertEqual(old_sr_dict, sr_dict)
# no change
self.assertFalse(sr.set_deleted())
self.assertEqual(now, sr.timestamp)
self.assertIs(True, sr.deleted)
# force timestamp change
with mock_timestamp_now(next(self.ts_iter)) as now:
self.assertTrue(sr.set_deleted(timestamp=now))
self.assertEqual(now, sr.timestamp)
self.assertIs(True, sr.deleted)
def test_lower_setter(self):
sr = utils.ShardRange('a/c', utils.Timestamp.now(), 'b', '')
# sanity checks
self.assertEqual('b', sr.lower_str)
self.assertEqual(sr.MAX, sr.upper)
def do_test(good_value, expected):
sr.lower = good_value
self.assertEqual(expected, sr.lower)
self.assertEqual(sr.MAX, sr.upper)
do_test(utils.ShardRange.MIN, utils.ShardRange.MIN)
do_test(utils.ShardRange.MAX, utils.ShardRange.MAX)
do_test(b'', utils.ShardRange.MIN)
do_test(u'', utils.ShardRange.MIN)
do_test(None, utils.ShardRange.MIN)
do_test(b'a', 'a')
do_test(b'y', 'y')
do_test(u'a', 'a')
do_test(u'y', 'y')
expected = u'\N{SNOWMAN}'
if six.PY2:
expected = expected.encode('utf-8')
with warnings.catch_warnings(record=True) as captured_warnings:
do_test(u'\N{SNOWMAN}', expected)
do_test(u'\N{SNOWMAN}'.encode('utf-8'), expected)
self.assertFalse(captured_warnings)
sr = utils.ShardRange('a/c', utils.Timestamp.now(), 'b', 'y')
sr.lower = ''
self.assertEqual(sr.MIN, sr.lower)
sr = utils.ShardRange('a/c', utils.Timestamp.now(), 'b', 'y')
with self.assertRaises(ValueError) as cm:
sr.lower = 'z'
self.assertIn("must be less than or equal to upper", str(cm.exception))
self.assertEqual('b', sr.lower_str)
self.assertEqual('y', sr.upper_str)
def do_test(bad_value):
with self.assertRaises(TypeError) as cm:
sr.lower = bad_value
self.assertIn("lower must be a string", str(cm.exception))
self.assertEqual('b', sr.lower_str)
self.assertEqual('y', sr.upper_str)
do_test(1)
do_test(1.234)
def test_upper_setter(self):
sr = utils.ShardRange('a/c', utils.Timestamp.now(), '', 'y')
# sanity checks
self.assertEqual(sr.MIN, sr.lower)
self.assertEqual('y', sr.upper_str)
def do_test(good_value, expected):
sr.upper = good_value
self.assertEqual(expected, sr.upper)
self.assertEqual(sr.MIN, sr.lower)
do_test(utils.ShardRange.MIN, utils.ShardRange.MIN)
do_test(utils.ShardRange.MAX, utils.ShardRange.MAX)
do_test(b'', utils.ShardRange.MAX)
do_test(u'', utils.ShardRange.MAX)
do_test(None, utils.ShardRange.MAX)
do_test(b'z', 'z')
do_test(b'b', 'b')
do_test(u'z', 'z')
do_test(u'b', 'b')
expected = u'\N{SNOWMAN}'
if six.PY2:
expected = expected.encode('utf-8')
with warnings.catch_warnings(record=True) as captured_warnings:
do_test(u'\N{SNOWMAN}', expected)
do_test(u'\N{SNOWMAN}'.encode('utf-8'), expected)
self.assertFalse(captured_warnings)
sr = utils.ShardRange('a/c', utils.Timestamp.now(), 'b', 'y')
sr.upper = ''
self.assertEqual(sr.MAX, sr.upper)
sr = utils.ShardRange('a/c', utils.Timestamp.now(), 'b', 'y')
with self.assertRaises(ValueError) as cm:
sr.upper = 'a'
self.assertIn(
"must be greater than or equal to lower",
str(cm.exception))
self.assertEqual('b', sr.lower_str)
self.assertEqual('y', sr.upper_str)
def do_test(bad_value):
with self.assertRaises(TypeError) as cm:
sr.upper = bad_value
self.assertIn("upper must be a string", str(cm.exception))
self.assertEqual('b', sr.lower_str)
self.assertEqual('y', sr.upper_str)
do_test(1)
do_test(1.234)
def test_end_marker(self):
sr = utils.ShardRange('a/c', utils.Timestamp.now(), '', 'y')
self.assertEqual('y\x00', sr.end_marker)
sr = utils.ShardRange('a/c', utils.Timestamp.now(), '', '')
self.assertEqual('', sr.end_marker)
def test_bounds_serialization(self):
sr = utils.ShardRange('a/c', utils.Timestamp.now())
self.assertEqual('a/c', sr.name)
self.assertEqual(utils.ShardRange.MIN, sr.lower)
self.assertEqual('', sr.lower_str)
self.assertEqual(utils.ShardRange.MAX, sr.upper)
self.assertEqual('', sr.upper_str)
self.assertEqual('', sr.end_marker)
lower = u'\u00e4'
upper = u'\u00fb'
sr = utils.ShardRange('a/%s-%s' % (lower, upper),
utils.Timestamp.now(), lower, upper)
exp_lower = lower
exp_upper = upper
if six.PY2:
exp_lower = exp_lower.encode('utf-8')
exp_upper = exp_upper.encode('utf-8')
self.assertEqual(exp_lower, sr.lower)
self.assertEqual(exp_lower, sr.lower_str)
self.assertEqual(exp_upper, sr.upper)
self.assertEqual(exp_upper, sr.upper_str)
self.assertEqual(exp_upper + '\x00', sr.end_marker)
def test_entire_namespace(self):
# test entire range (no boundaries)
entire = utils.ShardRange('a/test', utils.Timestamp.now())
self.assertEqual(utils.ShardRange.MAX, entire.upper)
self.assertEqual(utils.ShardRange.MIN, entire.lower)
self.assertIs(True, entire.entire_namespace())
for x in range(100):
self.assertTrue(str(x) in entire)
self.assertTrue(chr(x) in entire)
for x in ('a', 'z', 'zzzz', '124fsdf', u'\u00e4'):
self.assertTrue(x in entire, '%r should be in %r' % (x, entire))
entire.lower = 'a'
self.assertIs(False, entire.entire_namespace())
def test_comparisons(self):
ts = utils.Timestamp.now().internal
# upper (if provided) *must* be greater than lower
with self.assertRaises(ValueError):
utils.ShardRange('f-a', ts, 'f', 'a')
# test basic boundaries
btoc = utils.ShardRange('a/b-c', ts, 'b', 'c')
atof = utils.ShardRange('a/a-f', ts, 'a', 'f')
ftol = utils.ShardRange('a/f-l', ts, 'f', 'l')
ltor = utils.ShardRange('a/l-r', ts, 'l', 'r')
rtoz = utils.ShardRange('a/r-z', ts, 'r', 'z')
lower = utils.ShardRange('a/lower', ts, '', 'mid')
upper = utils.ShardRange('a/upper', ts, 'mid', '')
entire = utils.ShardRange('a/test', utils.Timestamp.now())
# overlapping ranges
dtof = utils.ShardRange('a/d-f', ts, 'd', 'f')
dtom = utils.ShardRange('a/d-m', ts, 'd', 'm')
# test range > and <
# non-adjacent
self.assertFalse(rtoz < atof)
self.assertTrue(atof < ltor)
self.assertTrue(ltor > atof)
self.assertFalse(ftol > rtoz)
# adjacent
self.assertFalse(rtoz < ltor)
self.assertTrue(ltor < rtoz)
self.assertFalse(ltor > rtoz)
self.assertTrue(rtoz > ltor)
# wholly within
self.assertFalse(btoc < atof)
self.assertFalse(btoc > atof)
self.assertFalse(atof < btoc)
self.assertFalse(atof > btoc)
self.assertFalse(atof < dtof)
self.assertFalse(dtof > atof)
self.assertFalse(atof > dtof)
self.assertFalse(dtof < atof)
self.assertFalse(dtof < dtom)
self.assertFalse(dtof > dtom)
self.assertFalse(dtom > dtof)
self.assertFalse(dtom < dtof)
# overlaps
self.assertFalse(atof < dtom)
self.assertFalse(atof > dtom)
self.assertFalse(ltor > dtom)
# ranges including min/max bounds
self.assertTrue(upper > lower)
self.assertTrue(lower < upper)
self.assertFalse(upper < lower)
self.assertFalse(lower > upper)
self.assertFalse(lower < entire)
self.assertFalse(entire > lower)
self.assertFalse(lower > entire)
self.assertFalse(entire < lower)
self.assertFalse(upper < entire)
self.assertFalse(entire > upper)
self.assertFalse(upper > entire)
self.assertFalse(entire < upper)
self.assertFalse(entire < entire)
self.assertFalse(entire > entire)
# test range < and > to an item
        # a range is > lower and <= upper, so the lower boundary isn't
        # actually included
self.assertTrue(ftol > 'f')
self.assertFalse(atof < 'f')
self.assertTrue(ltor < 'y')
self.assertFalse(ftol < 'f')
self.assertFalse(atof > 'f')
self.assertFalse(ltor > 'y')
self.assertTrue('f' < ftol)
self.assertFalse('f' > atof)
self.assertTrue('y' > ltor)
self.assertFalse('f' > ftol)
self.assertFalse('f' < atof)
self.assertFalse('y' < ltor)
# Now test ranges with only 1 boundary
start_to_l = utils.ShardRange('a/None-l', ts, '', 'l')
l_to_end = utils.ShardRange('a/l-None', ts, 'l', '')
for x in ('l', 'm', 'z', 'zzz1231sd'):
if x == 'l':
self.assertFalse(x in l_to_end)
self.assertFalse(start_to_l < x)
self.assertFalse(x > start_to_l)
else:
self.assertTrue(x in l_to_end)
self.assertTrue(start_to_l < x)
self.assertTrue(x > start_to_l)
# Now test some of the range to range checks with missing boundaries
self.assertFalse(atof < start_to_l)
self.assertFalse(start_to_l < entire)
# Now test ShardRange.overlaps(other)
self.assertTrue(atof.overlaps(atof))
self.assertFalse(atof.overlaps(ftol))
self.assertFalse(ftol.overlaps(atof))
self.assertTrue(atof.overlaps(dtof))
self.assertTrue(dtof.overlaps(atof))
self.assertFalse(dtof.overlaps(ftol))
self.assertTrue(dtom.overlaps(ftol))
self.assertTrue(ftol.overlaps(dtom))
self.assertFalse(start_to_l.overlaps(l_to_end))
def test_contains(self):
ts = utils.Timestamp.now().internal
lower = utils.ShardRange('a/-h', ts, '', 'h')
mid = utils.ShardRange('a/h-p', ts, 'h', 'p')
upper = utils.ShardRange('a/p-', ts, 'p', '')
entire = utils.ShardRange('a/all', ts, '', '')
self.assertTrue('a' in entire)
self.assertTrue('x' in entire)
# the empty string is not a valid object name, so it cannot be in any
# range
self.assertFalse('' in lower)
self.assertFalse('' in upper)
self.assertFalse('' in entire)
self.assertTrue('a' in lower)
self.assertTrue('h' in lower)
self.assertFalse('i' in lower)
self.assertFalse('h' in mid)
self.assertTrue('p' in mid)
self.assertFalse('p' in upper)
self.assertTrue('x' in upper)
self.assertIn(utils.ShardRange.MAX, entire)
self.assertNotIn(utils.ShardRange.MAX, lower)
self.assertIn(utils.ShardRange.MAX, upper)
# lower bound is excluded so MIN cannot be in any range.
self.assertNotIn(utils.ShardRange.MIN, entire)
self.assertNotIn(utils.ShardRange.MIN, upper)
self.assertNotIn(utils.ShardRange.MIN, lower)
def test_includes(self):
ts = utils.Timestamp.now().internal
_to_h = utils.ShardRange('a/-h', ts, '', 'h')
d_to_t = utils.ShardRange('a/d-t', ts, 'd', 't')
d_to_k = utils.ShardRange('a/d-k', ts, 'd', 'k')
e_to_l = utils.ShardRange('a/e-l', ts, 'e', 'l')
k_to_t = utils.ShardRange('a/k-t', ts, 'k', 't')
p_to_ = utils.ShardRange('a/p-', ts, 'p', '')
t_to_ = utils.ShardRange('a/t-', ts, 't', '')
entire = utils.ShardRange('a/all', ts, '', '')
self.assertTrue(entire.includes(entire))
self.assertTrue(d_to_t.includes(d_to_t))
self.assertTrue(_to_h.includes(_to_h))
self.assertTrue(p_to_.includes(p_to_))
self.assertTrue(entire.includes(_to_h))
self.assertTrue(entire.includes(d_to_t))
self.assertTrue(entire.includes(p_to_))
self.assertTrue(d_to_t.includes(d_to_k))
self.assertTrue(d_to_t.includes(e_to_l))
self.assertTrue(d_to_t.includes(k_to_t))
self.assertTrue(p_to_.includes(t_to_))
self.assertFalse(_to_h.includes(d_to_t))
self.assertFalse(p_to_.includes(d_to_t))
self.assertFalse(k_to_t.includes(d_to_k))
self.assertFalse(d_to_k.includes(e_to_l))
self.assertFalse(k_to_t.includes(e_to_l))
self.assertFalse(t_to_.includes(p_to_))
self.assertFalse(_to_h.includes(entire))
self.assertFalse(p_to_.includes(entire))
self.assertFalse(d_to_t.includes(entire))
def test_repr(self):
ts = next(self.ts_iter)
ts.offset = 1234
meta_ts = next(self.ts_iter)
state_ts = next(self.ts_iter)
sr = utils.ShardRange('a/c', ts, 'l', 'u', 100, 1000,
meta_timestamp=meta_ts,
state=utils.ShardRange.ACTIVE,
state_timestamp=state_ts)
self.assertEqual(
"ShardRange<%r to %r as of %s, (100, 1000) as of %s, "
"active as of %s>"
% ('l', 'u',
ts.internal, meta_ts.internal, state_ts.internal), str(sr))
ts.offset = 0
meta_ts.offset = 2
state_ts.offset = 3
sr = utils.ShardRange('a/c', ts, '', '', 100, 1000,
meta_timestamp=meta_ts,
state=utils.ShardRange.FOUND,
state_timestamp=state_ts)
self.assertEqual(
"ShardRange<MinBound to MaxBound as of %s, (100, 1000) as of %s, "
"found as of %s>"
% (ts.internal, meta_ts.internal, state_ts.internal), str(sr))
def test_copy(self):
sr = utils.ShardRange('a/c', next(self.ts_iter), 'x', 'y', 99, 99000,
meta_timestamp=next(self.ts_iter),
state=utils.ShardRange.CREATED,
state_timestamp=next(self.ts_iter))
new = sr.copy()
self.assertEqual(dict(sr), dict(new))
new = sr.copy(deleted=1)
self.assertEqual(dict(sr, deleted=1), dict(new))
new_timestamp = next(self.ts_iter)
new = sr.copy(timestamp=new_timestamp)
self.assertEqual(dict(sr, timestamp=new_timestamp.internal,
meta_timestamp=new_timestamp.internal,
state_timestamp=new_timestamp.internal),
dict(new))
new = sr.copy(timestamp=new_timestamp, object_count=99)
self.assertEqual(dict(sr, timestamp=new_timestamp.internal,
meta_timestamp=new_timestamp.internal,
state_timestamp=new_timestamp.internal,
object_count=99),
dict(new))
def test_make_path(self):
ts = utils.Timestamp.now()
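        # expected paths look like
        # <account>/<root>-<md5(parent)>-<timestamp>-<index>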
actual = utils.ShardRange.make_path('a', 'root', 'parent', ts, 0)
parent_hash = md5(b'parent', usedforsecurity=False).hexdigest()
self.assertEqual('a/root-%s-%s-0' % (parent_hash, ts.internal), actual)
actual = utils.ShardRange.make_path('a', 'root', 'parent', ts, 3)
self.assertEqual('a/root-%s-%s-3' % (parent_hash, ts.internal), actual)
actual = utils.ShardRange.make_path('a', 'root', 'parent', ts, '3')
self.assertEqual('a/root-%s-%s-3' % (parent_hash, ts.internal), actual)
actual = utils.ShardRange.make_path(
'a', 'root', 'parent', ts.internal, '3')
self.assertEqual('a/root-%s-%s-3' % (parent_hash, ts.internal), actual)
actual = utils.ShardRange.make_path('a', 'root', 'parent', ts, 'foo')
self.assertEqual('a/root-%s-%s-foo' % (parent_hash, ts.internal),
actual)
def test_expand(self):
bounds = (('', 'd'), ('d', 'k'), ('k', 't'), ('t', ''))
donors = [
utils.ShardRange('a/c-%d' % i, utils.Timestamp.now(), b[0], b[1])
for i, b in enumerate(bounds)
]
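        # expand() should widen the acceptor's bounds to cover the donors
        # and return False when the acceptor already spans all of them.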
acceptor = utils.ShardRange('a/c-acc', utils.Timestamp.now(), 'f', 's')
self.assertTrue(acceptor.expand(donors[:1]))
self.assertEqual((utils.ShardRange.MIN, 's'),
(acceptor.lower, acceptor.upper))
acceptor = utils.ShardRange('a/c-acc', utils.Timestamp.now(), 'f', 's')
self.assertTrue(acceptor.expand(donors[:2]))
self.assertEqual((utils.ShardRange.MIN, 's'),
(acceptor.lower, acceptor.upper))
acceptor = utils.ShardRange('a/c-acc', utils.Timestamp.now(), 'f', 's')
self.assertTrue(acceptor.expand(donors[1:3]))
self.assertEqual(('d', 't'),
(acceptor.lower, acceptor.upper))
acceptor = utils.ShardRange('a/c-acc', utils.Timestamp.now(), 'f', 's')
self.assertTrue(acceptor.expand(donors))
self.assertEqual((utils.ShardRange.MIN, utils.ShardRange.MAX),
(acceptor.lower, acceptor.upper))
acceptor = utils.ShardRange('a/c-acc', utils.Timestamp.now(), 'f', 's')
self.assertTrue(acceptor.expand(donors[1:2] + donors[3:]))
self.assertEqual(('d', utils.ShardRange.MAX),
(acceptor.lower, acceptor.upper))
acceptor = utils.ShardRange('a/c-acc', utils.Timestamp.now(), '', 'd')
self.assertFalse(acceptor.expand(donors[:1]))
self.assertEqual((utils.ShardRange.MIN, 'd'),
(acceptor.lower, acceptor.upper))
acceptor = utils.ShardRange('a/c-acc', utils.Timestamp.now(), 'b', 'v')
self.assertFalse(acceptor.expand(donors[1:3]))
self.assertEqual(('b', 'v'),
(acceptor.lower, acceptor.upper))
class TestShardRangeList(unittest.TestCase):
def setUp(self):
self.ts_iter = make_timestamp_iter()
self.t1 = next(self.ts_iter)
self.t2 = next(self.ts_iter)
self.ts_iter = make_timestamp_iter()
self.shard_ranges = [
utils.ShardRange('a/b', self.t1, 'a', 'b',
object_count=2, bytes_used=22, tombstones=222),
utils.ShardRange('b/c', self.t2, 'b', 'c',
object_count=4, bytes_used=44, tombstones=444),
utils.ShardRange('c/y', self.t1, 'c', 'y',
object_count=6, bytes_used=66),
]
def test_init(self):
srl = ShardRangeList()
self.assertEqual(0, len(srl))
self.assertEqual(utils.ShardRange.MIN, srl.lower)
self.assertEqual(utils.ShardRange.MIN, srl.upper)
self.assertEqual(0, srl.object_count)
self.assertEqual(0, srl.bytes_used)
self.assertEqual(0, srl.row_count)
def test_init_with_list(self):
srl = ShardRangeList(self.shard_ranges[:2])
self.assertEqual(2, len(srl))
self.assertEqual('a', srl.lower)
self.assertEqual('c', srl.upper)
self.assertEqual(6, srl.object_count)
self.assertEqual(66, srl.bytes_used)
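        # row_count sums object_count and tombstones: (2 + 222) + (4 + 444)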
self.assertEqual(672, srl.row_count)
srl.append(self.shard_ranges[2])
self.assertEqual(3, len(srl))
self.assertEqual('a', srl.lower)
self.assertEqual('y', srl.upper)
self.assertEqual(12, srl.object_count)
self.assertEqual(132, srl.bytes_used)
self.assertEqual(-1, self.shard_ranges[2].tombstones) # sanity check
self.assertEqual(678, srl.row_count) # NB: tombstones=-1 not counted
def test_pop(self):
srl = ShardRangeList(self.shard_ranges[:2])
srl.pop()
self.assertEqual(1, len(srl))
self.assertEqual('a', srl.lower)
self.assertEqual('b', srl.upper)
self.assertEqual(2, srl.object_count)
self.assertEqual(22, srl.bytes_used)
self.assertEqual(224, srl.row_count)
def test_slice(self):
srl = ShardRangeList(self.shard_ranges)
sublist = srl[:1]
self.assertIsInstance(sublist, ShardRangeList)
self.assertEqual(1, len(sublist))
self.assertEqual('a', sublist.lower)
self.assertEqual('b', sublist.upper)
self.assertEqual(2, sublist.object_count)
self.assertEqual(22, sublist.bytes_used)
self.assertEqual(224, sublist.row_count)
sublist = srl[1:]
self.assertIsInstance(sublist, ShardRangeList)
self.assertEqual(2, len(sublist))
self.assertEqual('b', sublist.lower)
self.assertEqual('y', sublist.upper)
self.assertEqual(10, sublist.object_count)
self.assertEqual(110, sublist.bytes_used)
self.assertEqual(454, sublist.row_count)
def test_includes(self):
srl = ShardRangeList(self.shard_ranges)
for sr in self.shard_ranges:
self.assertTrue(srl.includes(sr))
self.assertTrue(srl.includes(srl))
sr = utils.ShardRange('a/a', utils.Timestamp.now(), '', 'a')
self.assertFalse(srl.includes(sr))
sr = utils.ShardRange('a/a', utils.Timestamp.now(), '', 'b')
self.assertFalse(srl.includes(sr))
sr = utils.ShardRange('a/z', utils.Timestamp.now(), 'x', 'z')
self.assertFalse(srl.includes(sr))
sr = utils.ShardRange('a/z', utils.Timestamp.now(), 'y', 'z')
self.assertFalse(srl.includes(sr))
sr = utils.ShardRange('a/entire', utils.Timestamp.now(), '', '')
self.assertFalse(srl.includes(sr))
# entire range
srl_entire = ShardRangeList([sr])
self.assertFalse(srl.includes(srl_entire))
# make a fresh instance
sr = utils.ShardRange('a/entire', utils.Timestamp.now(), '', '')
self.assertTrue(srl_entire.includes(sr))
def test_timestamps(self):
srl = ShardRangeList(self.shard_ranges)
self.assertEqual({self.t1, self.t2}, srl.timestamps)
t3 = next(self.ts_iter)
self.shard_ranges[2].timestamp = t3
self.assertEqual({self.t1, self.t2, t3}, srl.timestamps)
srl.pop(0)
self.assertEqual({self.t2, t3}, srl.timestamps)
def test_states(self):
srl = ShardRangeList()
self.assertEqual(set(), srl.states)
srl = ShardRangeList(self.shard_ranges)
self.shard_ranges[0].update_state(
utils.ShardRange.CREATED, next(self.ts_iter))
self.shard_ranges[1].update_state(
utils.ShardRange.CLEAVED, next(self.ts_iter))
self.shard_ranges[2].update_state(
utils.ShardRange.ACTIVE, next(self.ts_iter))
self.assertEqual({utils.ShardRange.CREATED,
utils.ShardRange.CLEAVED,
utils.ShardRange.ACTIVE},
srl.states)
def test_filter(self):
srl = ShardRangeList(self.shard_ranges)
self.assertEqual(self.shard_ranges, srl.filter())
self.assertEqual(self.shard_ranges,
srl.filter(marker='', end_marker=''))
self.assertEqual(self.shard_ranges,
srl.filter(marker=utils.ShardRange.MIN,
end_marker=utils.ShardRange.MAX))
self.assertEqual([], srl.filter(marker=utils.ShardRange.MAX,
end_marker=utils.ShardRange.MIN))
self.assertEqual([], srl.filter(marker=utils.ShardRange.MIN,
end_marker=utils.ShardRange.MIN))
self.assertEqual([], srl.filter(marker=utils.ShardRange.MAX,
end_marker=utils.ShardRange.MAX))
self.assertEqual(self.shard_ranges[:1],
srl.filter(marker='', end_marker='b'))
self.assertEqual(self.shard_ranges[1:3],
srl.filter(marker='b', end_marker='y'))
self.assertEqual([],
srl.filter(marker='y', end_marker='y'))
self.assertEqual([],
srl.filter(marker='y', end_marker='x'))
# includes trumps marker & end_marker
self.assertEqual(self.shard_ranges[0:1],
srl.filter(includes='b', marker='c', end_marker='y'))
self.assertEqual(self.shard_ranges[0:1],
srl.filter(includes='b', marker='', end_marker=''))
self.assertEqual([], srl.filter(includes='z'))
def test_find_lower(self):
srl = ShardRangeList(self.shard_ranges)
self.shard_ranges[0].update_state(
utils.ShardRange.CREATED, next(self.ts_iter))
self.shard_ranges[1].update_state(
utils.ShardRange.CLEAVED, next(self.ts_iter))
self.shard_ranges[2].update_state(
utils.ShardRange.ACTIVE, next(self.ts_iter))
def do_test(states):
return srl.find_lower(lambda sr: sr.state in states)
self.assertEqual(srl.upper,
do_test([utils.ShardRange.FOUND]))
self.assertEqual(self.shard_ranges[0].lower,
do_test([utils.ShardRange.CREATED]))
self.assertEqual(self.shard_ranges[0].lower,
do_test((utils.ShardRange.CREATED,
utils.ShardRange.CLEAVED)))
self.assertEqual(self.shard_ranges[1].lower,
do_test((utils.ShardRange.ACTIVE,
utils.ShardRange.CLEAVED)))
self.assertEqual(self.shard_ranges[2].lower,
do_test([utils.ShardRange.ACTIVE]))
@patch('ctypes.get_errno')
@patch.object(utils, '_sys_posix_fallocate')
@patch.object(utils, '_sys_fallocate')
@patch.object(utils, 'FALLOCATE_RESERVE', 0)
class TestFallocate(unittest.TestCase):
def test_fallocate(self, sys_fallocate_mock,
sys_posix_fallocate_mock, get_errno_mock):
sys_fallocate_mock.available = True
sys_fallocate_mock.return_value = 0
utils.fallocate(1234, 5000 * 2 ** 20)
# We can't use sys_fallocate_mock.assert_called_once_with because no
# two ctypes.c_uint64 objects are equal even if their values are
# equal. Yes, ctypes.c_uint64(123) != ctypes.c_uint64(123).
calls = sys_fallocate_mock.mock_calls
self.assertEqual(len(calls), 1)
args = calls[0][1]
self.assertEqual(len(args), 4)
self.assertEqual(args[0], 1234)
self.assertEqual(args[1], utils.FALLOC_FL_KEEP_SIZE)
self.assertEqual(args[2].value, 0)
self.assertEqual(args[3].value, 5000 * 2 ** 20)
sys_posix_fallocate_mock.assert_not_called()
def test_fallocate_offset(self, sys_fallocate_mock,
sys_posix_fallocate_mock, get_errno_mock):
sys_fallocate_mock.available = True
sys_fallocate_mock.return_value = 0
utils.fallocate(1234, 5000 * 2 ** 20, offset=3 * 2 ** 30)
calls = sys_fallocate_mock.mock_calls
self.assertEqual(len(calls), 1)
args = calls[0][1]
self.assertEqual(len(args), 4)
self.assertEqual(args[0], 1234)
self.assertEqual(args[1], utils.FALLOC_FL_KEEP_SIZE)
self.assertEqual(args[2].value, 3 * 2 ** 30)
self.assertEqual(args[3].value, 5000 * 2 ** 20)
sys_posix_fallocate_mock.assert_not_called()
def test_fallocate_fatal_error(self, sys_fallocate_mock,
sys_posix_fallocate_mock, get_errno_mock):
sys_fallocate_mock.available = True
sys_fallocate_mock.return_value = -1
get_errno_mock.return_value = errno.EIO
with self.assertRaises(OSError) as cm:
utils.fallocate(1234, 5000 * 2 ** 20)
self.assertEqual(cm.exception.errno, errno.EIO)
def test_fallocate_silent_errors(self, sys_fallocate_mock,
sys_posix_fallocate_mock, get_errno_mock):
sys_fallocate_mock.available = True
sys_fallocate_mock.return_value = -1
for silent_error in (0, errno.ENOSYS, errno.EOPNOTSUPP, errno.EINVAL):
get_errno_mock.return_value = silent_error
try:
utils.fallocate(1234, 5678)
except OSError:
self.fail("fallocate() raised an error on %d", silent_error)
def test_posix_fallocate_fallback(self, sys_fallocate_mock,
sys_posix_fallocate_mock,
get_errno_mock):
sys_fallocate_mock.available = False
sys_fallocate_mock.side_effect = NotImplementedError
sys_posix_fallocate_mock.available = True
sys_posix_fallocate_mock.return_value = 0
utils.fallocate(1234, 567890)
sys_fallocate_mock.assert_not_called()
calls = sys_posix_fallocate_mock.mock_calls
self.assertEqual(len(calls), 1)
args = calls[0][1]
self.assertEqual(len(args), 3)
self.assertEqual(args[0], 1234)
self.assertEqual(args[1].value, 0)
self.assertEqual(args[2].value, 567890)
def test_posix_fallocate_offset(self, sys_fallocate_mock,
sys_posix_fallocate_mock, get_errno_mock):
sys_fallocate_mock.available = False
sys_fallocate_mock.side_effect = NotImplementedError
sys_posix_fallocate_mock.available = True
sys_posix_fallocate_mock.return_value = 0
utils.fallocate(1234, 5000 * 2 ** 20, offset=3 * 2 ** 30)
calls = sys_posix_fallocate_mock.mock_calls
self.assertEqual(len(calls), 1)
args = calls[0][1]
self.assertEqual(len(args), 3)
self.assertEqual(args[0], 1234)
self.assertEqual(args[1].value, 3 * 2 ** 30)
self.assertEqual(args[2].value, 5000 * 2 ** 20)
sys_fallocate_mock.assert_not_called()
def test_no_fallocates_available(self, sys_fallocate_mock,
sys_posix_fallocate_mock, get_errno_mock):
sys_fallocate_mock.available = False
sys_posix_fallocate_mock.available = False
with mock.patch("logging.warning") as warning_mock, \
mock.patch.object(utils, "_fallocate_warned_about_missing",
False):
utils.fallocate(321, 654)
utils.fallocate(321, 654)
sys_fallocate_mock.assert_not_called()
sys_posix_fallocate_mock.assert_not_called()
get_errno_mock.assert_not_called()
self.assertEqual(len(warning_mock.mock_calls), 1)
def test_arg_bounds(self, sys_fallocate_mock,
sys_posix_fallocate_mock, get_errno_mock):
sys_fallocate_mock.available = True
sys_fallocate_mock.return_value = 0
with self.assertRaises(ValueError):
utils.fallocate(0, 1 << 64, 0)
with self.assertRaises(ValueError):
utils.fallocate(0, 0, -1)
with self.assertRaises(ValueError):
utils.fallocate(0, 0, 1 << 64)
self.assertEqual([], sys_fallocate_mock.mock_calls)
# sanity check
utils.fallocate(0, 0, 0)
self.assertEqual(
[mock.call(0, utils.FALLOC_FL_KEEP_SIZE, mock.ANY, mock.ANY)],
sys_fallocate_mock.mock_calls)
# Go confirm the ctypes values separately; apparently == doesn't
# work the way you'd expect with ctypes :-/
self.assertEqual(sys_fallocate_mock.mock_calls[0][1][2].value, 0)
self.assertEqual(sys_fallocate_mock.mock_calls[0][1][3].value, 0)
sys_fallocate_mock.reset_mock()
        # a negative size is treated as 0
utils.fallocate(0, -1, 0)
self.assertEqual(
[mock.call(0, utils.FALLOC_FL_KEEP_SIZE, mock.ANY, mock.ANY)],
sys_fallocate_mock.mock_calls)
self.assertEqual(sys_fallocate_mock.mock_calls[0][1][2].value, 0)
self.assertEqual(sys_fallocate_mock.mock_calls[0][1][3].value, 0)
@patch.object(os, 'fstatvfs')
@patch.object(utils, '_sys_fallocate', available=True, return_value=0)
@patch.object(utils, 'FALLOCATE_RESERVE', 0)
@patch.object(utils, 'FALLOCATE_IS_PERCENT', False)
@patch.object(utils, '_fallocate_enabled', True)
class TestFallocateReserve(unittest.TestCase):
def _statvfs_result(self, f_frsize, f_bavail):
# Only 3 values are relevant to us, so use zeros for the rest
f_blocks = 100
return posix.statvfs_result((0, f_frsize, f_blocks, 0, f_bavail,
0, 0, 0, 0, 0))
def test_disabled(self, sys_fallocate_mock, fstatvfs_mock):
utils.disable_fallocate()
utils.fallocate(123, 456)
sys_fallocate_mock.assert_not_called()
fstatvfs_mock.assert_not_called()
def test_zero_reserve(self, sys_fallocate_mock, fstatvfs_mock):
utils.fallocate(123, 456)
fstatvfs_mock.assert_not_called()
self.assertEqual(len(sys_fallocate_mock.mock_calls), 1)
def test_enough_space(self, sys_fallocate_mock, fstatvfs_mock):
# Want 1024 bytes in reserve plus 1023 allocated, and have 2 blocks
# of size 1024 free, so succeed
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('1024')
fstatvfs_mock.return_value = self._statvfs_result(1024, 2)
utils.fallocate(88, 1023)
def test_not_enough_space(self, sys_fallocate_mock, fstatvfs_mock):
# Want 1024 bytes in reserve plus 1024 allocated, and have 2 blocks
# of size 1024 free, so fail
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('1024')
fstatvfs_mock.return_value = self._statvfs_result(1024, 2)
with self.assertRaises(OSError) as catcher:
utils.fallocate(88, 1024)
self.assertEqual(
str(catcher.exception),
'[Errno %d] FALLOCATE_RESERVE fail 1024 <= 1024'
% errno.ENOSPC)
sys_fallocate_mock.assert_not_called()
def test_not_enough_space_large(self, sys_fallocate_mock, fstatvfs_mock):
# Want 1024 bytes in reserve plus 1GB allocated, and have 2 blocks
# of size 1024 free, so fail
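        # the expected message embeds the free space left after the 1 GiB
        # request, (2 * 1024) - (1 << 30), checked against the 1024 reserve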
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('1024')
fstatvfs_mock.return_value = self._statvfs_result(1024, 2)
with self.assertRaises(OSError) as catcher:
utils.fallocate(88, 1 << 30)
self.assertEqual(
str(catcher.exception),
'[Errno %d] FALLOCATE_RESERVE fail %g <= 1024'
% (errno.ENOSPC, ((2 * 1024) - (1 << 30))))
sys_fallocate_mock.assert_not_called()
def test_enough_space_small_blocks(self, sys_fallocate_mock,
fstatvfs_mock):
# Want 1024 bytes in reserve plus 1023 allocated, and have 4 blocks
# of size 512 free, so succeed
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('1024')
fstatvfs_mock.return_value = self._statvfs_result(512, 4)
utils.fallocate(88, 1023)
def test_not_enough_space_small_blocks(self, sys_fallocate_mock,
fstatvfs_mock):
# Want 1024 bytes in reserve plus 1024 allocated, and have 4 blocks
# of size 512 free, so fail
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('1024')
fstatvfs_mock.return_value = self._statvfs_result(512, 4)
with self.assertRaises(OSError) as catcher:
utils.fallocate(88, 1024)
self.assertEqual(
str(catcher.exception),
'[Errno %d] FALLOCATE_RESERVE fail 1024 <= 1024'
% errno.ENOSPC)
sys_fallocate_mock.assert_not_called()
def test_free_space_under_reserve(self, sys_fallocate_mock, fstatvfs_mock):
# Want 2048 bytes in reserve but have only 3 blocks of size 512, so
# allocating even 0 bytes fails
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('2048')
fstatvfs_mock.return_value = self._statvfs_result(512, 3)
with self.assertRaises(OSError) as catcher:
utils.fallocate(88, 0)
self.assertEqual(
str(catcher.exception),
'[Errno %d] FALLOCATE_RESERVE fail 1536 <= 2048'
% errno.ENOSPC)
sys_fallocate_mock.assert_not_called()
def test_all_reserved(self, sys_fallocate_mock, fstatvfs_mock):
# Filesystem is empty, but our reserve is bigger than the
# filesystem, so any allocation will fail
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('9999999999999')
fstatvfs_mock.return_value = self._statvfs_result(1024, 100)
self.assertRaises(OSError, utils.fallocate, 88, 0)
sys_fallocate_mock.assert_not_called()
def test_enough_space_pct(self, sys_fallocate_mock, fstatvfs_mock):
# Want 1% reserved, filesystem has 3/100 blocks of size 1024 free
# and file size is 2047, so succeed
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('1%')
fstatvfs_mock.return_value = self._statvfs_result(1024, 3)
utils.fallocate(88, 2047)
def test_not_enough_space_pct(self, sys_fallocate_mock, fstatvfs_mock):
# Want 1% reserved, filesystem has 3/100 blocks of size 1024 free
# and file size is 2048, so fail
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('1%')
fstatvfs_mock.return_value = self._statvfs_result(1024, 3)
with self.assertRaises(OSError) as catcher:
utils.fallocate(88, 2048)
self.assertEqual(
str(catcher.exception),
'[Errno %d] FALLOCATE_RESERVE fail 1 <= 1'
% errno.ENOSPC)
sys_fallocate_mock.assert_not_called()
def test_all_space_reserved_pct(self, sys_fallocate_mock, fstatvfs_mock):
# Filesystem is empty, but our reserve is the whole filesystem, so
# any allocation will fail
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('100%')
fstatvfs_mock.return_value = self._statvfs_result(1024, 100)
with self.assertRaises(OSError) as catcher:
utils.fallocate(88, 0)
self.assertEqual(
str(catcher.exception),
'[Errno %d] FALLOCATE_RESERVE fail 100 <= 100'
% errno.ENOSPC)
sys_fallocate_mock.assert_not_called()
@patch('ctypes.get_errno')
@patch.object(utils, '_sys_fallocate')
class TestPunchHole(unittest.TestCase):
def test_punch_hole(self, sys_fallocate_mock, get_errno_mock):
sys_fallocate_mock.available = True
sys_fallocate_mock.return_value = 0
utils.punch_hole(123, 456, 789)
calls = sys_fallocate_mock.mock_calls
self.assertEqual(len(calls), 1)
args = calls[0][1]
self.assertEqual(len(args), 4)
self.assertEqual(args[0], 123)
self.assertEqual(
args[1], utils.FALLOC_FL_PUNCH_HOLE | utils.FALLOC_FL_KEEP_SIZE)
self.assertEqual(args[2].value, 456)
self.assertEqual(args[3].value, 789)
def test_error(self, sys_fallocate_mock, get_errno_mock):
sys_fallocate_mock.available = True
sys_fallocate_mock.return_value = -1
get_errno_mock.return_value = errno.EISDIR
with self.assertRaises(OSError) as cm:
utils.punch_hole(123, 456, 789)
self.assertEqual(cm.exception.errno, errno.EISDIR)
def test_arg_bounds(self, sys_fallocate_mock, get_errno_mock):
sys_fallocate_mock.available = True
sys_fallocate_mock.return_value = 0
with self.assertRaises(ValueError):
utils.punch_hole(0, 1, -1)
with self.assertRaises(ValueError):
utils.punch_hole(0, 1 << 64, 1)
with self.assertRaises(ValueError):
utils.punch_hole(0, -1, 1)
with self.assertRaises(ValueError):
utils.punch_hole(0, 1, 0)
with self.assertRaises(ValueError):
utils.punch_hole(0, 1, 1 << 64)
self.assertEqual([], sys_fallocate_mock.mock_calls)
# sanity check
utils.punch_hole(0, 0, 1)
self.assertEqual(
[mock.call(
0, utils.FALLOC_FL_PUNCH_HOLE | utils.FALLOC_FL_KEEP_SIZE,
mock.ANY, mock.ANY)],
sys_fallocate_mock.mock_calls)
# Go confirm the ctypes values separately; apparently == doesn't
# work the way you'd expect with ctypes :-/
self.assertEqual(sys_fallocate_mock.mock_calls[0][1][2].value, 0)
self.assertEqual(sys_fallocate_mock.mock_calls[0][1][3].value, 1)
def test_no_fallocate(self, sys_fallocate_mock, get_errno_mock):
sys_fallocate_mock.available = False
with self.assertRaises(OSError) as cm:
utils.punch_hole(123, 456, 789)
self.assertEqual(cm.exception.errno, errno.ENOTSUP)
class TestPunchHoleReally(unittest.TestCase):
def setUp(self):
if not utils._sys_fallocate.available:
raise unittest.SkipTest("utils._sys_fallocate not available")
def test_punch_a_hole(self):
with TemporaryFile() as tf:
tf.write(b"x" * 64 + b"y" * 64 + b"z" * 64)
tf.flush()
# knock out the first half of the "y"s
utils.punch_hole(tf.fileno(), 64, 32)
tf.seek(0)
contents = tf.read(4096)
self.assertEqual(
contents,
b"x" * 64 + b"\0" * 32 + b"y" * 32 + b"z" * 64)
class Test_LibcWrapper(unittest.TestCase):
def test_available_function(self):
# This should pretty much always exist
getpid_wrapper = utils._LibcWrapper('getpid')
self.assertTrue(getpid_wrapper.available)
self.assertEqual(getpid_wrapper(), os.getpid())
def test_unavailable_function(self):
# This won't exist
no_func_wrapper = utils._LibcWrapper('diffractively_protectorship')
self.assertFalse(no_func_wrapper.available)
self.assertRaises(NotImplementedError, no_func_wrapper)
def test_argument_plumbing(self):
lseek_wrapper = utils._LibcWrapper('lseek')
with TemporaryFile() as tf:
tf.write(b"abcdefgh")
tf.flush()
lseek_wrapper(tf.fileno(),
ctypes.c_uint64(3),
# 0 is SEEK_SET
0)
self.assertEqual(tf.read(100), b"defgh")
class TestWatchdog(unittest.TestCase):
def test_start_stop(self):
w = utils.Watchdog()
w._evt.send = mock.Mock(side_effect=w._evt.send)
gth = object()
with patch('eventlet.greenthread.getcurrent', return_value=gth),\
patch('time.time', return_value=10.0):
            # On the first call _next_expiration is None, so it should
            # unblock the greenthread that is blocked forever
key = w.start(1.0, Timeout)
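            # _timeouts seems to map key -> (timeout, absolute expiration,
            # greenthread, exception class); 1.0s started at t=10.0 -> 11.0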
self.assertIn(key, w._timeouts)
self.assertEqual(w._timeouts[key], (1.0, 11.0, gth, Timeout))
w._evt.send.assert_called_once()
w.stop(key)
self.assertNotIn(key, w._timeouts)
def test_timeout_concurrency(self):
w = utils.Watchdog()
w._evt.send = mock.Mock(side_effect=w._evt.send)
w._evt.wait = mock.Mock()
gth = object()
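        # Timeline: a 5.0s timeout at t=10 expires at 15.0; a second 5.0s
        # timeout at t=12 expires at 17.0 (later, so no wakeup); a 0.5s
        # timeout at t=14 expires at 14.5 (earlier, so the run loop must
        # be woken to shorten its wait).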
w._run()
w._evt.wait.assert_called_once_with(None)
with patch('eventlet.greenthread.getcurrent', return_value=gth):
w._evt.send.reset_mock()
w._evt.wait.reset_mock()
with patch('time.time', return_value=10.00):
                # On the first call _next_expiration is None, so it should
                # unblock the greenthread that is blocked forever
w.start(5.0, Timeout) # Will end at 15.0
w._evt.send.assert_called_once()
with patch('time.time', return_value=10.01):
w._run()
self.assertEqual(15.0, w._next_expiration)
w._evt.wait.assert_called_once_with(15.0 - 10.01)
w._evt.send.reset_mock()
w._evt.wait.reset_mock()
with patch('time.time', return_value=12.00):
# Now _next_expiration is 15.0, it won't unblock greenthread
# because this expiration is later
w.start(5.0, Timeout) # Will end at 17.0
w._evt.send.assert_not_called()
w._evt.send.reset_mock()
w._evt.wait.reset_mock()
with patch('time.time', return_value=14.00):
# Now _next_expiration is still 15.0, it will unblock
# greenthread because this new expiration is 14.5
w.start(0.5, Timeout) # Will end at 14.5
w._evt.send.assert_called_once()
with patch('time.time', return_value=14.01):
w._run()
w._evt.wait.assert_called_once_with(14.5 - 14.01)
self.assertEqual(14.5, w._next_expiration)
# Should wakeup at 14.5
def test_timeout_expire(self):
w = utils.Watchdog()
w._evt.send = mock.Mock() # To avoid it to call get_hub()
w._evt.wait = mock.Mock() # To avoid it to call get_hub()
with patch('eventlet.hubs.get_hub') as m_gh:
with patch('time.time', return_value=10.0):
w.start(5.0, Timeout) # Will end at 15.0
with patch('time.time', return_value=16.0):
w._run()
m_gh.assert_called_once()
m_gh.return_value.schedule_call_global.assert_called_once()
exc = m_gh.return_value.schedule_call_global.call_args[0][2]
self.assertIsInstance(exc, Timeout)
self.assertEqual(exc.seconds, 5.0)
self.assertEqual(None, w._next_expiration)
w._evt.wait.assert_called_once_with(None)
class TestReiterate(unittest.TestCase):
def test_reiterate_consumes_first(self):
test_iter = FakeIterable([1, 2, 3])
reiterated = utils.reiterate(test_iter)
self.assertEqual(1, test_iter.next_call_count)
self.assertEqual(1, next(reiterated))
self.assertEqual(1, test_iter.next_call_count)
self.assertEqual(2, next(reiterated))
self.assertEqual(2, test_iter.next_call_count)
self.assertEqual(3, next(reiterated))
self.assertEqual(3, test_iter.next_call_count)
def test_reiterate_closes(self):
test_iter = FakeIterable([1, 2, 3])
self.assertEqual(0, test_iter.close_call_count)
reiterated = utils.reiterate(test_iter)
self.assertEqual(0, test_iter.close_call_count)
self.assertTrue(hasattr(reiterated, 'close'))
self.assertTrue(callable(reiterated.close))
reiterated.close()
self.assertEqual(1, test_iter.close_call_count)
# empty iter gets closed when reiterated
test_iter = FakeIterable([])
self.assertEqual(0, test_iter.close_call_count)
reiterated = utils.reiterate(test_iter)
self.assertFalse(hasattr(reiterated, 'close'))
self.assertEqual(1, test_iter.close_call_count)
def test_reiterate_list_or_tuple(self):
test_list = [1, 2]
reiterated = utils.reiterate(test_list)
self.assertIs(test_list, reiterated)
test_tuple = (1, 2)
reiterated = utils.reiterate(test_tuple)
self.assertIs(test_tuple, reiterated)
class TestCloseableChain(unittest.TestCase):
def test_closeable_chain_iterates(self):
test_iter1 = FakeIterable([1])
test_iter2 = FakeIterable([2, 3])
chain = utils.CloseableChain(test_iter1, test_iter2)
self.assertEqual([1, 2, 3], [x for x in chain])
chain = utils.CloseableChain([1, 2], [3])
self.assertEqual([1, 2, 3], [x for x in chain])
def test_closeable_chain_closes(self):
test_iter1 = FakeIterable([1])
test_iter2 = FakeIterable([2, 3])
chain = utils.CloseableChain(test_iter1, test_iter2)
self.assertEqual(0, test_iter1.close_call_count)
self.assertEqual(0, test_iter2.close_call_count)
chain.close()
self.assertEqual(1, test_iter1.close_call_count)
self.assertEqual(1, test_iter2.close_call_count)
# check that close is safe to call even when component iters have no
# close
chain = utils.CloseableChain([1, 2], [3])
chain.close()
self.assertEqual([1, 2, 3], [x for x in chain])
# check with generator in the chain
generator_closed = [False]
def gen():
try:
yield 2
yield 3
except GeneratorExit:
generator_closed[0] = True
raise
test_iter1 = FakeIterable([1])
chain = utils.CloseableChain(test_iter1, gen())
self.assertEqual(0, test_iter1.close_call_count)
self.assertFalse(generator_closed[0])
chain.close()
self.assertEqual(1, test_iter1.close_call_count)
# Generator never kicked off, so there's no GeneratorExit
self.assertFalse(generator_closed[0])
test_iter1 = FakeIterable([1])
chain = utils.CloseableChain(gen(), test_iter1)
self.assertEqual(2, next(chain)) # Kick off the generator
self.assertEqual(0, test_iter1.close_call_count)
self.assertFalse(generator_closed[0])
chain.close()
self.assertEqual(1, test_iter1.close_call_count)
self.assertTrue(generator_closed[0])
|
packages_analyzer.py
|
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
import logging
import subprocess
from threading import Thread
import threading
from ambari_commons import OSCheck, OSConst
from ambari_commons import shell
__all__ = ["installedPkgsByName", "allInstalledPackages", "allAvailablePackages", "nameMatch",
"getInstalledRepos", "getInstalledPkgsByRepo", "getInstalledPkgsByNames", "getPackageDetails"]
LIST_INSTALLED_PACKAGES_UBUNTU = "for i in $(dpkg -l |grep ^ii |awk -F' ' '{print $2}'); do apt-cache showpkg \"$i\"|head -3|grep -v '^Versions'| tr -d '()' | awk '{ print $1\" \"$2 }'|sed -e 's/^Package: //;' | paste -d ' ' - -; done"
LIST_AVAILABLE_PACKAGES_UBUNTU = "packages=`for i in $(ls -1 /var/lib/apt/lists | grep -v \"ubuntu.com\") ; do grep ^Package: /var/lib/apt/lists/$i | awk '{print $2}' ; done` ; for i in $packages; do apt-cache showpkg \"$i\"|head -3|grep -v '^Versions'| tr -d '()' | awk '{ print $1\" \"$2 }'|sed -e 's/^Package: //;' | paste -d ' ' - -; done"
logger = logging.getLogger()
# default timeout for async invoked processes
TIMEOUT_SECONDS = 40
def _launch_subprocess(command):
isShell = not isinstance(command, (list, tuple))
return subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=isShell, close_fds=True)
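# subprocessWithTimeout below runs a command via _launch_subprocess (shell mode
# for plain string commands, direct exec for list/tuple commands) and lets a
# watchdog thread kill the whole process tree if the command has not finished
# within TIMEOUT_SECONDS.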
def subprocessWithTimeout(command):
event = threading.Event()
def watchdog_func(command):
event.wait(TIMEOUT_SECONDS)
if command.returncode is None:
logger.error("Task timed out and will be killed")
shell.kill_process_with_children(command.pid)
pass
osStat = _launch_subprocess(command)
logger.debug("Launching watchdog thread")
event.clear()
thread = Thread(target=watchdog_func, args=(osStat, ))
thread.start()
out, err = osStat.communicate()
result = {}
result['out'] = out
result['err'] = err
result['retCode'] = osStat.returncode
event.set()
thread.join()
return result
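# Illustrative use of subprocessWithTimeout (the command mirrors the yum call
# used further down; the surrounding code is hypothetical):
#   res = subprocessWithTimeout(["yum", "list", "installed"])
#   if 0 == res['retCode']:
#     print(res['out'])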
def installedPkgsByName(allInstalledPackages,
pkgName, installedPkgs):
"""
Get all installed package whose name starts with the
strings contained in pkgName
"""
for item in allInstalledPackages:
if item[0].find(pkgName) == 0:
installedPkgs.append(item[0])
def allInstalledPackages(allInstalledPackages):
"""
All installed packages in system
"""
osType = OSCheck.get_os_family()
if OSCheck.is_suse_family():
return _lookUpZypperPackages(
["zypper", "search", "--installed-only", "--details"],
allInstalledPackages)
elif OSCheck.is_redhat_family():
return _lookUpYumPackages(
["yum", "list", "installed"],
'Installed Packages',
allInstalledPackages)
elif OSCheck.is_ubuntu_family():
return _lookUpAptPackages(
LIST_INSTALLED_PACKAGES_UBUNTU,
allInstalledPackages)
def allAvailablePackages(allAvailablePackages):
osType = OSCheck.get_os_family()
if OSCheck.is_suse_family():
return _lookUpZypperPackages(
["zypper", "search", "--uninstalled-only", "--details"],
allAvailablePackages)
elif OSCheck.is_redhat_family():
return _lookUpYumPackages(
["yum", "list", "available"],
'Available Packages',
allAvailablePackages)
elif OSCheck.is_ubuntu_family():
return _lookUpAptPackages(
LIST_AVAILABLE_PACKAGES_UBUNTU,
allAvailablePackages)
def _lookUpAptPackages(command, allPackages):
try:
result = subprocessWithTimeout(command)
if 0 == result['retCode']:
for x in result['out'].split('\n'):
if x.strip():
allPackages.append(x.split(' '))
except:
logger.error("Unexpected error:", sys.exc_info()[0])
def _lookUpYumPackages(command, skipTill, allPackages):
try:
result = subprocessWithTimeout(command)
if 0 == result['retCode']:
lines = result['out'].split('\n')
lines = [line.strip() for line in lines]
items = []
skipIndex = 3
for index in range(len(lines)):
if skipTill in lines[index]:
skipIndex = index + 1
break
for line in lines[skipIndex:]:
items = items + line.strip(' \t\n\r').split()
for i in range(0, len(items), 3):
if items[i + 2].find('@') == 0:
items[i + 2] = items[i + 2][1:]
allPackages.append(items[i:i + 3])
except:
logger.error("Unexpected error:", sys.exc_info()[0])
def _lookUpZypperPackages(command, allPackages):
try:
result = subprocessWithTimeout(command)
if 0 == result['retCode']:
lines = result['out'].split('\n')
lines = [line.strip() for line in lines]
      items = []
      # Default past the end of the output so nothing is parsed when the
      # header separator is missing.
      skipIndex = len(lines)
      for index in range(len(lines)):
        if "--+--" in lines[index]:
          skipIndex = index + 1
          break
for line in lines[skipIndex:]:
items = line.strip(' \t\n\r').split('|')
allPackages.append([items[1].strip(), items[3].strip(), items[5].strip()])
except:
logger.error("Unexpected error:", sys.exc_info()[0])
def nameMatch(lookupName, actualName):
tokens = actualName.strip().split()
for token in tokens:
if token.lower().find(lookupName.lower()) == 0:
return True
return False
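# Illustrative example for nameMatch: nameMatch("ambari", "ambari-server 2.7.0")
# is True because the first whitespace-separated token of the actual name starts
# with the lookup name (comparison is case-insensitive).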
def getInstalledRepos(hintPackages, allPackages, ignoreRepos, repoList):
"""
Gets all installed repos by name based on repos that provide any package
contained in hintPackages
Repos starting with value in ignoreRepos will not be returned
"""
allRepos = []
for hintPackage in hintPackages:
for item in allPackages:
if 0 == item[0].find(hintPackage):
if not item[2] in allRepos:
allRepos.append(item[2])
elif hintPackage[0] == '*':
if item[0].find(hintPackage[1:]) > 0:
if not item[2] in allRepos:
allRepos.append(item[2])
for repo in allRepos:
ignore = False
for ignoredRepo in ignoreRepos:
if nameMatch(ignoredRepo, repo):
ignore = True
if not ignore:
repoList.append(repo)
def getInstalledPkgsByRepo(repos, ignorePackages, installedPackages):
"""
Get all the installed packages from the repos listed in repos
"""
packagesFromRepo = []
packagesToRemove = []
for repo in repos:
subResult = []
for item in installedPackages:
if repo == item[2]:
subResult.append(item[0])
packagesFromRepo = list(set(packagesFromRepo + subResult))
for package in packagesFromRepo:
keepPackage = True
for ignorePackage in ignorePackages:
if nameMatch(ignorePackage, package):
keepPackage = False
break
if keepPackage:
packagesToRemove.append(package)
return packagesToRemove
def getInstalledPkgsByNames(pkgNames, installedPackages):
"""
Gets all installed packages that start with names in pkgNames
"""
packages = []
for pkgName in pkgNames:
subResult = []
installedPkgsByName(installedPackages, pkgName, subResult)
packages = list(set(packages + subResult))
return packages
def getPackageDetails(installedPackages, foundPackages):
"""
Gets the name, version, and repoName for the packages
"""
packageDetails = []
for package in foundPackages:
pkgDetail = {}
for installedPackage in installedPackages:
if package == installedPackage[0]:
pkgDetail['name'] = installedPackage[0]
pkgDetail['version'] = installedPackage[1]
pkgDetail['repoName'] = installedPackage[2]
packageDetails.append(pkgDetail)
return packageDetails
def getReposToRemove(repos, ignoreList):
reposToRemove = []
for repo in repos:
addToRemoveList = True
for ignoreRepo in ignoreList:
if nameMatch(ignoreRepo, repo):
addToRemoveList = False
continue
if addToRemoveList:
reposToRemove.append(repo)
return reposToRemove
|
bluePugs.py
|
#!/usr/bin/python3
# encoding: utf-8
# BluePugs Engine
# By Guanicoe
# guanicoe@pm.me
# https://github.com/guanicoe/Blue-pugs-engine
from collections import deque
from urllib.parse import urlsplit
from billiard import Process, Value
from bs4 import BeautifulSoup
import concurrent.futures as futures
import pandas as pd
import requests
import hashlib
import argparse
import lxml
import zmq
import json
import time
import os
import re
import uuid
import signal
#Custom modules
if __name__ == "__main__":
import config
logpath = 'bluePugs.log'
else:
import bluepugs.engine.config as config
logpath = os.path.join(config.LOG_DIRECTORY, 'bluePugs.log')
import logging
# DEBUG: Detailed information, typically of interest only when diagnosing problems.
# INFO: Confirmation that things are working as expected.
# WARNING: An indication that something unexpected happened, or indicative of some problem in the near future (e.g. ‘disk space low’). The software is still working as expected.
# ERROR: Due to a more serious problem, the software has not been able to perform some function.
# CRITICAL: A serious error, indicating that the program itself may be unable to continue running.
logLevel = {"DEBUG": logging.DEBUG,
"INFO": logging.INFO,
"WARNING": logging.WARNING,
"ERROR": logging.ERROR,
"CRITICAL": logging.CRITICAL}
logger = logging.getLogger(__name__)
logger.setLevel(logLevel[config.LOG_LEVEL])
formatter = logging.Formatter('%(asctime)s - %(levelname)s:%(module)s:%(name)s:%(funcName)s --- %(message)s --- [%(lineno)d]')
file_handler = logging.FileHandler(logpath)
file_handler.setLevel(logging.INFO)
file_handler.setFormatter(formatter)
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.DEBUG)
stream_handler.setFormatter(formatter)
logger.addHandler(file_handler)
logger.addHandler(stream_handler)
global KILLING
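# ZMQ socket layout used by the classes below (all offsets are relative to the
# base port, data.port, set in setParam):
#   base   : master PULL  <- worker/producer/sink PUSH (control messages up)
#   base+1 : master PUB   -> worker/producer/sink SUB  (control messages down)
#   base+2 : producer PUSH -> worker PULL   (work queue)
#   base+3 : sink PUSH     -> producer PULL (aggregated results)
#   base+4 : worker PUSH   -> sink PULL     (per-page results)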
class SetupZMQ():
def connectZMQ(self,):
self.context = zmq.Context()
self.health = True
self.masterPUSH = self.context.socket(zmq.PUSH)
self.masterPUSH.connect(f"tcp://127.0.0.1:{self.port}")#5551")
logger.debug(f"Successful connection to masterPUSH on port {self.port} - {self.masterPUSH}")
self.masterSUB = self.context.socket(zmq.SUB)
self.masterSUB.connect(f"tcp://127.0.0.1:{self.port+1}")#5553")
self.masterSUB.setsockopt_string(zmq.SUBSCRIBE, self.who)
logger.debug(f"Successful connection to masterSUB on port {self.port+1} - {self.masterSUB}")
def zmqProducer(self):
self.zmq_socket = self.context.socket(zmq.PUSH)
self.zmq_socket.bind(f"tcp://127.0.0.1:{self.port+2}")#5557")
logger.debug(f"Successful connection to zmq_socket on port {self.port+2} - {self.zmq_socket}")
self.get_sink = self.context.socket(zmq.PULL)
self.get_sink.connect(f"tcp://127.0.0.1:{self.port+3}")#5559")
logger.debug(f"Successful connection to get_sink on port {self.port+3} - {self.get_sink}")
return [self.zmq_socket, self.get_sink]
def zmqWorker(self):
        # receive work
self.work_receiver = self.context.socket(zmq.PULL)
self.work_receiver.connect(f"tcp://127.0.0.1:{self.port+2}")#5557")
logger.debug(f"Successful connection to work_receiver on port {self.port+2} - {self.work_receiver}")
self.consumer_sender = self.context.socket(zmq.PUSH)
self.consumer_sender.connect(f"tcp://127.0.0.1:{self.port+4}")#:5558")
logger.debug(f"Successful connection to consumer_sender on port {self.port+4} - {self.consumer_sender}")
return [self.work_receiver, self.consumer_sender]
def zmqSink(self):
self.results_receiver = self.context.socket(zmq.PULL)
self.results_receiver.bind(f"tcp://127.0.0.1:{self.port+4}")#:5558")
logger.debug(f"Successful connection to results_receiver on port {self.port+4} - {self.results_receiver}")
self.report_socket = self.context.socket(zmq.PUSH)
self.report_socket.bind(f"tcp://127.0.0.1:{self.port+3}")#5559")
logger.debug(f"Successful connection to report_socket on port {self.port+3} - {self.report_socket}")
return [self.results_receiver, self.report_socket]
def zmqMaster(self):
self.masterPULL = self.context.socket(zmq.PULL)
self.masterPULL.bind(f"tcp://127.0.0.1:{self.port}")#5551")
logger.debug(f"Successful connection to masterPULL on port {self.port} - {self.masterPULL}")
self.socketPUB = self.context.socket(zmq.PUB)
self.socketPUB.bind(f"tcp://127.0.0.1:{self.port+1}")#5553")
logger.debug(f"Successful connection to socketPUB on port {self.port+1} - {self.socketPUB}")
return [self.masterPULL]
def generatPoller(self, sockets):
self.poller = zmq.Poller()
for socket in sockets:
self.poller.register(socket, zmq.POLLIN)
logger.debug(f"Register {socket} OK")
def sendToMaster(self, what):
message = {"name": self.who, "state": what}
self.masterPUSH.send_json(json.dumps(message))
logger.info(f"Sent message: {message}")
def gracefullyKill(self, tellMaster = True):
if tellMaster:
self.sendToMaster("quit")
logger.warning(f"Killing process: {self.who}")
# self.masterPUSH.close()
# self.masterSUB.close()
# self.work_receiver.close()
# self.consumer_sender.close()
# self.context.term()
self.context.destroy(linger=0)
logger.warning(f"Process: {self.who} killed!")
def interpretMaster(self):
recv = self.masterSUB.recv_string()
if recv.split(' ')[1] == 'kill':
logger.warning('Closing service, received kill signal from masterSUB')
self.health = False
else:
logger.error(f'Received unknown message from masterSUB: {recv}')
def countFails(self):
self.fails += 1
logger.warning(f"The {self.who}'s poller socket has timedout {self.fails} time(s) in a row.")
if self.fails >= 5:
logger.critical(f"The {self.who}'s poller socket has timedout {self.fails} times in a row and is being killed.")
self.health = False
class KillSwitch(SetupZMQ):
def __init__(self, data):
self.who = "ks"
self.port = data.port
try:
self.connectZMQ()
except Exception as e:
logger.exception(f"Failed to initialise ZMQ: {e}.")
self.health = False
self.gracefullyKill()
def __call__(self):
print("SENDING KILL SIGNAL")
self.gracefullyKill()
class Workers(SetupZMQ):
def __init__(self, data):
self.data = {"state": False}
self.fails = 0
self.who = "worker"
self.port = data.port
try:
self.connectZMQ()
self.generatPoller(self.zmqWorker())
except Exception as e:
logger.exception(f"Failed to initialise ZMQ: {e}.")
self.health = False
self.gracefullyKill()
if self.health:
self.sendToMaster("start")
self.mainLoop()
else:
self.gracefullyKill()
def timeout(timelimit):
def decorator(func):
def decorated(*args, **kwargs):
with futures.ThreadPoolExecutor(max_workers=1) as executor:
future = executor.submit(func, *args, **kwargs)
try:
result = future.result(timelimit)
except futures.TimeoutError:
logger.error(f'Timeout called on {func.__qualname__}')
result = kwargs.get('timeout_data')
executor._threads.clear()
futures.thread._threads_queues.clear()
return result
return decorated
return decorator
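    # timeout() above is a decorator factory: the wrapped call runs in a
    # single-thread executor and, if it exceeds the time limit, the supplied
    # timeout_data default is returned instead of raising (see creatSoup and
    # readhtml below, both wrapped with @timeout(30)).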
@timeout(30)
def creatSoup(self, work, timeout_data={ "state": False,
"content_type": None,
"response": None,
"oldsoup": "",
"error": "Timeout"}):
typelist = ['text/html']
url, oldsoup = work['url'], work['oldsoup']
try:
response = requests.get(url, headers=config.HEADER)
soup = BeautifulSoup(response.text, 'lxml')
hash = hashlib.md5(str(soup).encode()).hexdigest()
content_type = response.headers.get('content-type')
if hash in oldsoup:
return {"state": False, "content_type": content_type,
"response": None, "oldsoup": oldsoup, "error": None}
else:
oldsoup.append(hash)
if any([tps in content_type for tps in typelist]):
output = {"state": True, "content_type": content_type,
"response": response, "oldsoup": oldsoup, "error": None}
else:
output = {"state": False, "content_type": content_type,
"response": None, "oldsoup": oldsoup, "error": None}
except Exception as e:
output = {"state": False, "content_type": None,
"response": None, "oldsoup": oldsoup, "error": str(e)}
logger.warning(f"Exception hit on url: {url} - error reads: {e}")
return output
def cleanurl(self, url):
parts = urlsplit(url)
base_url = "{0.scheme}://{0.netloc}/".format(parts)
if '/' in parts.path:
path = url[:url.rfind('/')+1]
else:
path = url
return parts, base_url, path
@timeout(30)
def readhtml(self, response, work, timeout_data=[[], []]):
excludeCHAR = ["/", "+", "*", "`", "%", "=", "#", "{", "}", "(", ")", "[",
"]", "'", "domain.com", 'email.com']
new_emails = set(re.findall(config.RGX, response.text, re.I))
falsepos = set()
for email in new_emails:
falsepos.update([email for e in excludeCHAR if e in email])
new_emails -= falsepos
soup = BeautifulSoup(response.text, 'lxml')
if not work['domaines']:
include = []
else:
include = work['domaines']
parts, base_url, path = self.cleanurl(work['url'])
links = []
for anchor in soup.find_all("a"):
if "href" in anchor.attrs:
link = anchor.attrs["href"]
else:
link = ''
if link.startswith('//'):
link = link[2:]
if link.startswith('www.'):
link = "http://"+link
if link.startswith('/'):
link = base_url + link
else:
link = base_url + "/" + link
if not link.startswith('http'):
link = path + link
if not any(ext in link for ext in config.BLACKLIST['URLS']):
if any(inc in link for inc in include):
if link not in work['unscraped'] + work['scraped']:
links.append(link)
return [links, list(new_emails)]
def mainLoop(self):
while self.health:
socks = dict(self.poller.poll(config.TIMEOUT_CONSTANT))
if socks.get(self.work_receiver) == zmq.POLLIN:
work = json.loads(self.work_receiver.recv_json())
output = {
"initUrl": work['url'],
"emaildict": None,
"linksSet": None,
"oldsoup": None,
"empty": True,
"error": False,
}
try:
extension = work['url'].split('.')[-1].lower()
if extension not in config.BLACKLIST['EXTENSIONS'] :
data = self.creatSoup(work)
if data is None:
output['error'] = "data is none, error in creatSoup"
elif data['state']:
linksSet, emailsSet = self.readhtml(data['response'], work)
output = {
"initUrl": work['url'],
"emaildict": [{"email": email, "url": work['url']} for email in emailsSet],
"linksSet": linksSet,
"oldsoup": data['oldsoup'],
"empty": False,
"error": False,
}
else:
output['error'] = data['error']
except Exception as e:
output['error'] = True
                    logger.exception(f'Exception hit when undertaking job {e}. Work: {work}')
self.consumer_sender.send_json(json.dumps(output))
elif socks.get(self.masterSUB) == zmq.POLLIN:
self.interpretMaster()
else:
self.countFails()
self.gracefullyKill()
class Sink(SetupZMQ):
"""
    PARAM :: data - the parameter object carrying the <port> used for starting the sink.
    The Sink class listens for results from the workers, collects their data and
    sends it back to the producer. We first initialise a few counters, then extract
    the port from the argument <data>. The class we inherit from is used to connect
    to the different zmq sockets and to generate the poller object. If all this is
    successful, we send a "start" message to MASTER and start the main loop,
    otherwise we kill the sink.
    The main loop is simple. While <health> is true, we poll the sockets. This is
    non-blocking apart from a large timeout (config.TIMEOUT_CONSTANT, about one
    minute) to allow for polls where nothing arrives. We allow 5 consecutive failed
    polls before setting <self.health> to false, hence killing the sink.
    If we get a message from <results_receiver>, we take this message, add 1 to our
    success counter and send it on to the PRODUCER.
    If we get a message from <masterSUB>, we run the interpretMaster function, which
    only listens for a kill signal and sets <self.health> to false if one is
    received; otherwise we log the unknown message.
"""
def __init__(self, data):
self.count = 0
self.fails = 0
self.who = "sink"
self.port = data.port
try:
self.connectZMQ()
self.generatPoller(self.zmqSink())
except Exception as e:
logger.exception(f"Failed to initialise ZMQ: {e}.")
self.health = False
self.gracefullyKill()
if self.health:
self.sendToMaster("start")
self.mainLoop()
else:
self.gracefullyKill()
def mainLoop(self):
while self.health:
socks = dict(self.poller.poll(config.TIMEOUT_CONSTANT))
if socks.get(self.results_receiver) == zmq.POLLIN:
recv = json.loads(self.results_receiver.recv_json())
self.count += 1
recv['count'] = self.count
self.report_socket.send_json(json.dumps(recv))
elif socks.get(self.masterSUB) == zmq.POLLIN:
self.interpretMaster()
else:
self.countFails()
self.gracefullyKill()
class Master(SetupZMQ):
def __init__(self, data):
self.count, self.sink, self.producer = 0, 0, 0
self.fails = 0
self.who = "master"
self.port = data.port
self.workers = data.workers
try:
self.connectZMQ()
self.generatPoller(self.zmqMaster())
except Exception as e:
logger.exception(f"Failed to initialise ZMQ: {e}.")
self.health = False
if self.health:
self.startLoop()
self.mainLoop()
else:
self.gracefullyKill(tellMaster = False)
def pubKillSockets(self, destinations):
for who in destinations:
self.socketPUB.send_string(f"{who} kill")
def startLoop(self):
start_time = time.time()
logger.debug(f"Listening for {self.workers} workers, producer and sink. Timeout set to {self.workers*3}s")
while True:
if time.time() - start_time > self.workers*5:
logger.critical("Processes did not start -- TIMEOUT")
self.health = False
self.pubKillSockets(['producer', 'worker', 'sink'])
break
if self.count < self.workers or not self.sink or not self.producer:
socks = dict(self.poller.poll(1000))
if socks.get(self.masterPULL) == zmq.POLLIN:
recv = json.loads(self.masterPULL.recv_json())
if recv == {'name': 'worker', 'state': 'start'}:
self.count += 1
logger.info(f"{self.count}/{self.workers} worker connected")
elif recv == {'name': 'sink', 'state': 'start'}:
logger.info(f"Sink connected")
self.sink = True
elif recv == {'name': 'producer', 'state': 'start'}:
logger.info(f"Producer connected")
self.producer = True
elif self.count == self.workers and self.sink and self.producer:
logger.info(f"[+] MASTER - Let's go!")
break
def mainLoop(self):
self.socketPUB.send_string("producer go")
while self.health:
socks = dict(self.poller.poll(1000))
if socks.get(self.masterPULL) == zmq.POLLIN:
recv = json.loads(self.masterPULL.recv_json())
if recv == {'name': 'producer', 'state': 'quit'}:
logger.warning(f"[i] MASTER - received quit message from {recv['name']}")
self.pubKillSockets(['worker', 'sink'])
break
elif recv == {'name': 'worker', 'state': 'quit'}:
logger.warning(f"[i] MASTER - received quit message from {recv['name']}")
self.pubKillSockets(['producer', 'worker', 'sink'])
break
elif recv == {'name': 'sink', 'state': 'quit'}:
logger.warning(f"[i] MASTER - received quit message from {recv['name']}")
self.pubKillSockets(['producer', 'worker'])
break
elif recv == {'name': 'ks', 'state': 'quit'}:
logger.warning(f"[i] MASTER - received quit message from {recv['name']}")
self.pubKillSockets(['producer', 'worker', 'sink'])
break
else:
logger.error(f"[?] MASTER - poller triggered but not understood: {recv}")
self.gracefullyKill(tellMaster = False)
class Producer(SetupZMQ):
def __init__(self, data):
self.called = False
self.fails = 0
self.who = "producer"
self.data = data
self.port = data.port
self.work_list = deque([])
self.emaildict = []
self.continueLoop = True
self.scrapedLength = 0
try:
self.connectZMQ()
self.generatPoller(self.zmqProducer()+[self.masterPUSH])
self.startLoop()
except Exception as e:
logger.exception(f"Failed to initialise ZMQ: {e}.")
self.health = False
self.gracefullyKill()
if self.health:
self.mainLoop()
else:
self.gracefullyKill()
def startLoop(self):
logger.debug('[i] PRODUCER - sending ready message to MASTER')
self.sendToMaster("start")
logger.debug('[i] PRODUCER - waiting green light from MASTER')
self.masterSUB.recv_string()
# while self.health:
# socks = dict(self.poller.poll(config.TIMEOUT_CONSTANT))
# if socks.get(self.masterPUSH) == zmq.POLLIN:
# logger.debug('[i] PRODUCER - waiting green light from MASTER')
# self.masterSUB.recv_string()
# else:
# self.countFails()
def mainLoop(self):
logger.info('[i] PRODUCER - starting main loop')
unscraped = deque([self.data.url])
scraped = set()
oldsoup = set()
queue = 0
count = 0
while self.health and self.continueLoop:
try:
self.creatWorkList(unscraped, scraped, oldsoup)
socks = dict(self.poller.poll(100))
if socks.get(self.masterSUB) == zmq.POLLIN:
self.interpretMaster()
elif socks.get(self.get_sink) == zmq.POLLIN:
logger.debug("Receiving data from sink")
sink = json.loads(self.get_sink.recv_json())
queue -= 1
count = sink['count']
if not sink['empty']:
self.emaildict = self.emaildict + sink['emaildict']
for link in sink['linksSet']:
if link not in list(scraped) + list(unscraped):
unscraped.append(link)
logger.info(f"Current scraped {len(scraped)} | Emails found: {len(self.emaildict)}")
self.saveResult(self.emaildict, self.data.outputDir)
elif len(self.work_list):
logger.debug(f'PRODUCER - Consuming queue : {queue} | {len(self.work_list)}')
while queue <= config.QUEUE and len(self.work_list):
work = self.work_list.popleft()
queue += 1
self.zmq_socket.send_json(json.dumps(work))
elif count == len(scraped) and queue == 0 and count > 0:
logger.info(f'PRODUCER - saving results and quitting')
self.masterPUSH.send_json(json.dumps({"name": "producer",
"state": "done"}))
self.saveResult(self.emaildict, self.data.outputDir)
break
except Exception as e:
logger.exception(f'PRODUCER - Big error, sending kill signal. Reason: {e}')
self.saveResult(self.emaildict, self.data.outputDir)
self.health = False
self.gracefullyKill()
def creatWorkList(self, unscraped, scraped, oldsoup):
while len(unscraped) and self.continueLoop:
if len(scraped) >= self.data.limit-1:
self.continueLoop = False
url = unscraped.popleft()
scraped.add(url)
work = {
"url": url,
"oldsoup": list(oldsoup),
"domaines": self.data.domains,
'scraped': list(scraped),
'unscraped': list(unscraped)
}
self.work_list.append(work)
self.scrapedLength = len(scraped)
    def saveResult(self, emaildict, outputDir):
        df = pd.DataFrame(emaildict, columns=["email", "url"])
        try:
            df.to_csv(os.path.join(outputDir, "email_list.csv"), index=False)
            logger.debug(f'Emails cached: {len(df)}')
        except Exception as e:
            logger.error(f'Could not save dfs: {e}')
def workYouBastard(self):
return self.emaildict, self.scrapedLength
class setParam():
def __init__(self, param):
# Format URL
self.urlsplit = urlsplit(param['url'])
self.url = f"{self.urlsplit.scheme}://{self.urlsplit.netloc}"
self.domains = param['domain']
if __name__ == '__main__':
self.folder = self.domains[0]
else:
self.folder = param['reference'].replace("-", "_")
        # Set constants
self.workers = param['workers']
# Set limit
self.limit = param['limit']
self.port = 5000 + param['port']
# set Output
self.cwd = os.getcwd()
self.listdir = os.listdir(self.cwd)
self.outputDir = os.path.join(self.cwd, 'engine/cache', self.folder)
if not os.path.exists(self.outputDir):
os.makedirs(self.outputDir)
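# setParam expects a dict shaped like the one built in the __main__ block at the
# bottom of this file; illustrative values only:
#   {"url": "https://example.com", "domain": ["example.com"],
#    "reference": str(uuid.uuid4()), "workers": 10, "limit": 1000, "port": 0}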
def processMaster(data):
master = Master(data)
def processWorker(data):
worker = Workers(data)
def processSink(data):
sink = Sink(data)
def main(param):
KILLING = False
KILLING2 = False
original_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_IGN)
data = setParam(param=param)
if __name__ == "__main__":
printParam(data)
logger.critical(f"######################### NEW JOB: {data.folder} #########################")
# file_handler_job = logging.FileHandler(os.path.join(data.outputDir, 'log'))
# file_handler_job.setLevel(logging.INFO)
# file_handler_job.setFormatter(formatter)
# logger.addHandler(file_handler_job)
processes = []
p = Process(target=processMaster, args=(data,), name="MASTER")
p.start()
processes.append(p)
logger.info('[i] MASTER started')
for n in range(data.workers):
p = Process(target=processWorker, args=(data,), name="WORKER")
p.start()
processes.append(p)
logger.info(f'[i] WORKER {n+1}/{data.workers} started')
p = Process(target=processSink, args=(data,), name="SINK")
p.start()
processes.append(p)
logger.info(f'[i] Sink started')
ksObj = KillSwitch(data)
    def killswitch(a, b):
ksObj()
ksObj()
signal.signal(signal.SIGINT, killswitch)
emails, nmbPgsScraped = Producer(data).workYouBastard()
logger.info("[i] Finalising, \t")
for p in processes:
p.terminate()
logger.info('[i] Done')
return emails, nmbPgsScraped
def welcome():
logo = f"""
$$$$$$$$ $$$$ $$$$ $$$$ $$$$$$ $$$$$$$ $$$$ $$$$ $$$$$$ $$$$$
$$$$$$$$$ $$$$ $$$$ $$$$ $$$$$$$$ $$$$$$$$ $$$$ $$$$ $$$$$$$$$ $$$$$$$
$$$$ $$$$ $$$$ $$$$ $$$$ $$$$ $$$$ $$$$ $$$ $$$$ $$$$ $$$$ $$$$
$$$$$$$$ $$$$ $$$$ $$$$ $$$$$$$$$$ $$$$$$$$ $$$$ $$$$ $$$$ $$$$$$ $$$$$$$
$$$$ $$$$ $$$$ $$$$ $$$$ $$$$ $$$$$$$ $$$$ $$$$ $$$$ $$$$$ $$$$ $$
$$$$$$$$$ $$$$$$$ $$$$$$$$ $$$$ $$$ $$$$$$$$ $$$$$$$$$$ $$$$$$$ $$$$
$$$$$$$$ $$$$$$$ $$$$$$ $$$$$$ $$$ $$$$$$ $$$$$$$ $$$$$ $$
v{config.VERSION} guanicoe
"""
print(logo)
def printParam(parameters):
param = f"""
###########################################################################
Base URL: {parameters.url}
Domains : {parameters.domains}
Number workers: {parameters.workers}
Limit set: {parameters.limit}
Output: {parameters.outputDir}
###########################################################################
"""
print(param)
if __name__ == '__main__':
welcome()
parser = argparse.ArgumentParser(description="""
This small utility script was made to crawl websites for email addresses.
It uses multiprocessing threads to get multiple workers to scrape the web pages,
extract emails and links, and dumps them in a *.csv file.
""")
parser.add_argument('-u', '--url', type=str,
required=True, help='Url to crawl')
parser.add_argument('-d', '--domain', nargs="+", default=False,
required=True, help="""Domain name to keep in scope (ex: -d domain1,
domain2). The first domain will be used as name
for output. """
)
parser.add_argument('-w', '--workers', type=int, default=10,
help='Number of workers (default: 10)')
parser.add_argument('-l', '--limit', type=int, default=1000,
help="""Limite the number of pages to crawl
(default: 1000)"""
)
parser.add_argument('-o', '--output-dir', type=str,
help="""Specify which directory to save the date.
(default is URL)"""
)
parser.add_argument('--version', action='version',
version=config.VERSION, help='Returns the version number')
args = parser.parse_args()
param = {
"url": args.url,
"domain": args.domain,
"reference": str(uuid.uuid4()),
"workers": args.workers,
"limit": args.limit,
"port": 0,
}
emails = main(param)
print(emails)
|
test_run_example.py
|
'''
This test checks whether a `run_ogcore_example.py` run of the model is still
running (has not broken down) after 5 minutes (300 seconds).
'''
import multiprocessing
import time
import os
import sys
import importlib.util
from pathlib import Path
import pytest
def call_run_ogcore_example():
cur_path = os.path.split(os.path.abspath(__file__))[0]
path = Path(cur_path)
roe_fldr = os.path.join(path.parent.parent, "run_examples")
roe_file_path = os.path.join(roe_fldr, "run_ogcore_example.py")
spec = importlib.util.spec_from_file_location(
'run_ogcore_example.py', roe_file_path)
roe_module = importlib.util.module_from_spec(spec)
sys.modules['run_ogcore_example.py'] = roe_module
spec.loader.exec_module(roe_module)
roe_module.main()
@pytest.mark.local
def test_run_ogcore_example(f=call_run_ogcore_example):
p = multiprocessing.Process(
target=f, name="run_ogcore_example", args=())
p.start()
time.sleep(300)
if p.is_alive():
p.terminate()
p.join()
timetest = True
else:
print("run_ogcore_example did not run for minimum time")
timetest = False
print('timetest ==', timetest)
assert timetest
|
run_search.py
|
import argparse
from timeit import default_timer as timer
from aimacode.search import InstrumentedProblem
from aimacode.search import (breadth_first_search, astar_search,
breadth_first_tree_search, depth_first_graph_search, uniform_cost_search,
greedy_best_first_graph_search, depth_limited_search,
recursive_best_first_search)
from my_air_cargo_problems import air_cargo_p1, air_cargo_p2, air_cargo_p3
from threading import Thread
import functools
from os.path import join
from pickle import dump
PROBLEM_CHOICE_MSG = """
Select from the following list of air cargo problems. You may choose more than
one by entering multiple selections separated by spaces.
"""
SEARCH_METHOD_CHOICE_MSG = """
Select from the following list of search functions. You may choose more than
one by entering multiple selections separated by spaces.
"""
INVALID_ARG_MSG = """
You must either use the -m flag to run in manual mode, or use both the -p and
-s flags to specify a list of problems and search algorithms to run. Valid
choices for each include:
"""
_TIMEOUT = 600
PROBLEMS = [["Air Cargo Problem 1", air_cargo_p1],
["Air Cargo Problem 2", air_cargo_p2],
["Air Cargo Problem 3", air_cargo_p3]]
SEARCHES = [["breadth_first_search", breadth_first_search, ""],
['breadth_first_tree_search', breadth_first_tree_search, ""],
['depth_first_graph_search', depth_first_graph_search, ""],
['depth_limited_search', depth_limited_search, ""],
['uniform_cost_search', uniform_cost_search, ""],
['recursive_best_first_search', recursive_best_first_search, 'h_1'],
['greedy_best_first_graph_search', greedy_best_first_graph_search, 'h_1'],
['astar_search_h_1', astar_search, 'h_1'],
['astar_search_h_ignore_preconditions', astar_search, 'h_ignore_preconditions'],
['astar_search_h_pg_levelsum', astar_search, 'h_pg_levelsum'],
]
class PrintableProblem(InstrumentedProblem):
""" InstrumentedProblem keeps track of stats during search, and this
class modifies the print output of those statistics for air cargo
problems.
"""
def __repr__(self):
return '{:^10d} {:^10d} {:^10d}'.format(self.succs, self.goal_tests, self.states)
def timeout(timeout):
def deco(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
res = [Exception('function [%s] timeout [%s seconds] exceeded!' % (func.__name__, timeout))]
def newFunc():
try:
res[0] = func(*args, **kwargs)
except Exception as e:
res[0] = e
t = Thread(target=newFunc)
t.daemon = True
try:
t.start()
t.join(timeout)
except Exception as je:
print('error starting thread')
raise je
ret = res[0]
if isinstance(ret, BaseException):
raise ret
return ret
return wrapper
return deco
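# Illustrative use of the timeout decorator, mirroring how search_timeout()
# below applies it (problem_instance is a placeholder name):
#   node = timeout(timeout=_TIMEOUT)(breadth_first_search)(problem_instance)
# The wrapped call raises if the search does not finish within _TIMEOUT seconds;
# search_timeout catches that and records elapsed = _TIMEOUT.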
def search_timeout(ip, search_function, parameter=None):
try:
start = timer()
if parameter is not None:
result = timeout(timeout=_TIMEOUT)(search_function)(ip, parameter)
else:
result = timeout(timeout=_TIMEOUT)(search_function)(ip)
end = timer()
elapsed = end - start
except:
result = None
elapsed = _TIMEOUT
return result, elapsed
def run_search(problem, search_function, parameter=None):
ip = PrintableProblem(problem)
node, elapsed = search_timeout(ip, search_function, parameter)
print("\nExpansions Goal Tests New Nodes")
print("{}\n".format(ip))
show_solution(node, elapsed)
print()
log = {
'expansions':ip.succs,
'goal_tests':ip.goal_tests,
'new_nodes':ip.states,
'elapsed_time':elapsed,
'solution':['{}{}'.format(action.name, action.args) for action in node.solution()] if node else None
}
return log
def manual():
print(PROBLEM_CHOICE_MSG)
for idx, (name, _) in enumerate(PROBLEMS):
print(" {!s}. {}".format(idx+1, name))
p_choices = input("> ").split()
print(SEARCH_METHOD_CHOICE_MSG)
for idx, (name, _, heuristic) in enumerate(SEARCHES):
print(" {!s}. {} {}".format(idx+1, name, heuristic))
s_choices = input("> ").split()
main(p_choices, s_choices)
print("\nYou can run this selection again automatically from the command " +
"line\nwith the following command:")
print("\n python {} -p {} -s {}\n".format(__file__,
" ".join(p_choices),
" ".join(s_choices)))
def main(p_choices, s_choices):
problems = [PROBLEMS[i-1] for i in map(int, p_choices)]
searches = [SEARCHES[i-1] for i in map(int, s_choices)]
log = {p[0]:{s[0]:{} for s in searches} for p in problems}
for pname, p in problems:
for sname, s, h in searches:
hstring = h if not h else " with {}".format(h)
print("\nSolving {} using {}{}...".format(pname, sname, hstring))
_p = p()
_h = None if not h else getattr(_p, h)
log[pname][sname] = run_search(_p, s, _h)
    # Write the collected benchmark results to a pickle file.
with open('benchmark.pkl', 'wb') as f:
dump(log, f)
def show_solution(node, elapsed_time):
if node is None:
print("The selected planner did not find a solution for this problem. " +
"Make sure you have completed the AirCargoProblem implementation " +
"and pass all unit tests first.")
else:
print("Plan length: {} Time elapsed in seconds: {}".format(len(node.solution()), elapsed_time))
for action in node.solution():
print("{}{}".format(action.name, action.args))
# if __name__=="__main__":
# parser = argparse.ArgumentParser(description="Solve air cargo planning problems " +
# "using a variety of state space search methods including uninformed, greedy, " +
# "and informed heuristic search.")
# parser.add_argument('-m', '--manual', action="store_true",
# help="Interactively select the problems and searches to run.")
# parser.add_argument('-p', '--problems', nargs="+", choices=range(1, len(PROBLEMS)+1), type=int, metavar='',
# help="Specify the indices of the problems to solve as a list of space separated values. Choose from: {!s}".format(list(range(1, len(PROBLEMS)+1))))
# parser.add_argument('-s', '--searches', nargs="+", choices=range(1, len(SEARCHES)+1), type=int, metavar='',
# help="Specify the indices of the search algorithms to use as a list of space separated values. Choose from: {!s}".format(list(range(1, len(SEARCHES)+1))))
# args = parser.parse_args()
#
# if args.manual:
# manual()
# elif args.problems and args.searches:
# main(list(sorted(set(args.problems))), list(sorted(set((args.searches)))))
# else:
# print()
# parser.print_help()
# print(INVALID_ARG_MSG)
# print("Problems\n-----------------")
# for idx, (name, _) in enumerate(PROBLEMS):
# print(" {!s}. {}".format(idx+1, name))
# print()
# print("Search Algorithms\n-----------------")
# for idx, (name, _, heuristic) in enumerate(SEARCHES):
# print(" {!s}. {} {}".format(idx+1, name, heuristic))
# print()
# print("Use manual mode for interactive selection:\n\n\tpython run_search.py -m\n")
if __name__ == '__main__':
main(['1', '2', '3'], ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10'])
|
auth.py
|
# coding: utf-8
from __future__ import print_function, unicode_literals
import bottle
import os
from threading import Thread, Event
import webbrowser
from wsgiref.simple_server import WSGIServer, WSGIRequestHandler, make_server
from boxsdk import OAuth2
CLIENT_ID = 'g5llkevn2tlidg9gplfc2mpsuvl0ht0k' # Insert Box client ID here
CLIENT_SECRET = 'Ry9GxUkf9sSS43Vn9PIGPELEVfh4nEWT' # Insert Box client secret here
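# authenticate() below starts a short-lived Bottle/WSGI server on localhost:8080
# to catch Box's OAuth2 redirect, opens the authorization URL in a browser,
# waits for the auth code, exchanges it for access/refresh tokens, and then
# shuts the local server down.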
def authenticate(oauth_class=OAuth2):
class StoppableWSGIServer(bottle.ServerAdapter):
def __init__(self, *args, **kwargs):
super(StoppableWSGIServer, self).__init__(*args, **kwargs)
self._server = None
def run(self, app):
server_cls = self.options.get('server_class', WSGIServer)
handler_cls = self.options.get('handler_class', WSGIRequestHandler)
self._server = make_server(self.host, self.port, app, server_cls, handler_cls)
self._server.serve_forever()
def stop(self):
self._server.shutdown()
auth_code = {}
auth_code_is_available = Event()
local_oauth_redirect = bottle.Bottle()
@local_oauth_redirect.get('/')
def get_token():
auth_code['auth_code'] = bottle.request.query.code
auth_code['state'] = bottle.request.query.state
auth_code_is_available.set()
local_server = StoppableWSGIServer(host='localhost', port=8080)
server_thread = Thread(target=lambda: local_oauth_redirect.run(server=local_server))
server_thread.start()
oauth = oauth_class(
client_id=CLIENT_ID,
client_secret=CLIENT_SECRET,
)
auth_url, csrf_token = oauth.get_authorization_url('http://localhost:8080')
webbrowser.open(auth_url)
auth_code_is_available.wait()
local_server.stop()
assert auth_code['state'] == csrf_token
access_token, refresh_token = oauth.authenticate(auth_code['auth_code'])
print('access_token: ' + access_token)
print('refresh_token: ' + refresh_token)
return oauth, access_token, refresh_token
if __name__ == '__main__':
authenticate()
os._exit(0)
|
server.py
|
#!/bin/python3
'''
This starts the socket server to which things connect to play the game
'''
import socketserver
import socket # pylint: disable=unused-import
import threading
import time
import random
import sys
import logging
import os.path
try:
import ujson as json
except:
import json
import battlecode as bc
NUM_PLAYERS = 4
PKEYS = {
int(bc.Planet.Earth): {
int(bc.Team.Red): 0,
int(bc.Team.Blue): 1,
},
int(bc.Planet.Mars): {
int(bc.Team.Red): 2,
int(bc.Team.Blue): 3,
}
}
def _key(p):
p = p['player']
return PKEYS[int(p.planet)][int(p.team)]
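# PKEYS/_key assign each (planet, team) pair a fixed ordering index
# (Earth-Red, Earth-Blue, Mars-Red, Mars-Blue) so players can be sorted
# deterministically, e.g. when the terminal viewer prints per-player sections.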
BUILD_TIMEOUT = 60
TIMEOUT = 50 # seconds
class TimeoutError(Exception):
pass
class Game(object): # pylint: disable=too-many-instance-attributes
'''
    This class contains the game information and is created at the beginning
    of the process.
    It handles talking to the rust engine and sending data to the client.
    It also processes the data received from the client, but the actual
    reception is done by the ReceiveHandler and socket server.
'''
def __init__(self, game_map: bc.GameMap, logging_level=logging.DEBUG,
logging_file="server.log", time_pool=10000, time_additional=50,
terminal_viewer=False, map_name="unknown",
extra_delay=0):
self.terminal_viewer = terminal_viewer
self.extra_delay = extra_delay
self.time_pool = time_pool/1000.
self.time_additional = time_additional/1000.
logging.basicConfig(filename=logging_file, level=logging_level)
        '''
        Initialize the Game object.
        Args:
            game_map: the map to start the game on
            time_pool / time_additional: per-player time budget and per-turn
                bonus, both given in milliseconds
        '''
self.players = [] # Array containing the player ids
# Dict taking player id and giving bool of log in
self.player_logged = {}
# Dict taking player id and giving amount of time left as float
self.times = {}
self.disconnected = False
# Initialize the players
for index in range(NUM_PLAYERS):
new_id = random.randrange(10**30)
self.players.append({'id':new_id})
self.players[-1]['player'] = bc.Player(bc.Team.Red if index % 2 == 0 else bc.Team.Blue, bc.Planet.Earth if index < 2 else bc.Planet.Mars)
self.players[-1]['running_stats'] = {
"tl": time_pool,
"atu": 0,
"lng": "?",
"bld": True
}
self.players[-1]['built_successfully'] = False
self.player_logged[new_id] = False
self.times[new_id] = self.time_pool
self.started = False
self.game_over = False
# Lock thread running player should hold
self.current_player_index = 0
self.turn_events = [threading.Event() for _ in range(len(self.players))]
self.map = game_map
self.manager = bc.GameController.new_manager(self.map)
for player in self.players:
player['start_message'] = self.manager.start_game(player['player']).to_json()
self.viewer_messages = []
manager_start_message = self.manager.initial_start_turn_message(int(1000 * self.time_pool))
self.manager_viewer_messages = []
self.manager_viewer_messages.append(self.manager.manager_viewer_message())
self.last_message = manager_start_message.start_turn.to_json()
self.viewer_messages.append(manager_start_message.viewer.to_json())
self.initialized = 0
self.map_name = map_name
self.start_time = time.time()
def state_report(self):
name = self.map_name
if '/' in name:
name = name[name.rfind('/') + 1:]
if '.' in name:
name = name[:name.find('.')]
game = {
"id": 0, #unknown
"map": name,
"round": self.manager.round(),
"time": int((time.time() - self.start_time) * 1000),
"red": {
"id": 0,
},
"blue": {
"id": 0,
}
}
for player in self.players:
p = player["player"]
t = "red" if p.team == bc.Team.Red else "blue"
p = "earth" if p.planet == bc.Planet.Earth else "mars"
game[t][p] = player["running_stats"]
return game
def player_id2index(self, client_id):
for i in range(len(self.players)):
            if self.players[i]['id'] == client_id:
return i
raise Exception("Invalid id")
def get_player(self, client_id):
return self.players[self.player_id2index(client_id)]
@property
def num_log_in(self):
'''
Returns the number of people who have been logged in
'''
total = 0
for key in self.player_logged:
if self.player_logged[key]:
total += 1
return total
def verify_login(self, unpacked_data: str):
'''
        This function verifies the login and then logs the player code in,
        adding them to the game state.
        Args:
            unpacked_data: The parsed login message received from the client
        Return:
            The client id if the login was successful, otherwise an error string
'''
client_id = int(unpacked_data['client_id'])
# Check if they are in our list of clients
if client_id not in [player['id'] for player in self.players]:
return "Client id Mismatch"
# Check if they logged in already
if self.player_logged[client_id]:
return "Already Logged In"
self.player_logged[client_id] = True
# Check if all the players are logged in and then start the game
logging.info("Player logged in: %s", self.player_logged)
if len(self.players) == self.num_log_in:
self.start_game()
return client_id
def set_player_turn(self, player_index):
self.current_player_index = player_index
self.turn_events[player_index].set()
def start_game(self):
'''
This code handles starting the game. Anything that is meant to be
triggered when a game starts is stored here.
'''
if self.terminal_viewer and sys.platform != 'win32':
# Clear the entire screen
sys.stdout.write("\033[2J")
# Init the player who starts and then tell everyone we started
self.current_player_index = 0
self.set_player_turn(self.current_player_index)
self.started = True
return
def end_turn(self):
'''
        This function hands control to the next player by setting that player's
        turn event and, when the terminal viewer is enabled, redraws the board.
'''
if self.terminal_viewer:
if sys.platform == 'win32':
# Windows terminal only supports escape codes starting from Windows 10 in the 'Threshold 2' update.
# So fall back to other commands to ensure compatibility
os.system('cls')
else:
# Move the cursor to coordinate (0,0) on the screen.
                # Compared to clearing the entire screen, this reduces flicker.
# See https://en.wikipedia.org/wiki/ANSI_escape_code
sys.stdout.write("\033[0;0H")
# os.system('clear')
print('[rnd: {}] [rK: {}] [bK: {}]'.format(
self.manager.round(),
self.manager.manager_karbonite(bc.Team.Red),
self.manager.manager_karbonite(bc.Team.Blue),
))
self.manager.print_game_ansi()
if sys.platform != 'win32':
# Clear the screen from the cursor to the end of the screen.
# Just in case some text has been left over there from earlier frames.
sys.stdout.write("\033[J")
for player in sorted(self.players, key=_key):
p = player['player']
print('-- [{}{}] --'.format('e' if p.planet == bc.Planet.Earth else 'm', 'r' if p.team == bc.Team.Red else 'b'))
#logs = player['logger'].logs.getvalue()[-1000:].splitlines()[-5:]
#for line in logs:
# print(line)
if self.extra_delay:
import time
time.sleep(self.extra_delay / 1000.)
# Increment to the next player
self.current_player_index = (self.current_player_index + 1) % len(self.players)
self.set_player_turn(self.current_player_index)
def get_viewer_messages(self):
'''
A generator for the viewer messages
'''
# TODO check this works with the way the engine works
max_yield_item = 0
while not self.game_over or max_yield_item != len(self.viewer_messages):
if len(self.viewer_messages) > max_yield_item:
new_max = len(self.viewer_messages)
for i in range(max_yield_item, new_max):
yield self.viewer_messages[i]
max_yield_item = new_max
time.sleep(0.1)
def start_turn(self, client_id: int):
'''
        This is a blocking function that waits until it is client_id's turn to
start the game. It attempts to take the game lock and then checks to see
if the client_id matches the next player id. If it does it returns and
the player can start running.
This also handles waking the docker instances to start computing
'''
logging.debug("Client %s: entered start turn", client_id)
exit_well = False
player_index = self.player_id2index(client_id)
while not self.game_over:
if self.turn_events[player_index].wait(timeout=0.1):
self.turn_events[player_index].clear()
assert(self.current_player_index == player_index)
self.times[client_id] += self.time_additional
return True
return False
def make_action(self, turn_message: bc.TurnMessage, client_id: int, diff_time: float):
'''
Take action data and give it to the engine
        Args:
            turn_message: the parsed TurnMessage received from the client
            client_id: the id of the client whose turn this is
            diff_time: how long the client took this turn, in seconds
'''
# get the time left of the next player to go
next_index = (self.player_id2index(client_id) + 1) % len(self.players)
next_client_id = self.players[next_index]['id']
projected_time_ms = int(1000 * (self.times[next_client_id] + self.time_additional))
# interact with the engine
application = self.manager.apply_turn(turn_message, projected_time_ms)
self.last_message = application.start_turn.to_json()
self.viewer_messages.append(application.viewer.to_json())
self.manager_viewer_messages.append(self.manager.manager_viewer_message())
self.times[client_id] -= diff_time
return
def create_receive_handler(game: Game, dockers, use_docker: bool,
is_unix_stream: bool) \
-> socketserver.BaseRequestHandler:
'''
    Create a class that will be used as a receive handler
    Args:
        game: The game the receive handler should operate on
        dockers: A map of the docker sandboxes, keyed by client id
        use_docker: if True sleep and wake with docker, otherwise don't use
            docker. Useful for testing the socket server
Return:
A ReceiveHandler class
'''
class ReceiveHandler(socketserver.BaseRequestHandler):
'''
This class overrides the default handling method in socketServer, so it
calls what we want
'''
def __init__(self, *args, **kwargs):
'''
Hidden init
'''
self.game = game
self.dockers = dockers
self.client_id = 0
self.error = ""
self.logged_in = False
self.is_unix_stream = is_unix_stream
super(ReceiveHandler, self).__init__(*args, **kwargs)
def get_next_message(self) -> object:
'''
            Returns the next newline-delimited message sent over the socket
            Returns:
                The raw decoded string; JSON parsing is left to the caller
'''
recv_socket = self.request
game = self.game
wrapped_socket = recv_socket.makefile('rwb', 1)
logging.debug("Client %s: Waiting for next message", self.client_id)
try:
data = next(wrapped_socket)
except (StopIteration, IOError):
print("{} has not sent message for {} seconds, assuming they're dead".format(
self.game.get_player(self.client_id)['player'],
TIMEOUT
))
wrapped_socket.close()
recv_socket.close()
if bc.Team.Red == self.game.get_player(self.client_id)['player'].team:
self.game.winner = 'player2'
elif bc.Team.Blue == self.game.get_player(self.client_id)['player'].team:
self.game.winner = 'player1'
else:
print("Determining match by coin toss.")
self.game.winner = 'player1' if random.random() > 0.5 else 'player2'
self.game.disconnected = True
self.game.game_over = True
raise TimeoutError()
except KeyboardInterrupt:
wrapped_socket.close()
recv_socket.close()
if bc.Team.Red == self.game.get_player(self.client_id)['player'].team:
self.game.winner = 'player2'
elif bc.Team.Blue == self.game.get_player(self.client_id)['player'].team:
self.game.winner = 'player1'
else:
print("Determining match by coin toss.")
self.game.winner = 'player1' if random.random() > 0.5 else 'player2'
self.game.disconnected = True
self.game.game_over = True
raise KeyboardInterrupt()
finally:
wrapped_socket.close()
data = data.decode("utf-8").strip()
return data
#unpacked_data = json.loads(data)
#return unpacked_data
def send_message(self, obj: object) -> None:
'''
Sends newline delimited message to socket
            The object (already a JSON string, or the bytes of one) has a newline
            appended, is encoded and then sent.
Args:
Obj: The object that wants to be serialized and sent over
Returns:
None
'''
send_socket = self.request
if isinstance(obj, bytes):
obj = obj.decode()
message = obj + "\n"
encoded_message = message.encode()
logging.debug("Client %s: Sending message %s", self.client_id,
encoded_message)
wrapped_socket = send_socket.makefile('rwb', 1)
try:
wrapped_socket.write(encoded_message)
except IOError:
wrapped_socket.close()
send_socket.close()
print("{} has not accepted message for {} seconds, assuming they're dead".format(
[p for p in self.game.players if p['id'] == self.client_id][0]['player'],
TIMEOUT
))
if bc.Team.Red == self.game.get_player(self.client_id)['player'].team:
self.game.winner = 'player2'
                elif bc.Team.Blue == self.game.get_player(self.client_id)['player'].team:
self.game.winner = 'player1'
else:
print("Determining match by coin toss.")
self.game.winner = 'player1' if random.random() > 0.5 else 'player2'
self.game.disconnected = True
self.game.game_over = True
raise TimeoutError()
except KeyboardInterrupt:
wrapped_socket.close()
send_socket.close()
if bc.Team.Red == self.game.get_player(self.client_id)['player'].team:
self.game.winner = 'player2'
                elif bc.Team.Blue == self.game.get_player(self.client_id)['player'].team:
self.game.winner = 'player1'
else:
print("Determining match by coin toss.")
self.game.winner = 'player1' if random.random() > 0.5 else 'player2'
self.game.disconnected = True
self.game.game_over = True
raise KeyboardInterrupt()
finally:
wrapped_socket.close()
return
def message(self, state_diff):
'''
Compress the current state into a message that will be sent to the
client
'''
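            # The outgoing envelope built below is a JSON object with four
            # fields: logged_in (bool), client_id, error (JSON null or a
            # string), and message (the engine's start-turn JSON, already
            # serialized).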
if self.error == "":
error = "null"
else:
self.docker.destroy()
if state_diff == "":
state_diff = '""'
if isinstance(state_diff, bytes):
state_diff = state_diff.decode()
if self.logged_in:
logged_in = "true"
else:
logged_in = "false"
message = '{{"logged_in":{},"client_id":"{}","error":{},"message":{}}}'.format(logged_in, self.client_id, error, state_diff)
return message
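        # Illustrative example of the envelope message() builds (values are
        # placeholders): a successful login ack with no state diff serializes to
        # '{"logged_in":true,"client_id":"42","error":null,"message":""}'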
def player_handler(self):
'''
This is the handler for socket connections from players
'''
self.logged_in = False
logging.debug("Client connected to server")
self.request.settimeout(TIMEOUT)
TIMEDOUTLOG = False
# Handle Login phase
while not self.logged_in and not self.game.game_over:
# do the json parsing ourself instead of handing it off to rust
unpacked_data = json.loads(self.get_next_message())
verify_out = self.game.verify_login(unpacked_data)
self.error = ""
if not isinstance(verify_out, int):
self.error = verify_out
logging.warning("Client failed to log in error: %s",
self.client_id)
else:
logging.info("Client %s: logged in succesfully", self.client_id)
self.logged_in = True
self.client_id = verify_out
self.game.get_player(self.client_id)['built_successfully'] = True
log_success = self.message("")
self.send_message(log_success)
if self.game.game_over:
return
logging.debug("Client %s: Spinning waiting for game to start",
self.client_id)
while not self.game.started and not self.game.game_over:
# Spin while waiting for game to start
time.sleep(0.05)
logging.info("Client %s: Game started", self.client_id)
my_sandbox = dockers[self.client_id]
running_stats = self.game.get_player(self.client_id)['running_stats']
# average time used, in seconds
atu = 0
while self.game.started and not self.game.game_over:
# This is the loop that the code will always remain in
# Blocks until it this clients turn
if not self.game.start_turn(self.client_id):
self.request.close()
return
if self.game.manager.is_over():
self.game.game_over = True
self.game.end_turn()
self.request.close()
return
logging.debug("Client %s: Started turn", self.client_id)
if self.game.initialized > 3:
start_turn_msg = self.message(self.game.last_message)
else:
state_diff = self.game.players[self.game.current_player_index]['start_message']
start_turn_msg = self.message(state_diff)
running_stats["lng"] = my_sandbox.guess_language()
running_stats["bld"] = False
if self.game.initialized <= 3:
my_sandbox.unpause()
self.send_message(start_turn_msg)
self.game.initialized += 1
self.game.end_turn()
continue
if self.game.times[self.client_id] > 0:
my_sandbox.unpause()
start_time = time.perf_counter()
self.send_message(start_turn_msg)
data = self.get_next_message()
end_time = time.perf_counter()
diff_time = end_time-start_time
my_sandbox.pause()
try:
sent_message = bc.SentMessage.from_json(data)
except Exception as e:
print("Error deserializing JSON")
print(e)
print("Killing player...")
if bc.Team.Red == self.game.get_player(self.client_id)['player'].team:
self.game.winner = 'player2'
elif bc.Team.Blue ==self.game.get_player(self.client_id)['player'].team:
self.game.winner = 'player1'
else:
print("Determining match by coin toss.")
self.game.winner = 'player1' if random.random() > 0.5 else 'player2'
self.game.disconnected = True
                        self.game.game_over = True
                        # No valid turn could be decoded, so don't fall through to the
                        # assert below with an undefined sent_message; finish up and bail.
                        self.game.end_turn()
                        self.request.close()
                        return
assert int(sent_message.client_id) == self.client_id, \
"Wrong client id: {}, should be: {}".format(sent_message.client_id, self.client_id)
turn_message = sent_message.turn_message
else:
if not TIMEDOUTLOG:
TIMEDOUTLOG = True
self.game.players[self.game.current_player_index]['logger'](b'PLAYER HAS TIMED OUT!!!')
# 1 second; never let them play again
diff_time = 1
turn_message = bc.TurnMessage.from_json('{"changes":[]}')
atu = atu * .9 + diff_time * .1
# convert to ms
running_stats["tl"] = int(self.game.times[self.client_id] * 1000)
running_stats["atu"] = int(atu * 1000)
self.game.make_action(turn_message, self.client_id, diff_time)
self.game.end_turn()
def viewer_handler(self):
'''
This handles the connection to the viewer
'''
for message in self.game.get_viewer_messages():
# TODO check this schema works for the viewer
self.send_message(message)
def handle(self):
'''
This does all the processing of the data we receive and we spend our
time in this function.
'''
if self.is_unix_stream:
try:
self.player_handler()
except TimeoutError:
return
else:
self.viewer_handler()
return ReceiveHandler
def start_server(sock_file: str, game: Game, dockers, use_docker=True) -> socketserver.BaseServer:
'''
Start a socket server for the players to connect to
Args:
        sock_file: the Unix socket file name (or a (host, port) tuple for a
            TCP server) that the players will connect to
        game: The game information that is being run
        dockers: mapping from client id to that player's docker sandbox
        use_docker bool: whether to use docker or not
    Return:
        server: the running server object, so it can be closed by parent
        functions at the appropriate time
    '''
    # Create handler for managing each connection to the server
receive_handler = create_receive_handler(game, dockers, use_docker, True)
if isinstance(sock_file, tuple):
# tcp port
server = socketserver.ThreadingTCPServer(sock_file, receive_handler)
else:
server = socketserver.ThreadingUnixStreamServer(sock_file, receive_handler)
def wait_for_connections():
time.sleep(BUILD_TIMEOUT)
for player in game.players:
if not player['built_successfully']:
print('Player failed to connect to manager after',BUILD_TIMEOUT,'seconds:', player['player'])
if bc.Team.Red == player['player'].team:
game.winner = 'player2'
else:
game.winner = 'player1'
game.disconnected = True
game.game_over = True
server_thread = threading.Thread(target=server.serve_forever, daemon=True)
logging.info("Server Started at %s", sock_file)
server_thread.start()
waiter_thread = threading.Thread(target=wait_for_connections, daemon=True)
waiter_thread.start()
return server
def start_viewer_server(port: int, game: Game) -> socketserver.BaseServer:
'''
Start a socket server for the players to connect to
Args:
port: port to connect to viewer on
game: The game information that is being run
use_docker bool: whether to use docker or not
Return:
server_thread: The connection so it can be closed by parent functions at
the appropriate time
'''
# Create handler for mangaing each connections to server
receive_handler = create_receive_handler(game, {}, False, False)
# Start server
server = socketserver.ThreadingTCPServer(('localhost', port), receive_handler)
server_thread = threading.Thread(target=server.serve_forever, daemon=True)
server_thread.start()
return server
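# --- Illustrative sketch (not part of the original server) of the wire framing
# ReceiveHandler assumes: one JSON document per line, newline terminated. The
# payload and the socket passed in are hypothetical placeholders; the real login
# and turn schemas are defined by the engine, not here.
def _example_send_framed_json(sock, payload: dict) -> str:
    '''Send one newline-delimited JSON message and read one line back.'''
    wrapped = sock.makefile('rwb', 1)
    try:
        wrapped.write((json.dumps(payload) + '\n').encode('utf-8'))
        line = next(wrapped)  # blocks until the server writes a reply line
    finally:
        wrapped.close()
    return line.decode('utf-8').strip()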
|
playlist.py
|
import threading
import vlc
import gplaylist as grayp
import pafy
import sys
from os import system,name
import requests
import urllib
class GPlayer:
def __init__(self):
self.lnks = set()
self.isPlaying=False
self.audvol = 150
self.flag = True
        self.Instance = vlc.Instance('--input-repeat=-1', '--fullscreen', '--mouse-hide-timeout=0')
        self.player = self.Instance.media_player_new()
self.lnks = grayp.GetQueue().get()
self.t3=threading.Thread(target=self.getOptions)
def setVol(self, vol):
self.audvol = vol
def setdat(self,lnk):
dat=pafy.new(lnk)
self.dat=dat
self.setMedia(dat)
def setMedia(self,dat):
self.media=self.Instance.media_new(dat.getbestaudio().url)
self.media.get_mrl()
def getVol(self):
return self.audvol
def getOptions(self):
while True:
inp=input('...>')
if inp == 'pause':
self.player.set_pause(1)
elif inp == 'help':
                print('skip : for next song\npause : for pausing playback\nresume : for resuming playback\nfff : to skip forward 5 seconds\nffb : to skip backwards 5 seconds\nvol <volume 0-100> : for setting desired volume\ngvol : to get current audio volume\nmute : to mute audio\nunmute : to unmute audio\nstop : to quit\n' +
                      'aud : to download audio\nvid : to download video')
elif inp == 'resume':
self.player.set_pause(0)
elif inp == 'aud':
audth=threading.Thread(target=self.downloada)
audth.start()
            elif inp == 'vid':
vidth=threading.Thread(target=self.downloadv)
vidth.start()
elif inp == 'fff':
c = self.player.get_time()
self.player.set_time(c+1000*5)
elif inp == 'ffb':
c = self.player.get_time()
if c-5000 < 0:
self.player.set_time(0)
else:
self.player.set_time(c-1000*5)
elif inp.startswith('vol'):
vol = int(inp.split(' ')[1])
self.audvol = vol
self.player.audio_set_volume(self.audvol)
elif inp == 'gvol':
print(self.player.audio_get_volume())
elif inp == 'mute':
self.player.audio_set_mute(True)
elif inp == 'unmute':
self.player.audio_set_mute(False)
elif inp=='skip':
self.player.stop()
self.isPlaying=False
elif inp == 'info':
print(self.dat.title)
ratio = (self.player.get_time()//1000)/self.dat.length
ratio = ratio*10
ratio = int(ratio)
sec = self.player.get_time()//1000
mint = sec//60
sec = sec-mint*60
print(str(mint)+':'+str(sec), end='')
for i in range(10):
if i == ratio:
print('>.', end='')
else:
print('..', end='')
sec = self.dat.length
mint = sec//60
sec = sec-mint*60
print(str(mint)+':'+str(sec), end='')
print()
elif inp == 'clr':
if name == 'nt':
_ = system('cls')
elif name == 'posix':
_ = system('clear')
else:
print('Sorry Clear Screen not supported for your OS : '+name)
elif inp=='stop':
print('Stopping the Script...')
self.player.stop()
                sys.exit('USER CHOSE TO QUIT')
def examplea(self,lnks):
#print(lnks)
self.flag=True
print('help for CONTROLS')
self.t3.start()
for song in self.lnks:
try:
self.setdat(song)
print('CURRENTLY PLAYING : ',)
print(self.dat)
print('FROM YOUR FAVORITE ARTISTS')
self.player.set_media(self.media)
self.player.audio_set_volume(self.getVol())
self.player.play()
self.isPlaying=True
#duration = player.get_length() / 1000
#mm, ss = divmod(duration, 60)
while self.isPlaying:
if self.player.get_state()==vlc.State.Ended:
self.isPlaying=False
self.player.stop()
break
                    if not self.t3.is_alive():
self.isPlaying=False
sys.exit('USER CHOSE TO EXIT..\n'+'Script might still wait for the download to finish...')
break
#print('CAME OUT WHILE')
except Exception as ex:
print(ex)
continue
#print('CAME OUT EXCEPT')
continue
self.t3.join()
print('QUEUE ENDED')
def getLinks(self):
return self.lnks
def downloada(self):
ba = self.dat.getbestaudio()
print("Size : "+str(ba.get_filesize())+' bytes')
filename = ba.download()
def downloadv(self):
ba = self.dat.getbest()
print("Size : "+str(ba.get_filesize())+' bytes')
filename = ba.download()
ob=GPlayer()
print('PLAYING YOUR QUEUE...')
lnks=ob.getLinks()
if len(lnks)>0:
ob.examplea(lnks)
else:
print('NO SONGS FOUND IN YOUR PLAYLIST')
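# --- Illustrative sketch, not part of GPlayer above: the minimal pafy -> python-vlc
# flow that setdat/setMedia/examplea perform for a single link. `youtube_url` is a
# hypothetical placeholder.
def _example_play_once(youtube_url):
    stream = pafy.new(youtube_url).getbestaudio()     # resolve the best audio stream
    instance = vlc.Instance()
    player = instance.media_player_new()
    player.set_media(instance.media_new(stream.url))  # wrap the stream URL in a VLC media
    player.play()                                     # non-blocking; poll player.get_state() to wait
    return player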
|
test_telnetlib.py
|
import socket
import telnetlib
import time
import Queue
import unittest
from unittest import TestCase
from test import test_support
threading = test_support.import_module('threading')
HOST = test_support.HOST
EOF_sigil = object()
def server(evt, serv, dataq=None):
""" Open a tcp server in three steps
1) set evt to true to let the parent know we are ready
2) [optional] if is not False, write the list of data from dataq.get()
to the socket.
"""
serv.listen(5)
evt.set()
try:
conn, addr = serv.accept()
if dataq:
data = ''
new_data = dataq.get(True, 0.5)
dataq.task_done()
for item in new_data:
if item == EOF_sigil:
break
if type(item) in [int, float]:
time.sleep(item)
else:
data += item
written = conn.send(data)
data = data[written:]
conn.close()
except socket.timeout:
pass
finally:
serv.close()
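# Illustrative note on the dataq protocol used by server() above: each queued list
# mixes payload strings, numeric sleep durations and a trailing EOF_sigil, e.g.
# ['hello', 0.5, 'world', EOF_sigil] sends 'hello', pauses about half a second,
# sends 'world', then stops writing.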
class GeneralTests(TestCase):
def setUp(self):
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(60) # Safety net. Look issue 11812
self.port = test_support.bind_port(self.sock)
self.thread = threading.Thread(target=server, args=(self.evt,self.sock))
self.thread.setDaemon(True)
self.thread.start()
self.evt.wait()
def tearDown(self):
self.thread.join()
def testBasic(self):
# connects
telnet = telnetlib.Telnet(HOST, self.port)
telnet.sock.close()
def testTimeoutDefault(self):
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
telnet = telnetlib.Telnet(HOST, self.port)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(telnet.sock.gettimeout(), 30)
telnet.sock.close()
def testTimeoutNone(self):
# None, having other default
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
telnet = telnetlib.Telnet(HOST, self.port, timeout=None)
finally:
socket.setdefaulttimeout(None)
self.assertTrue(telnet.sock.gettimeout() is None)
telnet.sock.close()
def testTimeoutValue(self):
telnet = telnetlib.Telnet(HOST, self.port, timeout=30)
self.assertEqual(telnet.sock.gettimeout(), 30)
telnet.sock.close()
def testTimeoutOpen(self):
telnet = telnetlib.Telnet()
telnet.open(HOST, self.port, timeout=30)
self.assertEqual(telnet.sock.gettimeout(), 30)
telnet.sock.close()
def testGetters(self):
# Test telnet getter methods
telnet = telnetlib.Telnet(HOST, self.port, timeout=30)
t_sock = telnet.sock
self.assertEqual(telnet.get_socket(), t_sock)
self.assertEqual(telnet.fileno(), t_sock.fileno())
telnet.sock.close()
def _read_setUp(self):
self.evt = threading.Event()
self.dataq = Queue.Queue()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(10)
self.port = test_support.bind_port(self.sock)
self.thread = threading.Thread(target=server, args=(self.evt,self.sock, self.dataq))
self.thread.start()
self.evt.wait()
def _read_tearDown(self):
self.thread.join()
class ReadTests(TestCase):
setUp = _read_setUp
tearDown = _read_tearDown
# use a similar approach to testing timeouts as test_timeout.py
# these will never pass 100% but make the fuzz big enough that it is rare
block_long = 0.6
block_short = 0.3
def test_read_until_A(self):
"""
read_until(expected, [timeout])
Read until the expected string has been seen, or a timeout is
hit (default is no timeout); may block.
"""
want = ['x' * 10, 'match', 'y' * 10, EOF_sigil]
self.dataq.put(want)
telnet = telnetlib.Telnet(HOST, self.port)
self.dataq.join()
data = telnet.read_until('match')
self.assertEqual(data, ''.join(want[:-2]))
def test_read_until_B(self):
# test the timeout - it does NOT raise socket.timeout
want = ['hello', self.block_long, 'not seen', EOF_sigil]
self.dataq.put(want)
telnet = telnetlib.Telnet(HOST, self.port)
self.dataq.join()
data = telnet.read_until('not seen', self.block_short)
self.assertEqual(data, want[0])
self.assertEqual(telnet.read_all(), 'not seen')
def test_read_until_with_poll(self):
"""Use select.poll() to implement telnet.read_until()."""
want = ['x' * 10, 'match', 'y' * 10, EOF_sigil]
self.dataq.put(want)
telnet = telnetlib.Telnet(HOST, self.port)
if not telnet._has_poll:
raise unittest.SkipTest('select.poll() is required')
telnet._has_poll = True
self.dataq.join()
data = telnet.read_until('match')
self.assertEqual(data, ''.join(want[:-2]))
def test_read_until_with_select(self):
"""Use select.select() to implement telnet.read_until()."""
want = ['x' * 10, 'match', 'y' * 10, EOF_sigil]
self.dataq.put(want)
telnet = telnetlib.Telnet(HOST, self.port)
telnet._has_poll = False
self.dataq.join()
data = telnet.read_until('match')
self.assertEqual(data, ''.join(want[:-2]))
def test_read_all_A(self):
"""
read_all()
Read all data until EOF; may block.
"""
want = ['x' * 500, 'y' * 500, 'z' * 500, EOF_sigil]
self.dataq.put(want)
telnet = telnetlib.Telnet(HOST, self.port)
self.dataq.join()
data = telnet.read_all()
self.assertEqual(data, ''.join(want[:-1]))
return
def _test_blocking(self, func):
self.dataq.put([self.block_long, EOF_sigil])
self.dataq.join()
start = time.time()
data = func()
self.assertTrue(self.block_short <= time.time() - start)
def test_read_all_B(self):
self._test_blocking(telnetlib.Telnet(HOST, self.port).read_all)
def test_read_all_C(self):
self.dataq.put([EOF_sigil])
telnet = telnetlib.Telnet(HOST, self.port)
self.dataq.join()
telnet.read_all()
telnet.read_all() # shouldn't raise
def test_read_some_A(self):
"""
read_some()
Read at least one byte or EOF; may block.
"""
# test 'at least one byte'
want = ['x' * 500, EOF_sigil]
self.dataq.put(want)
telnet = telnetlib.Telnet(HOST, self.port)
self.dataq.join()
data = telnet.read_all()
self.assertTrue(len(data) >= 1)
def test_read_some_B(self):
# test EOF
self.dataq.put([EOF_sigil])
telnet = telnetlib.Telnet(HOST, self.port)
self.dataq.join()
self.assertEqual('', telnet.read_some())
def test_read_some_C(self):
self._test_blocking(telnetlib.Telnet(HOST, self.port).read_some)
def _test_read_any_eager_A(self, func_name):
"""
read_very_eager()
Read all data available already queued or on the socket,
without blocking.
"""
want = [self.block_long, 'x' * 100, 'y' * 100, EOF_sigil]
expects = want[1] + want[2]
self.dataq.put(want)
telnet = telnetlib.Telnet(HOST, self.port)
self.dataq.join()
func = getattr(telnet, func_name)
data = ''
while True:
try:
data += func()
self.assertTrue(expects.startswith(data))
except EOFError:
break
self.assertEqual(expects, data)
def _test_read_any_eager_B(self, func_name):
# test EOF
self.dataq.put([EOF_sigil])
telnet = telnetlib.Telnet(HOST, self.port)
self.dataq.join()
time.sleep(self.block_short)
func = getattr(telnet, func_name)
self.assertRaises(EOFError, func)
    # read_eager and read_very_eager make the same guarantees
    # (they behave differently but we only test the guarantees)
def test_read_very_eager_A(self):
self._test_read_any_eager_A('read_very_eager')
def test_read_very_eager_B(self):
self._test_read_any_eager_B('read_very_eager')
def test_read_eager_A(self):
self._test_read_any_eager_A('read_eager')
def test_read_eager_B(self):
self._test_read_any_eager_B('read_eager')
# NB -- we need to test the IAC block which is mentioned in the docstring
# but not in the module docs
def _test_read_any_lazy_B(self, func_name):
self.dataq.put([EOF_sigil])
telnet = telnetlib.Telnet(HOST, self.port)
self.dataq.join()
func = getattr(telnet, func_name)
telnet.fill_rawq()
self.assertRaises(EOFError, func)
def test_read_lazy_A(self):
want = ['x' * 100, EOF_sigil]
self.dataq.put(want)
telnet = telnetlib.Telnet(HOST, self.port)
self.dataq.join()
time.sleep(self.block_short)
self.assertEqual('', telnet.read_lazy())
data = ''
while True:
try:
read_data = telnet.read_lazy()
data += read_data
if not read_data:
telnet.fill_rawq()
except EOFError:
break
self.assertTrue(want[0].startswith(data))
self.assertEqual(data, want[0])
def test_read_lazy_B(self):
self._test_read_any_lazy_B('read_lazy')
def test_read_very_lazy_A(self):
want = ['x' * 100, EOF_sigil]
self.dataq.put(want)
telnet = telnetlib.Telnet(HOST, self.port)
self.dataq.join()
time.sleep(self.block_short)
self.assertEqual('', telnet.read_very_lazy())
data = ''
while True:
try:
read_data = telnet.read_very_lazy()
except EOFError:
break
data += read_data
if not read_data:
telnet.fill_rawq()
self.assertEqual('', telnet.cookedq)
telnet.process_rawq()
self.assertTrue(want[0].startswith(data))
self.assertEqual(data, want[0])
def test_read_very_lazy_B(self):
self._test_read_any_lazy_B('read_very_lazy')
class nego_collector(object):
def __init__(self, sb_getter=None):
self.seen = ''
self.sb_getter = sb_getter
self.sb_seen = ''
def do_nego(self, sock, cmd, opt):
self.seen += cmd + opt
if cmd == tl.SE and self.sb_getter:
sb_data = self.sb_getter()
self.sb_seen += sb_data
tl = telnetlib
class OptionTests(TestCase):
setUp = _read_setUp
tearDown = _read_tearDown
# RFC 854 commands
cmds = [tl.AO, tl.AYT, tl.BRK, tl.EC, tl.EL, tl.GA, tl.IP, tl.NOP]
def _test_command(self, data):
""" helper for testing IAC + cmd """
self.setUp()
self.dataq.put(data)
telnet = telnetlib.Telnet(HOST, self.port)
self.dataq.join()
nego = nego_collector()
telnet.set_option_negotiation_callback(nego.do_nego)
txt = telnet.read_all()
cmd = nego.seen
self.assertTrue(len(cmd) > 0) # we expect at least one command
self.assertIn(cmd[0], self.cmds)
self.assertEqual(cmd[1], tl.NOOPT)
self.assertEqual(len(''.join(data[:-1])), len(txt + cmd))
nego.sb_getter = None # break the nego => telnet cycle
self.tearDown()
def test_IAC_commands(self):
# reset our setup
self.dataq.put([EOF_sigil])
telnet = telnetlib.Telnet(HOST, self.port)
self.dataq.join()
self.tearDown()
for cmd in self.cmds:
self._test_command(['x' * 100, tl.IAC + cmd, 'y'*100, EOF_sigil])
self._test_command(['x' * 10, tl.IAC + cmd, 'y'*10, EOF_sigil])
self._test_command([tl.IAC + cmd, EOF_sigil])
# all at once
self._test_command([tl.IAC + cmd for (cmd) in self.cmds] + [EOF_sigil])
self.assertEqual('', telnet.read_sb_data())
def test_SB_commands(self):
# RFC 855, subnegotiations portion
send = [tl.IAC + tl.SB + tl.IAC + tl.SE,
tl.IAC + tl.SB + tl.IAC + tl.IAC + tl.IAC + tl.SE,
tl.IAC + tl.SB + tl.IAC + tl.IAC + 'aa' + tl.IAC + tl.SE,
tl.IAC + tl.SB + 'bb' + tl.IAC + tl.IAC + tl.IAC + tl.SE,
tl.IAC + tl.SB + 'cc' + tl.IAC + tl.IAC + 'dd' + tl.IAC + tl.SE,
EOF_sigil,
]
self.dataq.put(send)
telnet = telnetlib.Telnet(HOST, self.port)
self.dataq.join()
nego = nego_collector(telnet.read_sb_data)
telnet.set_option_negotiation_callback(nego.do_nego)
txt = telnet.read_all()
self.assertEqual(txt, '')
want_sb_data = tl.IAC + tl.IAC + 'aabb' + tl.IAC + 'cc' + tl.IAC + 'dd'
self.assertEqual(nego.sb_seen, want_sb_data)
self.assertEqual('', telnet.read_sb_data())
nego.sb_getter = None # break the nego => telnet cycle
class ExpectTests(TestCase):
def setUp(self):
self.evt = threading.Event()
self.dataq = Queue.Queue()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(10)
self.port = test_support.bind_port(self.sock)
self.thread = threading.Thread(target=server, args=(self.evt,self.sock,
self.dataq))
self.thread.start()
self.evt.wait()
def tearDown(self):
self.thread.join()
# use a similar approach to testing timeouts as test_timeout.py
# these will never pass 100% but make the fuzz big enough that it is rare
block_long = 0.6
block_short = 0.3
def test_expect_A(self):
"""
expect(expected, [timeout])
Read until the expected string has been seen, or a timeout is
hit (default is no timeout); may block.
"""
want = ['x' * 10, 'match', 'y' * 10, EOF_sigil]
self.dataq.put(want)
telnet = telnetlib.Telnet(HOST, self.port)
self.dataq.join()
(_,_,data) = telnet.expect(['match'])
self.assertEqual(data, ''.join(want[:-2]))
def test_expect_B(self):
# test the timeout - it does NOT raise socket.timeout
want = ['hello', self.block_long, 'not seen', EOF_sigil]
self.dataq.put(want)
telnet = telnetlib.Telnet(HOST, self.port)
self.dataq.join()
(_,_,data) = telnet.expect(['not seen'], self.block_short)
self.assertEqual(data, want[0])
self.assertEqual(telnet.read_all(), 'not seen')
def test_expect_with_poll(self):
"""Use select.poll() to implement telnet.expect()."""
want = ['x' * 10, 'match', 'y' * 10, EOF_sigil]
self.dataq.put(want)
telnet = telnetlib.Telnet(HOST, self.port)
if not telnet._has_poll:
raise unittest.SkipTest('select.poll() is required')
telnet._has_poll = True
self.dataq.join()
(_,_,data) = telnet.expect(['match'])
self.assertEqual(data, ''.join(want[:-2]))
def test_expect_with_select(self):
"""Use select.select() to implement telnet.expect()."""
want = ['x' * 10, 'match', 'y' * 10, EOF_sigil]
self.dataq.put(want)
telnet = telnetlib.Telnet(HOST, self.port)
telnet._has_poll = False
self.dataq.join()
(_,_,data) = telnet.expect(['match'])
self.assertEqual(data, ''.join(want[:-2]))
def test_main(verbose=None):
test_support.run_unittest(GeneralTests, ReadTests, OptionTests,
ExpectTests)
if __name__ == '__main__':
test_main()
|
_client.py
|
from statefun_tasks import TaskRequest, TaskResult, TaskException, DefaultSerialiser, PipelineBuilder
from statefun_tasks.client import TaskError
from google.protobuf.any_pb2 import Any
from kafka import KafkaProducer, KafkaConsumer, TopicPartition
import logging
import socket
from uuid import uuid4
from threading import Thread
import asyncio
from concurrent.futures import Future
_log = logging.getLogger('FlinkTasks')
class FlinkTasksClient(object):
def __init__(self, kafka_broker_url, request_topic, reply_topic, group_id=None, serialiser=None):
self._kafka_broker_url = kafka_broker_url
self._requests = {}
self._request_topic = request_topic
self._reply_topic = reply_topic
self._group_id = group_id
self._serialiser = serialiser if serialiser is not None else DefaultSerialiser()
self._producer = KafkaProducer(bootstrap_servers=[kafka_broker_url])
self._consumer = KafkaConsumer(
self._reply_topic,
bootstrap_servers=[self._kafka_broker_url],
auto_offset_reset='earliest',
group_id=self._group_id)
self._consumer_thread = Thread(target=self._consume, args=())
self._consumer_thread.daemon = True
self._consumer_thread.start()
def submit(self, pipeline: PipelineBuilder, topic=None):
task_request = pipeline.to_task_request(self._serialiser)
return self._submit_request(task_request, topic=topic)
async def submit_async(self, pipeline: PipelineBuilder, topic=None):
future, _ = self.submit(pipeline, topic=topic)
return await asyncio.wrap_future(future)
def _submit_request(self, task_request: TaskRequest, topic=None):
if task_request.id is None or task_request.id == "":
raise ValueError('Task request is missing an id')
if task_request.type is None or task_request.type == "":
raise ValueError('Task request is missing a type')
future = Future()
self._requests[task_request.id] = future
task_request.reply_topic = self._reply_topic
key = task_request.id.encode('utf-8')
val = task_request.SerializeToString()
topic = self._request_topic if topic is None else topic
self._producer.send(topic=topic, key=key, value=val)
self._producer.flush()
return future, task_request.id
def _consume(self):
while True:
try:
for message in self._consumer:
_log.info(f'Message received - {message}')
any = Any()
any.ParseFromString(message.value)
if any.Is(TaskException.DESCRIPTOR):
self._raise_exception(any)
elif any.Is(TaskResult.DESCRIPTOR):
self._return_result(any)
except Exception as ex:
_log.warning(f'Exception in consumer thread - {ex}', exc_info=ex)
def _return_result(self, any: Any):
task_result = TaskResult()
any.Unpack(task_result)
correlation_id = task_result.correlation_id
future = self._requests.get(correlation_id, None)
if future is not None:
del self._requests[correlation_id]
try:
result, _ = self._serialiser.deserialise_result(task_result)
future.set_result(result)
except Exception as ex:
future.set_exception(ex)
def _raise_exception(self, any: Any):
task_exception = TaskException()
any.Unpack(task_exception)
correlation_id = task_exception.correlation_id
future = self._requests.get(correlation_id, None)
if future is not None:
del self._requests[correlation_id]
try:
future.set_exception(TaskError(task_exception))
except Exception as ex:
future.set_exception(ex)
class FlinkTasksClientFactory():
__clients = {}
@staticmethod
def get_client(kafka_broker_url, request_topic, reply_topic_prefix, serialiser=None) -> FlinkTasksClient:
key = f'{kafka_broker_url}.{request_topic}.{reply_topic_prefix}'
if key not in FlinkTasksClientFactory.__clients:
reply_topic = f'{reply_topic_prefix}.{socket.gethostname()}.{str(uuid4())}'
client = FlinkTasksClient(kafka_broker_url, request_topic, reply_topic, serialiser=serialiser)
FlinkTasksClientFactory.__clients[key] = client
return FlinkTasksClientFactory.__clients[key]
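# --- Illustrative usage sketch, not part of the client above. The broker address
# and topic names are hypothetical placeholders; the pipeline is built elsewhere
# with PipelineBuilder.
async def _example_submit(pipeline: PipelineBuilder):
    client = FlinkTasksClientFactory.get_client(
        'localhost:9092',            # kafka_broker_url
        'statefun-tasks.requests',   # request_topic
        'statefun-tasks.replies')    # reply_topic_prefix
    # submit_async wraps the concurrent.futures.Future returned by submit()
    # in an awaitable and resolves when a TaskResult/TaskException arrives
    return await client.submit_async(pipeline)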
|
main.py
|
from tkinter import *
from tkinter import ttk
from tkinter import messagebox
from tkinter import filedialog
from json_manager import *
from pygame import mixer
from pytube import YouTube
from moviepy.video.io.VideoFileClip import VideoFileClip
import threading as thr
root = Tk()
root.title('MuPlay [vs1]')
root.geometry('400x500')
root.resizable(False, False)
my_icon = PhotoImage(file='ast/MuPlay_logo.png')
root.iconphoto(False, my_icon)
dark = '#3d3d3d'
my_font = 'Segoe UI Bold Italic'
root.config(bg=dark)
main_frame = Frame(root, bg=dark)
main_frame.pack(fill=BOTH, expand=True)
start_frame = Frame(root, bg=dark)
def download_song():
dwn_frame = Frame(root, bg=dark)
main_frame.pack_forget()
dwn_frame.pack(fill=BOTH, expand=True)
lbl30 = Label(dwn_frame, text='Enter Youtube Video URL : ', bg=dark, fg='white',
font=(my_font, 20))
lbl30.pack(pady=10)
def return_to_main():
dwn_frame.pack_forget()
main_frame.pack(fill=BOTH, expand=True)
def on_enter4(e):
btn30['fg'] = 'orange'
btn30['bg'] = dark
def on_leave2(e):
btn30['fg'] = 'white'
btn30['bg'] = dark
btn30 = Button(dwn_frame, text='<-- Back', relief=FLAT, bd=0, bg=dark, fg='white',
font=(my_font, 20), activebackground=dark, activeforeground='orange',
command=return_to_main)
btn30.pack(side=BOTTOM, pady=5)
btn30.bind('<Enter>', on_enter4)
btn30.bind('<Leave>', on_leave2)
ent30 = Entry(dwn_frame, bg=dark, bd=1, fg='white', width=50)
ent30.pack(pady=10)
def play_song_ui():
download_frame = Frame(root, bg=dark)
main_frame.pack_forget()
dwn_frame.pack_forget()
download_frame.pack(fill=BOTH, expand=True)
messagebox.showinfo('Downloaded Song!', 'Song Downloaded at '
'~/musicsong/downloadedMusic/')
def start_playing_music():
def play_music_cmd():
mixer.init()
mixer.music.load('musicsong/downloadedMusic/my_audio.mp3')
mixer.music.play()
func = thr.Thread(target=play_music_cmd)
func.start()
def stop_music():
mixer.init()
mixer.music.stop()
global paused_if
paused_if = False
def pause_music(is_paused):
global paused_if
paused_if = is_paused
if paused_if:
mixer.music.unpause()
paused_if = False
else:
mixer.music.pause()
paused_if = True
play_logo_inactive = PhotoImage(file='ast/play_logo.png')
play_logo_active = PhotoImage(file='ast/play_logo_act.png')
pause_logo_inactive = PhotoImage(file='ast/pause_logo.png')
pause_logo_active = PhotoImage(file='ast/pause_logo_act.png')
stop_logo_inactive = PhotoImage(file='ast/stop_logo.png')
stop_logo_active = PhotoImage(file='ast/stop_logo_act.png')
def on_ent5(e):
lbl20_play['image'] = play_logo_active
def on_ent6(e):
lbl21_pause['image'] = pause_logo_active
def on_ent7(e):
btn22['image'] = stop_logo_active
def on_ent8(e):
btn23['fg'] = 'orange'
btn23['bg'] = dark
def on_lv(e):
lbl21_pause['image'] = pause_logo_inactive
lbl20_play['image'] = play_logo_inactive
btn22['image'] = stop_logo_inactive
btn23['fg'] = 'white'
btn23['bg'] = dark
lbl20_play = Button(download_frame, image=play_logo_inactive, command=start_playing_music,
bd=0, relief=SOLID, highlightthickness=0)
lbl20_play.pack(pady=20)
lbl20_play.bind('<Enter>', on_ent5)
lbl20_play.bind('<Leave>', on_lv)
lbl21_pause = Button(download_frame, image=pause_logo_inactive, command=lambda: pause_music(paused_if),
bd=0, highlightthickness=0)
lbl21_pause.pack(pady=20)
lbl21_pause.bind('<Enter>', on_ent6)
lbl21_pause.bind('<Leave>', on_lv)
btn22 = Button(download_frame, image=stop_logo_inactive,
highlightthickness=0, bd=0, relief=SOLID, command=stop_music)
btn22.pack(pady=20)
btn22.bind('<Enter>', on_ent7)
btn22.bind('<Leave>', on_lv)
def return_to_start():
stop_music()
download_frame.pack_forget()
dwn_frame.pack_forget()
start_frame.pack_forget()
main_frame.pack(fill=BOTH, expand=True)
btn23 = Button(download_frame, text='<-- Back',
bd=0, relief=SOLID, command=return_to_start,
bg=dark, fg='white', font=(my_font, 10), highlightthickness=0)
btn23.pack(side=BOTTOM, pady=5)
btn23.bind('<Enter>', on_ent8)
btn23.bind('<Leave>', on_lv)
def vid_to_aud_conv():
mp4_file_dir = 'musicsong/downloadedMusic/my_video.mp4'
mp3_file_dir = 'musicsong/downloadedMusic/my_audio.mp3'
videoClip = VideoFileClip(mp4_file_dir)
audioClip = videoClip.audio
audioClip.write_audiofile(mp3_file_dir)
audioClip.close()
videoClip.close()
play_song_ui()
def download_video():
vid_url = ent30.get()
my_vid = YouTube(vid_url)
my_vid = my_vid.streams.get_highest_resolution()
my_vid.download(output_path='musicsong/downloadedMusic',
filename='my_video.mp4')
vid_to_aud_conv()
btn31 = Button(dwn_frame, text='Download & Play', relief=FLAT, bd=0, bg=dark, fg='white',
font=(my_font, 20), activebackground=dark, activeforeground='orange',
command=download_video)
btn31.pack(pady=10, side=BOTTOM)
def play_custom_music():
sub_frame = Frame(root, bg=dark)
start_frame.pack_forget()
sub_frame.pack(fill=BOTH, expand=True)
file_dir = filedialog.askopenfilename(title='Choose Your Song ( .mp3 only )',
filetypes=(("MP3 Files", "*.mp3"),))
if file_dir == '':
messagebox.showerror('Error!', 'Please Choose a File To Continue')
sub_frame.pack_forget()
start_frame.pack(fill=BOTH, expand=True)
else:
def start_playing_music():
def play_music_cmd():
mixer.init()
mixer.music.load(file_dir)
mixer.music.play()
func = thr.Thread(target=play_music_cmd)
func.start()
def stop_music():
mixer.init()
mixer.music.stop()
global paused_if
paused_if = False
def pause_music(is_paused):
global paused_if
paused_if = is_paused
if paused_if:
mixer.music.unpause()
paused_if = False
else:
mixer.music.pause()
paused_if = True
play_logo_inactive = PhotoImage(file='ast/play_logo.png')
play_logo_active = PhotoImage(file='ast/play_logo_act.png')
pause_logo_inactive = PhotoImage(file='ast/pause_logo.png')
pause_logo_active = PhotoImage(file='ast/pause_logo_act.png')
stop_logo_inactive = PhotoImage(file='ast/stop_logo.png')
stop_logo_active = PhotoImage(file='ast/stop_logo_act.png')
def on_ent5(e):
lbl20_play['image'] = play_logo_active
def on_ent6(e):
lbl21_pause['image'] = pause_logo_active
def on_ent7(e):
btn22['image'] = stop_logo_active
def on_ent8(e):
btn23['fg'] = 'orange'
btn23['bg'] = dark
def on_lv(e):
lbl21_pause['image'] = pause_logo_inactive
lbl20_play['image'] = play_logo_inactive
btn22['image'] = stop_logo_inactive
btn23['fg'] = 'white'
btn23['bg'] = dark
lbl20_play = Button(sub_frame, image=play_logo_inactive, command=start_playing_music,
bd=0, relief=SOLID, highlightthickness=0)
lbl20_play.pack(pady=20)
lbl20_play.bind('<Enter>', on_ent5)
lbl20_play.bind('<Leave>', on_lv)
lbl21_pause = Button(sub_frame, image=pause_logo_inactive, command=lambda: pause_music(paused_if),
bd=0, highlightthickness=0)
lbl21_pause.pack(pady=20)
lbl21_pause.bind('<Enter>', on_ent6)
lbl21_pause.bind('<Leave>', on_lv)
btn22 = Button(sub_frame, image=stop_logo_inactive,
highlightthickness=0, bd=0, relief=SOLID, command=stop_music)
btn22.pack(pady=20)
btn22.bind('<Enter>', on_ent7)
btn22.bind('<Leave>', on_lv)
def return_to_start():
stop_music()
sub_frame.pack_forget()
start_frame.pack(fill=BOTH, expand=True)
btn23 = Button(sub_frame, text='<-- Back',
bd=0, relief=SOLID, command=return_to_start,
bg=dark, fg='white', font=(my_font, 10), highlightthickness=0)
btn23.pack(side=BOTTOM, pady=5)
btn23.bind('<Enter>', on_ent8)
btn23.bind('<Leave>', on_lv)
# Start of Start_Page: -----
def play_music():
def start_playing_music():
def play_music_cmd():
mixer.init()
my_selected_playlist = jsn.get_data('currentPlaylist')
if my_selected_playlist == 'thh':
mixer.music.load('musicsong/installedMusic/trapHipHop/trapHiphopSong .mp3')
mixer.music.play()
elif my_selected_playlist == 'lhh':
mixer.music.load('musicsong/installedMusic/lofiHipHop/lofiHiphopSong.mp3')
mixer.music.play()
func = thr.Thread(target=play_music_cmd)
func.start()
def stop_music():
mixer.init()
mixer.music.stop()
global paused_if
paused_if = False
def pause_music(is_paused):
global paused_if
paused_if = is_paused
if paused_if:
mixer.music.unpause()
paused_if = False
else:
mixer.music.pause()
paused_if = True
play_frame = Frame(root, bg=dark)
main_frame.pack_forget()
play_frame.pack(fill=BOTH, expand=True)
play_logo_inactive = PhotoImage(file='ast/play_logo.png')
play_logo_active = PhotoImage(file='ast/play_logo_act.png')
pause_logo_inactive = PhotoImage(file='ast/pause_logo.png')
pause_logo_active = PhotoImage(file='ast/pause_logo_act.png')
stop_logo_inactive = PhotoImage(file='ast/stop_logo.png')
stop_logo_active = PhotoImage(file='ast/stop_logo_act.png')
def on_ent5(e):
lbl20_play['image'] = play_logo_active
def on_ent6(e):
lbl21_pause['image'] = pause_logo_active
def on_ent7(e):
btn22['image'] = stop_logo_active
def on_ent8(e):
btn23['fg'] = 'orange'
btn23['bg'] = dark
def on_lv(e):
lbl21_pause['image'] = pause_logo_inactive
lbl20_play['image'] = play_logo_inactive
btn22['image'] = stop_logo_inactive
btn23['fg'] = 'white'
btn23['bg'] = dark
lbl20_play = Button(play_frame, image=play_logo_inactive, command=start_playing_music,
bd=0, relief=SOLID, highlightthickness=0)
lbl20_play.pack(pady=20)
lbl20_play.bind('<Enter>', on_ent5)
lbl20_play.bind('<Leave>', on_lv)
lbl21_pause = Button(play_frame, image=pause_logo_inactive, command=lambda: pause_music(paused_if),
bd=0, highlightthickness=0)
lbl21_pause.pack(pady=20)
lbl21_pause.bind('<Enter>', on_ent6)
lbl21_pause.bind('<Leave>', on_lv)
btn22 = Button(play_frame, image=stop_logo_inactive,
highlightthickness=0, bd=0, relief=SOLID, command=stop_music)
btn22.pack(pady=20)
btn22.bind('<Enter>', on_ent7)
btn22.bind('<Leave>', on_lv)
def return_to_start():
stop_music()
play_frame.pack_forget()
main_frame.pack(fill=BOTH, expand=True)
btn23 = Button(play_frame, text='<-- Back',
bd=0, relief=SOLID, command=return_to_start,
bg=dark, fg='white', font=(my_font, 10), highlightthickness=0)
btn23.pack(side=BOTTOM, pady=5)
btn23.bind('<Enter>', on_ent8)
btn23.bind('<Leave>', on_lv)
def start_page():
main_frame.pack_forget()
start_frame.pack(fill=BOTH, expand=True)
def return_to_start():
start_frame.pack_forget()
main_frame.pack(fill=BOTH, expand=True)
start_frame.pack_forget()
music_genres = ['Select Here', 'Lofi HipHop', 'Trap HipHop', 'User Custom Playlist']
lbl10 = Label(start_frame, text='Choose Music Genre : ', relief=FLAT, bd=0, bg=dark, fg='white',
font=(my_font, 25))
lbl10.pack(pady=10)
def combo_action(e):
selected_playlist = cmbx.get()
jsn = Json_Manager('settings.json')
jsn.write_data('anyError', False)
if selected_playlist == 'Trap HipHop':
jsn.append_data('currentPlaylist', 'thh')
elif selected_playlist == 'Lofi HipHop':
jsn.append_data('currentPlaylist', 'lhh')
elif selected_playlist == 'User Custom Playlist':
jsn.append_data('currentPlaylist', 'uss')
def check_if_combx_empty():
selected_playlist = cmbx.get()
try:
if selected_playlist == 'Select Here':
messagebox.showerror('Empty Selection!', 'Please Select Something in The Combobox!')
jsn.change_data('anyError', True)
finally:
check_for_error = jsn.get_data('anyError')
my_uss_playlist = jsn.get_data('currentPlaylist')
if not check_for_error:
if my_uss_playlist == 'uss':
play_custom_music()
else:
start_frame.pack_forget()
play_music()
cmbx = ttk.Combobox(start_frame, value=music_genres, width=44)
cmbx.current(0)
cmbx.pack(pady=10)
cmbx.bind('<<ComboboxSelected>>', combo_action)
def on_enter3(e):
btn10['fg'] = 'orange'
btn10['bg'] = dark
def on_enter4(e):
btn11['fg'] = 'red'
btn11['bg'] = dark
def on_leave2(e):
btn10['fg'] = 'white'
btn10['bg'] = dark
btn11['fg'] = 'white'
btn11['bg'] = dark
btn11 = Button(start_frame, text='<-- Back', relief=FLAT, bd=0, bg=dark, fg='white',
font=(my_font, 30), activebackground=dark, activeforeground='orange',
command=return_to_start)
btn11.pack(side=BOTTOM, pady=10)
btn11.bind('<Enter>', on_enter4)
btn11.bind('<Leave>', on_leave2)
btn10 = Button(start_frame, text='Continue', relief=FLAT, bd=0, bg=dark, fg='white',
font=(my_font, 35), activebackground=dark, activeforeground='orange',
command=check_if_combx_empty)
btn10.pack(side=BOTTOM, pady=10)
btn10.bind('<Enter>', on_enter3)
btn10.bind('<Leave>', on_leave2)
# End of Start_Page: -----
logo_dark_theme = PhotoImage(file='ast/MuPlay_logo_darktheme.png')
lbl1 = Label(main_frame, image=logo_dark_theme, relief=SUNKEN, bd=0)
lbl1.pack(pady=10)
def on_enter1(e):
btn1['bg'] = dark
btn1['fg'] = '#52ffff'
btn1.config(font=(my_font, 38))
def on_enter2(e):
btn2['bg'] = dark
btn2['fg'] = '#52ffff'
btn2.config(font=(my_font, 33))
def on_leave1(e):
btn1['bg'] = dark
btn1['fg'] = 'white'
btn2.config(font=(my_font, 30))
btn2['bg'] = dark
btn2['fg'] = 'white'
btn1.config(font=(my_font, 35))
btn1 = Button(main_frame, text='Start!', relief=FLAT, bd=0, bg=dark, fg='white',
font=(my_font, 35), activebackground=dark, activeforeground='orange',
command=start_page)
btn1.pack(pady=10)
btn2 = Button(main_frame, text='Download Songs', relief=FLAT, bd=0, bg=dark, fg='white',
font=(my_font, 30), activebackground=dark, activeforeground='orange',
command=download_song)
btn2.pack(pady=10)
jsn = Json_Manager('settings.json')
btn1.bind('<Enter>', on_enter1)
btn1.bind('<Leave>', on_leave1)
btn2.bind('<Enter>', on_enter2)
btn2.bind('<Leave>', on_leave1)
root.mainloop()
jsn.clear_data()
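# --- Illustrative sketch, not part of the app above: the pygame.mixer calls the
# three player frames repeat ('song.mp3' is a hypothetical path).
def _example_mixer_controls(path='song.mp3'):
    mixer.init()
    mixer.music.load(path)   # load an mp3 from disk
    mixer.music.play()       # start playback (non-blocking)
    mixer.music.pause()      # pause/unpause pair used by pause_music()
    mixer.music.unpause()
    mixer.music.stop()       # stop_music() ends playback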
|
serial_plot.py
|
#!/usr/bin/env python
#
# Pavel Kirienko, 2013 <pavel.kirienko@gmail.com>
#
from PyQt4 import QtGui
from plot_widget import RealtimePlotWidget
from serial_reader import SerialReader
import sys, threading, time
SER_PORT = '/dev/ttyUSB0' if len(sys.argv) < 2 else sys.argv[1]
SER_BAUDRATE = 115200
def raw_handler(line):
    print(line)
def value_handler(x, values):
for i,val in enumerate(values):
add_data_point(i, x, val)
plot.lazy_redraw(0.2)
def add_data_point(curve_id, x, y):
try:
plot.update_values(curve_id, [x], [y])
except KeyError:
plot.add_curve(curve_id, str(curve_id), [x], [y])
initial_timestamp = time.time()
app = QtGui.QApplication(sys.argv)
plot = RealtimePlotWidget()
reader = SerialReader(SER_PORT, SER_BAUDRATE)
thd = threading.Thread(target=reader.run, args=(value_handler, raw_handler))
thd.daemon = True
thd.start()
plot.redraw()
plot.show()
exit(app.exec_())
|
batch_reader.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
# Modifications Copyright 2017 Abigail See
# Modifications Copyright 2018 Arman Cohan
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from itertools import chain
import sys
"""This file contains code to process data into batches"""
from six.moves import queue as Queue
from six.moves import xrange
import six
from random import shuffle
from threading import Thread
import time
import numpy as np
import tensorflow as tf
import data
import nltk
# To represent list of sections as string and retrieve it back
SECTION_SEPARATOR = ' <SCTN/> '
# to represent separator as string, end of item (ei)
LIST_SEPARATOR = ' <EI/> '
def _string_to_list(s, dtype='str'):
""" converts string to list
Args:
s: input
dtype: specifies the type of elements in the list
can be one of `str` or `int`
"""
if dtype == 'str':
return s.split(LIST_SEPARATOR)
elif dtype == 'int':
return [int(e) for e in s.split(LIST_SEPARATOR) if e]
def _string_to_nested_list(s):
return [e.split(LIST_SEPARATOR)
for e in s.split(SECTION_SEPARATOR)]
# TEMP REMOVED, May be not needed
# def _count_words(s):
# """ Count words in a list of strings """
# return sum([len(e.split(' ')) for e in s])
def _section_to_ids(section, vocab, max_len, pad_id):
    """ Converts words in a section (list of strings) to ids, truncating to max_len and padding with pad_id if necessary """
    section_text = ' '.join(section)
    section_words = nltk.wordpunct_tokenize(section_text)
    if len(section_words) > max_len:
        section_words = section_words[:max_len]
    word_ids = [vocab.word2id(w) for w in section_words]
    while len(word_ids) < max_len:
        word_ids.append(pad_id)
    return word_ids
def _flatten(lst):
""" flatten a nested list (list of lists) """
return list(chain.from_iterable(lst))
def _get_section_words(sec, max_len=None, pad_id=data.PAD_TOKEN, pad=True):
""" given a section (list of sentences), returns a single list of words in that section """
words = nltk.wordpunct_tokenize(' '.join(sec))
if max_len is None:
max_len = len(words)
if pad:
while len(words) < max_len:
words += [pad_id]
return words[:max_len]
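# Illustrative example (not in the original): _get_section_words(['a b', 'c'], max_len=5)
# tokenizes to ['a', 'b', 'c'] and, with pad=True, pads with data.PAD_TOKEN up to length 5.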
def _pad_words(words, max_len=None, pad_id=data.PAD_TOKEN):
""" given a section (list of sentences), returns a single list of words in that section """
if max_len is None:
max_len = len(words)
while len(words) < max_len:
words += [pad_id]
return words[:max_len]
class Example(object):
"""Class representing a train/val/test example for text summarization."""
def __init__(self, article, abstract_sentences, article_id, sections, section_names, labels, vocab, hps):
"""Initializes the Example, performing tokenization and truncation to produce the encoder, decoder and target sequences, which are stored in self.
Args:
article: source text; a list of strings. each token is separated by a single space.
abstract_sentences: list of strings, one per abstract sentence. In each sentence, each token is separated by a single space.
article_id: string
sections: list of list of strings
section_names: list of strings
labels: list of strings, for extractive summarization training (TODO Later)
vocab: Vocabulary object
hps: hyperparameters
"""
self.hps = hps
self.discard = False
# Get ids of special tokens
start_decoding = vocab.word2id(data.START_DECODING)
stop_decoding = vocab.word2id(data.STOP_DECODING)
# clean section information
# clean sections after conclusions
if hps.hier:
end_loc = len(section_names)
beg_loc = 0
for i,s in enumerate(section_names):
if 'conclu' in s.lower():
end_loc = i + 1
if 'intro' in s.lower() and beg_loc == 0:
beg_loc = i
if beg_loc < len(section_names) - end_loc:
sections = sections[beg_loc:end_loc]
try:
intro_last = sections[beg_loc][-2:] # last two sentences in the intro
except IndexError:
# print('article_id: {}, len(sections): {}, section_names: {}'.format(article_id, len(sections), section_names))
self.discard = True
return
# intro_first = []
i = 0
# intro_last_len = _count_words(intro_last)
# intro_len = intro_last_len
# while(intro_len < hps.max_intro_len):
# intro_first.append(sections[beg_loc][i])
# intro_len = _count_words(intro_first) + intro_last_len
# i += 1
if not hps.split_intro:
max_sents = hps.max_intro_sents - 2 # exclude the last two sents
intro_first = sections[beg_loc][:max_sents]
intro_last_words = _get_section_words(intro_last, pad=False)
intro_last_len = len(intro_last_words) # flatten list of sents, get the string inside, count words
discard_last = False
if intro_last_len > hps.max_intro_len:
discard_last = True
len_limit = hps.max_intro_len - intro_last_len if not discard_last else hps.max_intro_len
                # truncate the intro to len_limit words (the last 2 sentences of the intro are always kept)
                # flatten the list of sentences, tokenize, take the first len_limit words, then extend with intro_last
intro_words = _get_section_words(intro_first, len_limit, pad=False)
try:
if intro_words[-1] != '.':
intro_words = intro_words[:-1] + ['.']
if not discard_last:
intro_words += intro_last_words
intro_words = _pad_words(intro_words, hps.max_intro_len)
except IndexError:
print('No first section, Example discarded: ', article_id)
self.discard = True
else:
intro_first = sections[beg_loc][:hps.max_intro_sents]
intro_words = _get_section_words(intro_first, hps.max_intro_len, pad=True)
try:
conclusion_words = _get_section_words(sections[end_loc - beg_loc - 1][:hps.max_conclusion_sents], hps.max_conclusion_len)
except:
import pdb; pdb.set_trace()
print("ERROR, pause and check")
print('end_loc:', end_loc)
print('section_names:', section_names)
print('num_sections: {}'.format(len(sections)))
print('len_sections_sents:', [len(e) for e in sections])
# if not hps.intro_split:
article_sections = [_get_section_words(s[:hps.max_section_sents], hps.max_section_len)
for s in sections[1:-1][:hps.num_sections - 2]]
# else:
# tmp_sections = []
# remaining_sec = sections[1:-1]
# if len(remaining_sec) > hps.num_sections - 2:
# for i in range(hps.num_sections - 2):
# tmp_sections.append(remaining_sec[i])
# last_sec = []
# while(i < len(remaining_sec)):
# last_sec.extend(remaining_sec[i])
# i += 1
# tmp_sections.append(last_sec)
# remaining_sec = tmp_sections
#
# article_sections = [_get_section_words(s, hps.max_section_len)
# for s in remaining_sec]
sections = [intro_words] + article_sections + [conclusion_words]
sec_len = len(sections)
self.sec_len = sec_len
self.num_words_section = [hps.max_section_len for e in sections]
self.num_words_section_nopad = [len(e) for e in sections]
# TODO: Assumption is that sections is a list of list (sections, sentences), check if assumption is true
        # TODO: Assumption is that the number of sections is greater than 2, check if assumption is true
# pad_id = vocab.word2id(data.PAD_TOKEN)
article_text = ' '.join(article)
# Process the article
article_words = nltk.wordpunct_tokenize(article_text)
if len(article_words) > hps.max_enc_steps:
article_words = article_words[:hps.max_enc_steps]
# store the length after truncation but before padding
self.enc_len = len(article_words)
# list of word ids; OOVs are represented by the id for UNK token
self.enc_input = [vocab.word2id(w) for w in article_words]
if hps.hier:
self.enc_sections = []
for sec in sections:
self.enc_sections.append([vocab.word2id(w) for w in sec])
self.enc_sec_len = [len(e) for e in self.enc_sections]
# self.enc_sec_len = sec_len # TODO: Check
# Process the abstract
abstract = ' '.join(abstract_sentences) # string
abstract_words = nltk.wordpunct_tokenize(abstract) # list of strings
# list of word ids; OOVs are represented by the id for UNK token
abs_ids = [vocab.word2id(w) for w in abstract_words]
# Get the decoder input sequence and target sequence
self.dec_input, self.target = self.get_dec_inp_targ_seqs(
abs_ids, hps.max_dec_steps, start_decoding, stop_decoding)
self.dec_len = len(self.dec_input)
# If using pointer-generator mode, we need to store some extra info
if hps.pointer_gen:
# Store a version of the enc_input where in-article OOVs are
# represented by their temporary OOV id; also store the in-article
# OOVs words themselves
self.enc_input_extend_vocab, self.article_oovs = data.article2ids(
article_words, vocab)
            # Get a version of the reference summary where in-article OOVs are
# represented by their temporary article OOV id
abs_ids_extend_vocab = data.abstract2ids(
abstract_words, vocab, self.article_oovs)
# Overwrite decoder target sequence so it uses the temp article OOV
# ids, the target now includes words that are in the article but
# not in the abstract, so represented as OOV
_, self.target = self.get_dec_inp_targ_seqs(
abs_ids_extend_vocab, hps.max_dec_steps, start_decoding, stop_decoding)
self.article_id = article_id
self.sections = sections
self.section_names = section_names
self.labels = labels
# Store the original strings
self.original_article = article
self.original_abstract = abstract
self.original_abstract_sents = abstract_sentences
def get_dec_inp_targ_seqs(self, sequence, max_len, start_id, stop_id):
"""Given the reference summary as a sequence of tokens, return the input sequence for the decoder, and the target sequence which we will use to calculate loss. The sequence will be truncated if it is longer than max_len. The input sequence must start with the start_id and the target sequence must end with the stop_id (but not if it's been truncated).
Args:
sequence: List of ids (integers)
max_len: integer
start_id: integer
stop_id: integer
Returns:
inp: sequence length <=max_len starting with start_id
target: sequence same length as input, ending with stop_id only if there was no truncation
"""
inp = [start_id] + sequence[:]
target = sequence[:]
if len(inp) > max_len: # truncate
inp = inp[:max_len]
target = target[:max_len] # no end_token
else: # no truncation
target.append(stop_id) # end token
assert len(inp) == len(target)
return inp, target
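    # Worked example (illustrative only): with sequence=[8, 9, 10], start_id=1,
    # stop_id=2 and max_len=5 this returns inp=[1, 8, 9, 10] and target=[8, 9, 10, 2];
    # with max_len=3 both are truncated to length 3 and target gets no stop_id.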
def pad_decoder_inp_targ(self, max_len, pad_id):
"""Pad decoder input and target sequences with pad_id up to max_len."""
while len(self.dec_input) < max_len:
self.dec_input.append(pad_id)
while len(self.target) < max_len:
self.target.append(pad_id)
def pad_encoder_input(self, max_len, pad_id):
"""Pad the encoder input sequence with pad_id up to max_len."""
while len(self.enc_input) < max_len:
self.enc_input.append(pad_id)
if self.hps.pointer_gen:
while len(self.enc_input_extend_vocab) < max_len:
self.enc_input_extend_vocab.append(pad_id)
def pad_section_input(self, max_sec_len, max_secs, pad_id=None):
"""pad encoder sections with pad_id. if the number of sections is less than max_secs, add another section with all pads"""
# print("Before padding -> len: {}, element_len: {}, element_type: {}".format(len(self.enc_sections), len(self.enc_sections[0]), type(self.enc_sections[0][0])))
for i, sec in enumerate(self.enc_sections):
while len(sec) < max_sec_len:
self.enc_sections[i].append(pad_id)
while len(self.enc_sections) < max_secs:
pads = [pad_id for _ in range(max_sec_len)]
self.enc_sections.append(pads)
self.num_words_section.append(len(pads))
# self.num_words_section.append(0)
# print("After padding -> len: {}, element_len: {}, element_type: {}\n\n".format(len(self.enc_sections), len(self.enc_sections[0]), type(self.enc_sections[0][0])))
# if self.hps.pointer_gen:
# while len(self.enc_input_extend_vocab) < max_len:
# self.enc_input_extend_vocab.append(pad_id)
class Batch(object):
"""Class representing a minibatch of train/val/test examples for text summarization."""
def __init__(self, example_list, hps, vocab):
"""Turns the example_list into a Batch object.
Args:
example_list: List of Example objects
hps: hyperparameters
vocab: Vocabulary object
"""
self._hps = hps
self.pad_id = vocab.word2id(
data.PAD_TOKEN) # id of the PAD token used to pad sequences
self.sec_pad_id = vocab.word2id(data.SEC_PAD_TOKEN)
# initialize the input to the encoder
self.init_encoder_seq(example_list, hps)
# initialize the input and targets for the decoder
self.init_decoder_seq(example_list, hps)
self.store_orig_strings(example_list) # store the original strings
def init_encoder_seq(self, example_list, hps):
"""Initializes the following:
self.enc_batch:
numpy array of shape (batch_size, <=max_enc_steps) containing integer ids
(all OOVs represented by UNK id), padded to length of longest sequence in the batch
self.enc_lens:
numpy array of shape (batch_size) containing integers.
The (truncated) length of each encoder input sequence (pre-padding).
If hps.pointer_gen, additionally initializes the following:
self.max_art_oovs:
maximum number of in-article OOVs in the batch
self.art_oovs:
list of list of in-article OOVs (strings), for each example in the batch
self.enc_batch_extend_vocab:
Same as self.enc_batch, but in-article OOVs are represented by
their temporary article OOV number.
"""
# Determine the maximum length of the encoder input sequence in this
# batch
if hps.hier:
max_enc_seq_len = hps.max_section_len * hps.num_sections
else:
max_enc_seq_len = max([ex.enc_len for ex in example_list])
# Pad the encoder input sequences up to the length of the longest
# sequence
for ex in example_list:
ex.pad_encoder_input(max_enc_seq_len, self.pad_id)
# Initialize the numpy arrays
# Note: our enc_batch can have different length (second dimension)
# for each batch because we use dynamic_rnn for the encoder.
self.enc_batch = np.zeros(
(hps.batch_size, max_enc_seq_len), dtype=np.int32)
self.enc_lens = np.zeros((hps.batch_size), dtype=np.int32)
self.enc_padding_mask = np.zeros((hps.batch_size, max_enc_seq_len), dtype=np.float32)
# Fill in the numpy arrays
for i, ex in enumerate(example_list):
self.enc_batch[i, :] = ex.enc_input[:]
self.enc_lens[i] = ex.enc_len
for j in range(ex.enc_len):
self.enc_padding_mask[i][j] = 1
# For pointer-generator mode, need to store some extra info
if hps.pointer_gen:
# Determine the max number of in-article OOVs in this batch
self.max_art_oovs = max([len(ex.article_oovs)
for ex in example_list])
# Store the in-article OOVs themselves
self.art_oovs = [ex.article_oovs for ex in example_list]
# Store the version of the enc_batch that uses the article OOV ids
self.enc_batch_extend_vocab = np.zeros(
(hps.batch_size, max_enc_seq_len), dtype=np.int32)
for i, ex in enumerate(example_list):
self.enc_batch_extend_vocab[i,
:] = ex.enc_input_extend_vocab[:]
if self._hps.hier:
# TODO: see if you can uncomment it. Doesn't work because of unstack in the model
# max_num_sections = max([ex.sec_len for ex in example_list])
max_num_sections = self._hps.num_sections
max_num_sections_nopad = max([ex.sec_len for ex in example_list])
for ex in example_list:
# ex.pad_section_input(max_num_sections, self.sec_pad_id)
ex.pad_section_input(self._hps.max_section_len, max_num_sections, self.pad_id)
self.batch_doc_sec_lens = [max_num_sections for _ in example_list]
self.batch_sections = np.zeros((hps.batch_size, max_num_sections, self._hps.max_section_len), dtype=np.int32)
self.batch_sections_len = np.zeros((hps.batch_size, max_num_sections), dtype=np.int32)
self.batch_sections_len_nopad = np.zeros((hps.batch_size, max_num_sections_nopad), dtype=np.int32)
self.enc_section_padding_mask = np.zeros((hps.batch_size, max_num_sections, self._hps.max_section_len), dtype=np.float32)
for i, ex in enumerate(example_list):
j = 0
while(j < len(ex.enc_sections)):
self.batch_sections[i, j, :] = ex.enc_sections[j][:self._hps.max_section_len]
if j < len(ex.enc_sec_len):
for k in range(min(ex.enc_sec_len[j], self._hps.max_section_len)):
self.enc_section_padding_mask[i][j][k] = 1
j += 1
self.batch_sections_len[i, :] = ex.num_words_section[:]
try:
self.batch_sections_len_nopad[i, :] = ex.num_words_section_nopad[:]
except ValueError: # in cases that we want to assign a length 3 list to length 4
for k in range(len(ex.num_words_section_nopad)):
self.batch_sections_len_nopad[i, k] = ex.num_words_section_nopad[k]
def init_decoder_seq(self, example_list, hps):
"""Initializes the following:
self.dec_batch:
numpy array of shape (batch_size, max_dec_steps),
containing integer ids as input for the decoder, padded to max_dec_steps length.
self.target_batch:
numpy array of shape (batch_size, max_dec_steps),
containing integer ids for the target sequence, padded to max_dec_steps length.
self.dec_padding_mask:
numpy array of shape (batch_size, max_dec_steps),
containing 1s and 0s. 1s correspond to real tokens in dec_batch and target_batch;
0s correspond to padding.
"""
# Pad the inputs and targets
for ex in example_list:
ex.pad_decoder_inp_targ(hps.max_dec_steps, self.pad_id)
# Initialize the numpy arrays.
# Note: our decoder inputs and targets must be the same length for each batch
# (second dimension = max_dec_steps) because we do not use a dynamic_rnn for decoding.
# However I believe this is possible, or will soon be possible, with Tensorflow 1.0,
# in which case it may be best to upgrade to that.
self.dec_batch = np.zeros(
(hps.batch_size, hps.max_dec_steps), dtype=np.int32)
self.target_batch = np.zeros(
(hps.batch_size, hps.max_dec_steps), dtype=np.int32)
self.dec_padding_mask = np.zeros(
(hps.batch_size, hps.max_dec_steps), dtype=np.float32)
# Fill in the numpy arrays
for i, ex in enumerate(example_list):
self.dec_batch[i, :] = ex.dec_input[:]
self.target_batch[i, :] = ex.target[:]
for j in xrange(ex.dec_len):
self.dec_padding_mask[i][j] = 1
def store_orig_strings(self, example_list):
"""Store the original article and abstract strings in the Batch object"""
self.original_articles = [
ex.original_article for ex in example_list] # list of lists
self.original_abstracts = [
ex.original_abstract for ex in example_list] # list of lists
self.original_abstracts_sents = [
ex.original_abstract_sents for ex in example_list] # list of list of lists
self.article_ids = [ex.article_id for ex in example_list]
class Batcher(object):
"""A class to generate minibatches of data. Buckets examples together based on length of the encoder sequence."""
BATCH_QUEUE_MAX = 100 # max number of batches the batch_queue can hold
def __init__(self, data_path, vocab, hps, single_pass,
article_id_key,
article_key, abstract_key,
labels_key,
section_names_key,
sections_key):
"""Initialize the batcher. Start threads that process the data into batches.
Args:
data_path: tf.Example filepattern.
vocab: Vocabulary object
hps: hyperparameters
single_pass: If True, run through the dataset exactly once
(useful for when you want to run evaluation on the dev or test set).
Otherwise generate random batches indefinitely (useful for training).
article_id_key: article id key in tf.Example
article_key: article feature key in tf.Example.
abstract_key: abstract feature key in tf.Example.
labels_key: labels feature key in tf.Example.
section_names_key: section names key in tf.Example.
sections_key: sections key in tf.Example.
"""
self._data_path = data_path
self._vocab = vocab
self._hps = hps
self._single_pass = single_pass
# Initialize a queue of Batches waiting to be used, and a queue of
# Examples waiting to be batched
self._batch_queue = Queue.Queue(self.BATCH_QUEUE_MAX)
self._example_queue = Queue.Queue(
self.BATCH_QUEUE_MAX * self._hps.batch_size)
self._article_id_key = article_id_key
self._article_key = article_key
self._abstract_key = abstract_key
self._labels_key = labels_key
self._section_names_key = section_names_key
self._sections_key = sections_key
# Different settings depending on whether we're in single_pass mode or
# not
if single_pass:
# just one thread, so we read through the dataset just once
self._num_example_q_threads = 1
self._num_batch_q_threads = 1 # just one thread to batch examples
# only load one batch's worth of examples before bucketing; this
# essentially means no bucketing
self._bucketing_cache_size = 1
# this will tell us when we're finished reading the dataset
self._finished_reading = False
else:
self._num_example_q_threads = 16 # num threads to fill example queue
self._num_batch_q_threads = 4 # num threads to fill batch queue
# how many batches-worth of examples to load into cache before
# bucketing
self._bucketing_cache_size = 100
# Start the threads that load the queues
self._example_q_threads = []
for _ in xrange(self._num_example_q_threads):
self._example_q_threads.append(
Thread(target=self._fill_example_queue))
self._example_q_threads[-1].daemon = True
self._example_q_threads[-1].start()
self._batch_q_threads = []
for _ in xrange(self._num_batch_q_threads):
self._batch_q_threads.append(Thread(target=self._fill_batch_queue))
self._batch_q_threads[-1].daemon = True
self._batch_q_threads[-1].start()
# Start a thread that watches the other threads and restarts them if
# they're dead
if not single_pass: # We don't want a watcher in single_pass mode because the threads shouldn't run forever
self._watch_thread = Thread(target=self.watch_threads)
self._watch_thread.daemon = True
self._watch_thread.start()
def next_batch(self):
"""Return a Batch from the batch queue.
If mode='decode' then each batch contains a single example
repeated beam_size-many times; this is necessary for beam search.
Returns:
batch: a Batch object, or None if we're in single_pass mode and we've exhausted the dataset.
"""
# If the batch queue is empty, print a warning
if self._batch_queue.qsize() == 0:
tf.logging.warning(
'Bucket input queue is empty when calling next_batch.'
' Bucket queue size: %i, Input queue size: %i',
self._batch_queue.qsize(),
self._example_queue.qsize())
if self._single_pass and self._finished_reading:
tf.logging.info(
"Finished reading dataset in single_pass mode.")
return None
batch = self._batch_queue.get() # get the next Batch
return batch
def _fill_example_queue(self):
"""Reads data from file and processes into Examples which are then placed into the example queue."""
input_gen = self.text_generator(
data.example_generator(self._data_path, self._single_pass))
cnt = 0
fail = 0
while True:
try:
# read the next example from file. article and abstract are
# both strings.
(article_id, article_text, abstract_sents, labels,
section_names, sections) = six.next(input_gen)
except StopIteration: # if there are no more examples:
tf.logging.info(
"The example generator for this example queue filling thread has exhausted data.")
if self._single_pass:
tf.logging.info(
"single_pass mode is on, so we've finished reading dataset. This thread is stopping.")
self._finished_reading = True
break
else:
raise Exception(
"single_pass mode is off but the example generator is out of data; error.")
# Use the <s> and </s> tags in abstract to get a list of sentences.
# abstract_sentences = [sent.strip() for sent in data.abstract2sents(''.join(abstract_sents))]
abstract_sentences = [e.replace(data.SENTENCE_START, '').replace(data.SENTENCE_END, '').strip()
for e in abstract_sents]
# at least 2 sections, some articles do not have sections
if "_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ __ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _" in article_text:
continue
if len(sections) <= 1:
continue
if not sections or len(sections) == 0:
continue
# do not process articles that are too long
if len(article_text) > self._hps.max_article_sents:
continue
# Do not process documents with unusually long or short abstracts
abst_len = len(' '.join(abstract_sentences).split())
if abst_len > self._hps.max_abstract_len or\
abst_len < self._hps.min_abstract_len:
continue
# Process into an Example.
example = Example(article_text, abstract_sentences, article_id, sections, section_names, labels,
self._vocab, self._hps)
# place the Example in the example queue.
if example.discard:
fail += 1
cnt += 1
if example is not None and not example.discard:
self._example_queue.put(example)
if cnt % 100 == 0:
print('total in queue: {} of {}'.format(cnt - fail, cnt))
def _fill_batch_queue(self):
"""Takes Examples out of example queue, sorts them by encoder sequence length,
processes into Batches and places them in the batch queue.
In decode mode, makes batches that each contain a single example repeated.
"""
while True:
if self._hps.mode != 'decode':
# Get bucketing_cache_size-many batches of Examples into a
# list, then sort
inputs = []
for _ in xrange(self._hps.batch_size * self._bucketing_cache_size):
inputs.append(self._example_queue.get())
# sort by length of encoder sequence
inputs = sorted(inputs, key=lambda inp: inp.enc_len)
# Group the sorted Examples into batches, optionally shuffle
# the batches, and place in the batch queue.
batches = []
for i in xrange(0, len(inputs), self._hps.batch_size):
batches.append(inputs[i:i + self._hps.batch_size])
if not self._single_pass:
shuffle(batches)
for b in batches: # each b is a list of Example objects
self._batch_queue.put(Batch(b, self._hps, self._vocab))
else: # beam search decode mode
ex = self._example_queue.get()
b = [ex for _ in xrange(self._hps.batch_size)]
self._batch_queue.put(Batch(b, self._hps, self._vocab))
def watch_threads(self):
"""Watch example queue and batch queue threads and restart if dead."""
while True:
time.sleep(60)
for idx, t in enumerate(self._example_q_threads):
if not t.is_alive(): # if the thread is dead
tf.logging.error(
'Found example queue thread dead. Restarting.')
new_t = Thread(target=self._fill_example_queue)
self._example_q_threads[idx] = new_t
new_t.daemon = True
new_t.start()
for idx, t in enumerate(self._batch_q_threads):
if not t.is_alive(): # if the thread is dead
tf.logging.error(
'Found batch queue thread dead. Restarting.')
new_t = Thread(target=self._fill_batch_queue)
self._batch_q_threads[idx] = new_t
new_t.daemon = True
new_t.start()
def text_generator(self, example_gen):
"""Generates article and abstract text from tf.Example."""
while True:
e = six.next(example_gen)
try:
article_id = self._get_example_feature(e, self._article_id_key)
article_text = self._get_example_feature(e, self._article_key)
abstract_text = self._get_example_feature(
e, self._abstract_key)
if not self._hps.pubmed:
labels = self._get_example_feature(e, self._labels_key)
section_names = self._get_example_feature(
e, self._section_names_key)
sections = self._get_example_feature(e, self._sections_key)
# convert to list
article_text = _string_to_list(article_text)
abstract_text = _string_to_list(abstract_text)
if not self._hps.pubmed:
labels = _string_to_list(labels, dtype='int')
else:
labels = None
section_names = _string_to_list(section_names)
sections = _string_to_nested_list(sections) # list of lists
except ValueError:
tf.logging.error(
'Failed to get article or abstract from example')
continue
yield (article_id, article_text, abstract_text, labels, section_names, sections)
def _get_example_feature(self, ex, key):
"""Extract text for a feature from td.Example.
Args:
ex: tf.Example.
key: key of the feature to be extracted.
Returns:
feature: a feature text extracted.
"""
return ex.features.feature[key].bytes_list.value[0].decode(
'utf-8', 'ignore')
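# Illustrative usage sketch: how a training script might wire up a Batcher and
# iterate over batches. The feature-key strings below, as well as the `vocab`
# and `hps` objects, are assumptions about the surrounding pipeline and are not
# defined in this file.
def _example_batcher_loop(data_path, vocab, hps):
    batcher = Batcher(data_path, vocab, hps, single_pass=False,
                      article_id_key='article_id',
                      article_key='article_body',
                      abstract_key='abstract',
                      labels_key='labels',
                      section_names_key='section_names',
                      sections_key='sections')
    while True:
        batch = batcher.next_batch()  # blocks until a Batch is available
        if batch is None:  # only returned in single_pass mode once data is exhausted
            break
        yield batch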
|
gap.py
|
r"""
Interface to GAP
Sage provides an interface to the GAP system. This system provides
extensive group theory, combinatorics, etc.
The GAP interface will only work if GAP is installed on your
computer; this should be the case, since GAP is included with Sage.
The interface offers three pieces of functionality:
#. ``gap_console()`` - A function that dumps you into
an interactive command-line GAP session.
#. ``gap(expr)`` - Evaluation of arbitrary GAP
expressions, with the result returned as a string.
#. ``gap.new(expr)`` - Creation of a Sage object that
wraps a GAP object. This provides a Pythonic interface to GAP. For
example, if ``f=gap.new(10)``, then
``f.Factors()`` returns the prime factorization of
`10` computed using GAP.
First Examples
--------------
We factor an integer using GAP::
sage: n = gap(20062006); n
20062006
sage: n.parent()
Gap
sage: fac = n.Factors(); fac
[ 2, 17, 59, 73, 137 ]
sage: fac.parent()
Gap
sage: fac[1]
2
GAP and Singular
----------------
This example illustrates conversion between Singular and GAP via
Sage as an intermediate step. First we create and factor a Singular
polynomial.
::
sage: singular(389)
389
sage: R1 = singular.ring(0, '(x,y)', 'dp')
sage: f = singular('9*x^16-18*x^13*y^2-9*x^12*y^3+9*x^10*y^4-18*x^11*y^2+36*x^8*y^4+18*x^7*y^5-18*x^5*y^6+9*x^6*y^4-18*x^3*y^6-9*x^2*y^7+9*y^8')
sage: F = f.factorize()
sage: print(F)
[1]:
_[1]=9
_[2]=x^6-2*x^3*y^2-x^2*y^3+y^4
_[3]=-x^5+y^2
[2]:
1,1,2
Next we convert the factor `-x^5+y^2` to a Sage
multivariate polynomial. Note that it is important to let
`x` and `y` be the generators of a polynomial ring,
so the eval command works.
::
sage: R.<x,y> = PolynomialRing(QQ,2)
sage: s = F[1][3].sage_polystring(); s
'-x**5+y**2'
sage: g = eval(s); g
-x^5 + y^2
Next we create a polynomial ring in GAP and obtain its
indeterminates::
sage: R = gap.PolynomialRing('Rationals', 2); R
PolynomialRing( Rationals, ["x_1", "x_2"] )
sage: I = R.IndeterminatesOfPolynomialRing(); I
[ x_1, x_2 ]
In order to eval `g` in GAP, we need to tell GAP to view
the variables ``x`` and ``y`` as the two
generators of `R`. This is the one tricky part. In the GAP
interpreter the object ``I`` has its own name (which
isn't ``I``). We can access its name using
``I.name()``.
::
sage: _ = gap.eval("x := %s[1];; y := %s[2];;"%(I.name(), I.name()))
Now ``x`` and ``y`` are defined, so we can
construct the GAP polynomial `f` corresponding to
`g`::
sage: R.<x,y> = PolynomialRing(QQ,2)
sage: f = gap(str(g)); f
-x_1^5+x_2^2
We can call GAP functions on `f`. For example, we evaluate
the GAP ``Value`` function, which evaluates `f`
at the point `(1,2)`.
::
sage: f.Value(I, [1,2])
3
sage: g(1,2) # agrees
3
Saving and loading objects
--------------------------
Saving and loading GAP objects (using the dumps method, etc.) is
*not* supported, since the output string representation of Gap
objects is sometimes not valid input to GAP. Creating classes that
wrap GAP objects *is* supported, by simply defining a
``_gap_init_`` member function that returns a string which, when
evaluated in GAP, constructs the object. See
``groups/perm_gps/permgroup.py`` for a nontrivial
example of this.
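A minimal sketch of such a wrapper (the class below is purely
illustrative and not part of Sage; it assumes the standard
``SageObject`` conversion machinery)::
    from sage.structure.sage_object import SageObject
    class WrappedCyclicGroup(SageObject):
        def __init__(self, n):
            self.n = n
        def _gap_init_(self):
            # string that, when evaluated in GAP, reconstructs the object
            return 'CyclicGroup(%s)' % self.n
With such a class, ``gap(WrappedCyclicGroup(5))`` should build
``CyclicGroup(5)`` inside the running GAP session and wrap the result
as a GAP element.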
Long Input
----------
The GAP interface reads in even very long input (using files) in a
robust manner, as long as you are creating a new object.
.. note::
Using ``gap.eval`` for long input is much less robust, and is not
recommended.
::
sage: t = '"%s"'%10^10000 # ten thousand character string.
sage: a = gap(t)
Changing which GAP is used
--------------------------
Use this code to change which GAP interpreter is run. E.g.,
::
import sage.interfaces.gap
sage.interfaces.gap.gap_cmd = "/usr/local/bin/gap"
AUTHORS:
- David Joyner and William Stein: initial version(s)
- William Stein (2006-02-01): modified gap_console command so it uses
exactly the same startup command as Gap.__init__.
- William Stein (2006-03-02): added tab completions: gap.[tab], x =
gap(...), x.[tab], and docs, e.g., gap.function? and x.function?
"""
#*****************************************************************************
# Copyright (C) 2005 William Stein <wstein@gmail.com>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# http://www.gnu.org/licenses/
#*****************************************************************************
from __future__ import print_function
from expect import Expect, ExpectElement, FunctionElement, ExpectFunction
from sage.env import SAGE_LOCAL, SAGE_EXTCODE, DOT_SAGE
from sage.misc.misc import is_in_string
from sage.misc.superseded import deprecation
from sage.misc.cachefunc import cached_method
from sage.interfaces.tab_completion import ExtraTabCompletion
import re
import os
import pexpect
import time
import platform
import string
GAP_DIR = os.path.join(DOT_SAGE, 'gap')
WORKSPACE = os.path.join(GAP_DIR, 'workspace-%s'%abs(hash(SAGE_LOCAL)))
GAP_BINARY = os.path.join(SAGE_LOCAL, 'bin', 'gap')
first_try = True
gap_cmd = "gap -r"
if platform.processor() == 'ia64' and os.path.exists('/usr/bin/prctl'):
# suppress unaligned access to 0x..., ip=0x... warnings
gap_cmd = 'prctl --unaligned=silent ' + gap_cmd
def gap_command(use_workspace_cache=True, local=True):
if use_workspace_cache:
if local:
return "%s -L %s"%(gap_cmd, WORKSPACE), False
else:
# TO DO: Use remote workspace
return gap_cmd, False
else:
return gap_cmd, True
############ Set the GAP memory pool size
# you should always use get_gap_memory_pool_size() to access this value
gap_memory_pool_size = None
def set_gap_memory_pool_size(size_in_bytes):
"""
Set the desired gap memory pool size.
Subsequently started GAP/libGAP instances will use this as
default. Currently running instances are unchanged.
GAP will only reserve ``size_in_bytes`` address space. Unless you
actually start a big GAP computation, the memory will not be
used. However, corresponding swap space will be reserved so that
GAP will always be able to use the reserved address space if
needed. While nothing is actually written to disc as long as you
don't run a big GAP computation, the reserved swap space will not
be available for other processes.
INPUT:
- ``size_in_bytes`` -- integer. The desired memory pool size.
EXAMPLES::
sage: from sage.interfaces.gap import \
... get_gap_memory_pool_size, set_gap_memory_pool_size
sage: n = get_gap_memory_pool_size()
sage: set_gap_memory_pool_size(n)
sage: n == get_gap_memory_pool_size()
True
sage: n # random output
1534059315
"""
global gap_memory_pool_size
gap_memory_pool_size = size_in_bytes
def get_gap_memory_pool_size():
"""
Get the gap memory pool size for new GAP processes.
EXAMPLES::
sage: from sage.interfaces.gap import \
... get_gap_memory_pool_size
sage: get_gap_memory_pool_size() # random output
1534059315
"""
global gap_memory_pool_size
if gap_memory_pool_size is not None:
return gap_memory_pool_size
from sage.misc.memory_info import MemoryInfo
mem = MemoryInfo()
suggested_size = max(mem.available_swap() // 10,
mem.available_ram() // 50)
# Don't eat all address space if the user set ulimit -v
suggested_size = min(suggested_size, mem.virtual_memory_limit()//10)
# ~220MB is the minimum for long doctests
suggested_size = max(suggested_size, 250 * 1024**2)
return suggested_size
def _get_gap_memory_pool_size_MB():
"""
Return the gap memory pool size suitable for usage on the GAP
command line.
The GAP 4.5.6 command line parser had issues with large numbers, so
we return it in megabytes.
OUTPUT:
String.
EXAMPLES:
sage: from sage.interfaces.gap import \
... _get_gap_memory_pool_size_MB
sage: _get_gap_memory_pool_size_MB() # random output
'1467m'
"""
pool = get_gap_memory_pool_size()
pool = (pool // (1024**2)) + 1
return str(pool)+'m'
############ Classes with methods for both the GAP3 and GAP4 interface
class Gap_generic(ExtraTabCompletion, Expect):
r"""
Generic interface to the GAP3/GAP4 interpreters.
AUTHORS:
- William Stein and David Joyner (interface for GAP4)
- Franco Saliola (Feb 2010): refactored to separate out the generic
code
"""
_identical_function = "IsIdenticalObj"
def _synchronize(self, timeout=0.5, cmd='%s;'):
"""
Synchronize GAP pexpect interface.
See the base method
:meth:`~sage.interfaces.expect.Expect._synchronize` for more
details.
We override this method since we are looking at GAP package
mode output, which is quite different from the normal
(human-readable) interface.
EXAMPLES::
sage: gap('"ok"')
ok
sage: gap._expect.sendline() # now we are out of sync
1
sage: gap._synchronize()
sage: gap(123)
123
"""
if self._expect is None:
return
E = self._expect
from sage.misc.prandom import randrange
rnd = randrange(2147483647)
cmd = str(rnd)+';'
try:
E.sendline(cmd)
E.expect('@[nf][@J\s>]*'+str(rnd), timeout=timeout)
E.send(' ')
E.expect('@i', timeout=timeout)
except pexpect.TIMEOUT:
self.interrupt()
except pexpect.EOF:
self._crash_msg()
self.quit()
def interrupt(self, tries=None, timeout=1, quit_on_fail=True):
"""
Interrupt the GAP process
Gap installs a SIGINT handler; we call it directly instead of
trying to send Ctrl-C. Unlike
:meth:`~sage.interfaces.expect.Expect.interrupt`, we only try
once, since we know what we are doing.
Sometimes GAP dies while interrupting.
EXAMPLES::
sage: gap._eval_line('while(1=1) do i:=1;; od;', wait_for_prompt=False);
''
sage: rc = gap.interrupt(timeout=1)
sage: [ gap(i) for i in range(10) ] # check that it is still working
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
TESTS::
sage: gap('"finished computation"'); gap.interrupt(); gap('"ok"')
finished computation
True
ok
"""
E = self._expect
if E is None:
return True
# GAP oddity: If a computation is running and we send Ctrl-C,
# it is stopped as expected. But if we are at the idle prompt,
# nothing is happening UNTIL we run the next command (which is
# then immediately interrupted).
# There is apparently also a race in GAP between the signal
# handler and input, if we don't wait a bit the result is
# unpredictable.
E.sendline(chr(3))
time.sleep(0.1)
E.sendline()
try:
# send a dummy command
E.sendline('224433409;')
# read everything up to the actual output of the command
E.expect('@[nf][@J\s>]*224433409', timeout=timeout)
E.send(' ')
# the following input prompt should be the current input
# prompt but GAP might be too confused to display it
# E.expect('@i', timeout=timeout)
# Ideally, we would be finished here. But sometimes GAP
# thinks it is still inside a do/od block. So we run some
# more plain commands to get back into sync. These might
# either complete successfully (output "@n+<number>") or
# return a "Syntax error: od expected@J@f +<number>"
E.sendline()
time.sleep(0.1)
E.sendline('224433437;')
E.expect('@[nf][@J\s>]*224433437', timeout=timeout)
E.sendline()
time.sleep(0.1)
E.sendline('224433479;')
E.expect('@[nf][@J\s>]*224433479', timeout=timeout)
E.send(' ')
# the following input prompt is now the current input prompt
E.expect('@i', timeout=timeout)
success = True
except (pexpect.TIMEOUT, pexpect.EOF):
# GAP died or hangs indefinitely
success = False
if not success and quit_on_fail:
self.quit()
return success
def _assign_symbol(self):
r"""
Return the assign symbol in GAP.
TESTS::
sage: gap = Gap()
sage: print(gap._assign_symbol())
:=
"""
return ":="
def _quit_string(self):
"""
Returns the string used to quit GAP.
EXAMPLES::
sage: gap._quit_string()
'quit;'
::
sage: g = Gap()
sage: a = g(2); g.is_running()
True
sage: g.quit()
sage: g.is_running()
False
"""
return 'quit;'
def _read_in_file_command(self, filename):
r"""
Returns the command used to read in a file in GAP.
EXAMPLES::
sage: gap._read_in_file_command('test')
'Read("test");'
::
sage: filename = tmp_filename()
sage: f = open(filename, 'w')
sage: f.write('xx := 22;\n')
sage: f.close()
sage: gap.read(filename)
sage: gap.get('xx').strip()
'22'
"""
return 'Read("%s");'%filename
def _continuation_prompt(self):
"""
Returns the continuation prompt in GAP.
EXAMPLES::
sage: gap._continuation_prompt()
'> '
"""
return '> '
def load_package(self, pkg, verbose=False):
"""
Load the Gap package with the given name.
If loading fails, raise a RuntimeError exception.
TESTS::
sage: gap.load_package("chevie")
Traceback (most recent call last):
...
RuntimeError: Error loading Gap package chevie. You may want to install the gap_packages and/or database_gap SPKGs.
"""
if verbose:
print("Loading GAP package %s" % pkg)
x = self.eval('LoadPackage("%s")'%pkg)
if x == 'fail':
raise RuntimeError("Error loading Gap package "+str(pkg)+". "+
"You may want to install the gap_packages and/or database_gap SPKGs.")
def eval(self, x, newlines=False, strip=True, split_lines=True, **kwds):
r"""
Send the code in the string ``x`` to the GAP interpreter and return the
output as a string.
INPUT:
- ``x`` - string containing GAP code.
- ``newlines`` - bool (default: False); if False,
remove all backslash-newlines inserted by the GAP output
formatter.
- ``strip`` - ignored
- ``split_lines`` -- bool (default: True); if True then each
line is evaluated separately. If False, then the whole
block of code is evaluated all at once.
EXAMPLES::
sage: gap.eval('2+2')
'4'
sage: gap.eval('Print(4); #test\n Print(6);')
'46'
sage: gap.eval('Print("#"); Print(6);')
'#6'
sage: gap.eval('4; \n 6;')
'4\n6'
sage: gap.eval('if 3>2 then\nPrint("hi");\nfi;')
'hi'
sage: gap.eval('## this is a test\nPrint("OK")')
'OK'
sage: gap.eval('Print("This is a test. Oh no, a #");# but this is a comment\nPrint("OK")')
'This is a test. Oh no, a #OK'
sage: gap.eval('if 4>3 then')
''
sage: gap.eval('Print("Hi how are you?")')
'Hi how are you?'
sage: gap.eval('fi')
''
"""
# '"
#We remove all of the comments: On each line, we try
#to find a pound sign. If we find it, we check to see if
#it is occurring in a string. If it is not in a string, we
#strip off the comment.
if not split_lines:
input_line=str(x)
else:
input_line = ""
for line in str(x).rstrip().split('\n'):
pound_position = line.find('#')
while pound_position != -1:
if not is_in_string(line, pound_position):
line = line[:pound_position]
pound_position = line.find('#',pound_position+1)
input_line += " "+line
if not input_line.endswith(';'):
input_line += ';'
result = Expect.eval(self, input_line, **kwds)
if not newlines:
result = result.replace("\\\n","")
return result.strip()
def _execute_line(self, line, wait_for_prompt=True, expect_eof=False):
if self._expect is None: # interface is down
self._start()
E = self._expect
try:
if len(line) > 4095:
raise RuntimeError("Passing commands this long to gap would hang")
E.sendline(line)
except OSError:
raise RuntimeError("Error evaluating %s in %s"%(line, self))
if wait_for_prompt == False:
return ('','')
if len(line)==0:
return ('','')
try:
terminal_echo = [] # to be discarded
normal_outputs = [] # GAP stdout
error_outputs = [] # GAP stderr
current_outputs = terminal_echo
while True:
x = E.expect_list(self._compiled_full_pattern)
current_outputs.append(E.before)
if x == 0: # @p
if E.after != '@p1.':
print("Warning: possibly wrong version of GAP package interface\n")
print("Crossing fingers and continuing\n")
elif x == 1: #@@
current_outputs.append('@')
elif x == 2: #special char
current_outputs.append(chr(ord(E.after[1:2])-ord('A')+1))
elif x == 3: # garbage collection info, ignore
pass
elif x == 4: # @e -- break loop
E.sendline("quit;")
elif x == 5: # @c completion, doesn't seem to happen when -p is in use
print("I didn't think GAP could do this\n")
elif x == 6: # @f GAP error message
current_outputs = error_outputs;
elif x == 7: # @h help text, but this stopped happening with new help
print("I didn't think GAP could do this")
elif x == 8: # @i awaiting normal input
break;
elif x == 9: # @m finished running a child
pass # there is no need to do anything
elif x==10: #@n normal output line
current_outputs = normal_outputs;
elif x==11: #@r echoing input
current_outputs = terminal_echo
elif x==12: #@sN shouldn't happen
print("Warning: this should never happen")
elif x==13: #@w GAP is trying to send a Window command
print("Warning: this should never happen")
elif x ==14: #@x seems to be safely ignorable
pass
elif x == 15:#@z GAP starting a subprocess
pass # there is no need to do anything
except pexpect.EOF:
if not expect_eof:
raise RuntimeError("Unexpected EOF from %s executing %s"%(self,line))
except IOError:
raise RuntimeError("IO Error from %s executing %s"%(self,line))
return ("".join(normal_outputs),"".join(error_outputs))
def _keyboard_interrupt(self):
"""
TESTS:
We check that the gap interface behaves correctly after an
interrupt::
sage: gap(2)
2
sage: try:
....: alarm(0.5)
....: while True: SymmetricGroup(7).conjugacy_classes_subgroups()
....: except KeyboardInterrupt:
....: pass
sage: gap(2)
2
"""
self.quit()
raise KeyboardInterrupt("Ctrl-c pressed while running %s"%self)
def _eval_line(self, line, allow_use_file=True, wait_for_prompt=True, restart_if_needed=True):
"""
Evaluate a line of commands.
REMARK:
By default, a long command (length exceeding ``self._eval_using_file_cutoff``)
is evaluated using :meth:`_eval_line_using_file`.
If the command can not be evaluated since the interface
has crashed, it is automatically restarted and tried
again *once*.
If the optional ``wait_for_prompt`` is ``False`` then even a very long line
will not be evaluated by :meth:`_eval_line_using_file`, since this does not
support the ``wait_for_prompt`` option.
INPUT:
- ``line`` -- (string) a command.
- ``allow_use_file`` (optional bool, default ``True``) --
allow to evaluate long commands using :meth:`_eval_line_using_file`.
- ``wait_for_prompt`` (optional bool, default ``True``) --
wait until the prompt appears in the sub-process' output.
- ``restart_if_needed`` (optional bool, default ``True``) --
If it is ``True``, the command is evaluated
a second time after restarting the interface, if an
``EOFError`` occurred.
TESTS::
sage: gap._eval_line('2+2;')
'4'
We test the ``wait_for_prompt`` option by sending a command that
creates an infinite loop in the GAP sub-process. But if we don't
wait for the prompt to appear in the output, we can interrupt
the loop without raising a KeyboardInterrupt. At the same time,
we test that the line is not forwarded to :meth:`_eval_line_using_file`,
since that method would not support the ``wait_for_prompt`` option::
sage: cutoff = gap._eval_using_file_cutoff
sage: gap._eval_using_file_cutoff = 4
sage: gap._eval_line('while(1=1) do i:=1;; od;', wait_for_prompt=False)
''
sage: rc = gap.interrupt(timeout=1)
sage: gap._eval_using_file_cutoff = cutoff
The following tests against a bug fixed at :trac:`10296`::
sage: gap(3)
3
sage: gap.eval('quit;')
''
sage: a = gap(3)
** Gap crashed or quit executing '\$sage...:=3;;' **
Restarting Gap and trying again
sage: a
3
"""
#if line.find('\n') != -1:
# raise ValueError, "line must not contain any newlines"
E = None
try:
if self._expect is None:
self._start()
E = self._expect
#import pdb; pdb.set_trace()
if allow_use_file and wait_for_prompt and len(line) > self._eval_using_file_cutoff:
return self._eval_line_using_file(line)
(normal, error) = self._execute_line(line, wait_for_prompt=wait_for_prompt,
expect_eof= (self._quit_string() in line))
if len(error)> 0:
if 'Error, Rebuild completion files!' in error:
error += "\nRunning gap_reset_workspace()..."
self.quit()
gap_reset_workspace()
error = error.replace('\r','')
raise RuntimeError("%s produced error output\n%s\n executing %s"%(self, error,line))
if len(normal) == 0:
return ''
if isinstance(wait_for_prompt, str) and normal.endswith(wait_for_prompt):
n = len(wait_for_prompt)
elif normal.endswith(self._prompt):
n = len(self._prompt)
elif normal.endswith(self._continuation_prompt()):
n = len(self._continuation_prompt())
else:
n = 0
out = normal[:-n]
if len(out) > 0 and out[-1] == "\n":
out = out[:-1]
return out
except (RuntimeError,TypeError) as message:
if 'EOF' in message[0] or E is None or not E.isalive():
print("** %s crashed or quit executing '%s' **" % (self, line))
print("Restarting %s and trying again" % self)
self._start()
if line != '':
return self._eval_line(line, allow_use_file=allow_use_file)
else:
return ''
else:
raise RuntimeError(message)
except KeyboardInterrupt:
self._keyboard_interrupt()
raise KeyboardInterrupt("Ctrl-c pressed while running %s"%self)
def unbind(self, var):
"""
Clear the variable named var.
EXAMPLES::
sage: gap.set('x', '2')
sage: gap.get('x')
'2'
sage: gap.unbind('x')
sage: gap.get('x')
Traceback (most recent call last):
...
RuntimeError: Gap produced error output
Error, Variable: 'x' must have a value
...
"""
self.eval('Unbind(%s)'%var)
self.clear(var)
def _contains(self, v1, v2):
"""
EXAMPLES::
sage: Integers = gap('Integers')
sage: two = gap(2)
sage: gap._contains(two.name(), Integers.name())
True
::
sage: 2 in gap('Integers')
True
"""
return self.eval('%s in %s'%(v1,v2)) == "true"
def _true_symbol(self):
"""
Returns the symbol for truth in GAP.
EXAMPLES::
sage: gap._true_symbol()
'true'
sage: gap(2) == gap(2)
True
"""
return "true"
def _false_symbol(self):
"""
Returns the symbol for falsity in GAP.
EXAMPLES::
sage: gap._false_symbol()
'false'
sage: gap(2) == gap(3)
False
"""
return "false"
def _equality_symbol(self):
"""
Returns the symbol for equality in GAP.
EXAMPLES::
sage: gap._equality_symbol()
'='
sage: gap(2) == gap(3)
False
sage: gap(2) == gap(2)
True
"""
return "="
def version(self):
"""
Returns the version of GAP being used.
EXAMPLES::
sage: print(gap.version())
4.8...
"""
return self.eval('VERSION')[1:-1]
def function_call(self, function, args=None, kwds=None):
"""
Calls the GAP function with args and kwds.
EXAMPLES::
sage: gap.function_call('SymmetricGroup', [5])
SymmetricGroup( [ 1 .. 5 ] )
If the GAP function does not return a value, but prints something
to the screen, then a string of the printed output is returned.
::
sage: s = gap.function_call('Display', [gap.SymmetricGroup(5).CharacterTable()])
sage: type(s)
<class 'sage.interfaces.interface.AsciiArtString'>
sage: s.startswith('CT')
True
TESTS:
If the function call is too long, two ``gap.eval`` calls are made
since returned values from commands in a file cannot be handled
properly::
sage: g = Gap()
sage: g.function_call("ConjugacyClassesSubgroups", sage.interfaces.gap.GapElement(g, 'SymmetricGroup(2)', name = 'a_variable_with_a_very_very_very_long_name'))
[ ConjugacyClassSubgroups(SymmetricGroup( [ 1 .. 2 ] ),Group( [ () ] )),
ConjugacyClassSubgroups(SymmetricGroup( [ 1 .. 2 ] ),SymmetricGroup( [ 1 .. 2 ] )) ]
When the command itself is so long that it warrants use of a temporary
file to be communicated to GAP, this does not cause problems since
the file will contain a single command::
sage: g.function_call("ConjugacyClassesSubgroups", sage.interfaces.gap.GapElement(g, 'SymmetricGroup(2)', name = 'a_variable_with_a_name_so_very_very_very_long_that_even_by_itself_will_make_expect_use_a_file'))
[ ConjugacyClassSubgroups(SymmetricGroup( [ 1 .. 2 ] ),Group( [ () ] )),
ConjugacyClassSubgroups(SymmetricGroup( [ 1 .. 2 ] ),SymmetricGroup( [ 1 .. 2 ] )) ]
"""
args, kwds = self._convert_args_kwds(args, kwds)
self._check_valid_function_name(function)
#Here we have to do some magic because not all GAP
#functions return a value. If you try to store their
#results to a variable, then GAP will complain. Thus, before
#we evaluate the function, we make it so that the marker string
#is in the 'last' variable in GAP. If the function returns a
#value, then that value will be in 'last', otherwise it will
#be the marker.
marker = '__SAGE_LAST__:="__SAGE_LAST__";;'
cmd = "%s(%s);;"%(function, ",".join([s.name() for s in args]+
['%s=%s'%(key,value.name()) for key, value in kwds.items()]))
if len(marker) + len(cmd) <= self._eval_using_file_cutoff:
# We combine the two commands so we only run eval() once and the
# only output would be from the second command
res = self.eval(marker+cmd)
else:
self.eval(marker)
res = self.eval(cmd)
if self.eval(self._identical_function + '(last,__SAGE_LAST__)') != 'true':
return self.new('last2;')
else:
if res.strip():
from sage.interfaces.expect import AsciiArtString
return AsciiArtString(res)
def get_record_element(self, record, name):
r"""
Return the element of a GAP record identified by ``name``.
INPUT:
- ``record`` -- a GAP record
- ``name`` -- str
OUTPUT:
- :class:`GapElement`
EXAMPLES::
sage: rec = gap('rec( a := 1, b := "2" )')
sage: gap.get_record_element(rec, 'a')
1
sage: gap.get_record_element(rec, 'b')
2
TESTS::
sage: rec = gap('rec( a := 1, b := "2" )')
sage: type(gap.get_record_element(rec, 'a'))
<class 'sage.interfaces.gap.GapElement'>
"""
return self('%s.%s' % (record.name(), name))
class GapElement_generic(ExtraTabCompletion, ExpectElement):
r"""
Generic interface to the GAP3/GAP4 interpreters.
AUTHORS:
- William Stein and David Joyner (interface for GAP4)
- Franco Saliola (Feb 2010): refactored to separate out the generic
code
"""
def __repr__(self):
"""
EXAMPLES::
sage: gap(2)
2
"""
s = ExpectElement.__repr__(self)
if s.find('must have a value') != -1:
raise RuntimeError("An error occurred creating an object in %s from:\n'%s'\n%s"%(self.parent().name(), self._create, s))
return s
def bool(self):
"""
EXAMPLES::
sage: bool(gap(2))
True
sage: gap(0).bool()
False
sage: gap('false').bool()
False
"""
P = self._check_valid()
return self != P(0) and repr(self) != 'false'
def __len__(self):
"""
EXAMPLES::
sage: v = gap('[1,2,3]'); v
[ 1, 2, 3 ]
sage: len(v)
3
len is also called implicitly by if::
sage: if gap('1+1 = 2'):
....: print("1 plus 1 does equal 2")
1 plus 1 does equal 2
::
sage: if gap('1+1 = 3'):
....: print("it is true")
....: else:
....: print("it is false")
it is false
"""
P = self.parent()
if P.eval('%s = true'%self.name()) == 'true':
return 1
elif P.eval('%s = false'%self.name()) == 'true':
return 0
else:
return int(self.Length())
def is_string(self):
"""
Tell whether this element is a string.
EXAMPLES::
sage: gap('"abc"').is_string()
True
sage: gap('[1,2,3]').is_string()
False
"""
return bool(self.IsString())
def _matrix_(self, R):
r"""
Return matrix over the (Sage) ring R determined by self, where self
should be a Gap matrix.
EXAMPLES::
sage: s = gap("(Z(7)^0)*[[1,2,3],[4,5,6]]"); s
[ [ Z(7)^0, Z(7)^2, Z(7) ], [ Z(7)^4, Z(7)^5, Z(7)^3 ] ]
sage: s._matrix_(GF(7))
[1 2 3]
[4 5 6]
::
sage: s = gap("[[1,2], [3/4, 5/6]]"); s
[ [ 1, 2 ], [ 3/4, 5/6 ] ]
sage: m = s._matrix_(QQ); m
[ 1 2]
[3/4 5/6]
sage: parent(m)
Full MatrixSpace of 2 by 2 dense matrices over Rational Field
::
sage: s = gap('[[Z(16),Z(16)^2],[Z(16)^3,Z(16)]]')
sage: s._matrix_(GF(16,'a'))
[ a a^2]
[a^3 a]
"""
v = self.DimensionsMat()
n = int(v[1])
m = int(v[2])
from sage.matrix.matrix_space import MatrixSpace
M = MatrixSpace(R, n, m)
entries = [[R(self[r,c]) for c in range(1,m+1)] for r in range(1,n+1)]
return M(entries)
############
class Gap(Gap_generic):
r"""
Interface to the GAP interpreter.
AUTHORS:
- William Stein and David Joyner
"""
def __init__(self, max_workspace_size=None,
maxread=None, script_subdirectory=None,
use_workspace_cache=True,
server=None,
server_tmpdir=None,
logfile=None,
seed=None):
"""
EXAMPLES::
sage: gap == loads(dumps(gap))
True
"""
self.__use_workspace_cache = use_workspace_cache
cmd, self.__make_workspace = gap_command(use_workspace_cache, server is None)
cmd += " -b -p -T"
if max_workspace_size is None:
max_workspace_size = _get_gap_memory_pool_size_MB()
cmd += ' -o ' + str(max_workspace_size)
cmd += ' -s ' + str(max_workspace_size)
cmd += ' -m 64m ' # attempt at a workaround for http://tracker.gap-system.org/issues/224
cmd += ' ' + os.path.join(SAGE_EXTCODE,'gap','sage.g')
Expect.__init__(self,
name='gap',
prompt='gap> ',
command=cmd,
maxread=maxread,
server=server,
server_tmpdir=server_tmpdir,
script_subdirectory=script_subdirectory,
restart_on_ctrlc=True,
verbose_start=False,
logfile=logfile,
eval_using_file_cutoff=100)
self.__seq = 0
self._seed = seed
def set_seed(self,seed=None):
"""
Sets the seed for the gap interpreter.
The seed should be an integer.
EXAMPLES::
sage: g = Gap()
sage: g.set_seed(0)
0
sage: [g.Random(1,10) for i in range(5)]
[2, 3, 3, 4, 2]
"""
if seed is None:
seed = self.rand_seed()
self.eval("Reset(GlobalMersenneTwister,%d)" % seed)
self.eval("Reset(GlobalRandomSource,%d)" % seed)
self._seed = seed
return seed
def __reduce__(self):
"""
EXAMPLES::
sage: gap.__reduce__()
(<function reduce_load_GAP at 0x...>, ())
sage: f, args = _
sage: f(*args)
Gap
"""
return reduce_load_GAP, tuple([])
def _next_var_name(self):
r"""
Returns the next unused variable name.
Note that names starting with dollar signs are valid GAP
identifiers, but need to be escaped with a backslash starting
with GAP-4.8.
EXAMPLES::
sage: g = Gap()
sage: g._next_var_name()
'\\$sage1'
sage: g(2)^2
4
sage: g._next_var_name()
'\\$sage...'
"""
if len(self._available_vars) != 0:
v = self._available_vars[0]
del self._available_vars[0]
return v
self.__seq += 1
return r'\$sage%s'%self.__seq
def _start(self):
"""
EXAMPLES::
sage: g = Gap()
sage: g.is_running()
False
sage: g._start()
sage: g.is_running()
True
sage: g.quit()
"""
if self.__use_workspace_cache:
try:
# Check to see if we need to auto-regenerate the gap
# workspace, i.e., if the gap script is more recent
# than the saved workspace, which signals that gap has
# been upgraded.
if os.path.getmtime(WORKSPACE) < os.path.getmtime(GAP_BINARY):
raise OSError("GAP workspace too old")
# Set the modification time of the workspace to the
# current time. This ensures the workspace doesn't
# get deleted too soon by gap_reset_workspace().
os.utime(WORKSPACE, None)
except OSError:
gap_reset_workspace(verbose=False)
global first_try
n = self._session_number
try:
Expect._start(self, "Failed to start GAP.")
except Exception:
if self.__use_workspace_cache and first_try:
first_try = False
self.quit()
gap_reset_workspace(verbose=False)
Expect._start(self, "Failed to start GAP.")
self._session_number = n
self.__make_workspace = False
else:
raise
if self.__use_workspace_cache and self.__make_workspace:
self.save_workspace()
# Now, as self._expect exists, we can compile some useful pattern:
self._compiled_full_pattern = self._expect.compile_pattern_list([
'@p\d+\.','@@','@[A-Z]','@[123456!"#$%&][^+]*\+',
'@e','@c','@f','@h','@i','@m','@n','@r','@s\d','@w.*\+','@x','@z'])
# read everything up to the first "ready" prompt
self._expect.expect("@i")
# set random seed
self.set_seed(self._seed)
def _function_class(self):
"""
Returns the GapFunction class.
EXAMPLES::
sage: gap._function_class()
<class 'sage.interfaces.gap.GapFunction'>
::
sage: type(gap.Order)
<class 'sage.interfaces.gap.GapFunction'>
"""
return GapFunction
def cputime(self, t=None):
r"""
Returns the amount of CPU time that the GAP session has used. If
``t`` is not None, then it returns the difference
between the current CPU time and ``t``.
EXAMPLES::
sage: t = gap.cputime()
sage: t #random
0.13600000000000001
sage: gap.Order(gap.SymmetricGroup(5))
120
sage: gap.cputime(t) #random
0.059999999999999998
"""
if t is not None:
return self.cputime() - t
else:
self.eval('_r_ := Runtimes();')
r = sum(eval(self.eval('[_r_.user_time, _r_.system_time, _r_.user_time_children, _r_.system_time_children]')))
return r/1000.0
def save_workspace(self):
r"""
Save the GAP workspace.
TESTS:
We make sure that #9938 (GAP does not start if the path to the GAP
workspace file contains more than 82 characters) is fixed::
sage: ORIGINAL_WORKSPACE = sage.interfaces.gap.WORKSPACE
sage: sage.interfaces.gap.WORKSPACE = os.path.join(SAGE_TMP, "gap" + "0"*(80-len(SAGE_TMP)))
sage: gap = Gap()
sage: gap('3+2') # long time (4s on sage.math, 2013)
5
sage: sage.interfaces.gap.WORKSPACE = ORIGINAL_WORKSPACE
"""
# According to the GAP Reference Manual,
# [http://www.gap-system.org/Manuals/doc/htm/ref/CHAP003.htm#SSEC011.1]
# SaveWorkspace can only be used at the main gap> prompt. It cannot
# be included in the body of a loop or function, or called from a
# break loop.
from sage.misc.temporary_file import atomic_write
with atomic_write(WORKSPACE) as f:
f.close()
self.eval('SaveWorkspace("%s");'%(f.name), allow_use_file=False)
# Todo -- this -- but there is a tricky "when does it end" issue!
# Maybe do via a file somehow?
def help(self, s, pager=True):
"""
Print help on a given topic.
EXAMPLES::
sage: print(gap.help('SymmetricGroup', pager=False))
<BLANKLINE>
50 Group Libraries
<BLANKLINE>
When you start GAP, it already knows several groups. Currently GAP initially
knows the following groups:
...
"""
tmp_to_use = self._local_tmpfile()
if self.is_remote():
tmp_to_use = self._remote_tmpfile()
else:
tmp_to_use = self._local_tmpfile()
self.eval('SetGAPDocTextTheme("none")')
self.eval(r'\$SAGE.tempfile := "%s";'%tmp_to_use)
line = Expect.eval(self, "? %s"%s)
Expect.eval(self, "? 1")
match = re.search("Page from (\d+)", line)
if match is None:
print(line)
else:
(sline,) = match.groups()
if self.is_remote():
self._get_tmpfile()
F = open(self._local_tmpfile(),"r")
help = F.read()
if pager:
from IPython.core.page import page
page(help, start = int(sline)-1)
else:
return help
def set(self, var, value):
"""
Set the variable var to the given value.
EXAMPLES::
sage: gap.set('x', '2')
sage: gap.get('x')
'2'
"""
cmd = ('%s:=%s;;' % (var, value)).replace('\n','')
self._eval_line(cmd, allow_use_file=True)
def get(self, var, use_file=False):
"""
Get the string representation of the variable var.
EXAMPLES::
sage: gap.set('x', '2')
sage: gap.get('x')
'2'
"""
if use_file:
tmp = self._local_tmpfile()
if os.path.exists(tmp):
os.unlink(tmp)
self.eval('PrintTo("%s", %s);'%(tmp,var), strip=False)
r = open(tmp).read()
r = r.strip().replace("\\\n","")
os.unlink(tmp)
return r
else:
return self.eval('Print(%s);'%var, newlines=False)
def _pre_interact(self):
"""
EXAMPLES::
sage: gap._pre_interact()
sage: gap._post_interact()
"""
self._eval_line(r'\$SAGE.StartInteract();')
def _post_interact(self):
"""
EXAMPLES::
sage: gap._pre_interact()
sage: gap._post_interact()
"""
self._eval_line(r'\$SAGE.StopInteract();')
def _eval_line_using_file(self, line):
i = line.find(':=')
if i != -1:
j = line.find('"')
if j >= 0 and j < i:
i = -1
if i == -1:
line0 = 'Print( %s );'%line.rstrip().rstrip(';')
try: # this is necessary, since Print requires something as input, and some functions (e.g., Read) return nothing.
return Expect._eval_line_using_file(self, line0)
except RuntimeError:
return ''
return Expect._eval_line_using_file(self, line)
def console(self):
"""
Spawn a new GAP command-line session.
EXAMPLES::
sage: gap.console() # not tested
********* GAP, Version 4.5.7 of 14-Dec-2012 (free software, GPL)
* GAP * http://www.gap-system.org
********* Architecture: x86_64-unknown-linux-gnu-gcc-default64
Libs used: gmp, readline
Loading the library and packages ...
Packages: GAPDoc 1.5.1
Try '?help' for help. See also '?copyright' and '?authors'
gap>
"""
gap_console()
def _object_class(self):
"""
Returns the GapElement class.
EXAMPLES::
sage: gap._object_class()
<class 'sage.interfaces.gap.GapElement'>
sage: type(gap(2))
<class 'sage.interfaces.gap.GapElement'>
"""
return GapElement
def _function_element_class(self):
"""
Returns the GapFunctionElement class.
EXAMPLES::
sage: gap._function_element_class()
<class 'sage.interfaces.gap.GapFunctionElement'>
sage: type(gap.SymmetricGroup(4).Order)
<class 'sage.interfaces.gap.GapFunctionElement'>
"""
return GapFunctionElement
@cached_method
def _tab_completion(self):
"""
Return additional tab completion entries
OUTPUT:
List of strings
EXAMPLES::
sage: '{}' in gap._tab_completion()
False
sage: c = gap._tab_completion()
sage: len(c) > 100
True
sage: 'Order' in c
True
"""
names = eval(self.eval('NamesSystemGVars()')) + \
eval(self.eval('NamesUserGVars()'))
return [n for n in names if n[0] in string.ascii_letters]
############
def gap_reset_workspace(max_workspace_size=None, verbose=False):
r"""
Call this to completely reset the GAP workspace, which is used by
default when Sage first starts GAP.
The first time you start GAP from Sage, it saves the startup state
of GAP in a file ``$HOME/.sage/gap/workspace-HASH``, where ``HASH``
is a hash of the directory where Sage is installed.
This is useful, since then subsequent startup of GAP is at least 10
times as fast. Unfortunately, if you install any new code for GAP,
it won't be noticed unless you explicitly load it, e.g., with
gap.load_package("my_package")
The packages sonata, guava, factint, gapdoc, grape, design, toric,
and laguna are loaded in all cases before the workspace is saved,
if they are available.
TESTS:
Check that ``gap_reset_workspace`` still works when ``GAP_DIR``
doesn't exist, see :trac:`14171`::
sage: ORIGINAL_GAP_DIR = sage.interfaces.gap.GAP_DIR
sage: ORIGINAL_WORKSPACE = sage.interfaces.gap.WORKSPACE
sage: sage.interfaces.gap.GAP_DIR = os.path.join(tmp_dir(), "test_gap_dir")
sage: sage.interfaces.gap.WORKSPACE = os.path.join(sage.interfaces.gap.GAP_DIR, "test_workspace")
sage: os.path.isfile(sage.interfaces.gap.WORKSPACE) # long time
False
sage: gap_reset_workspace() # long time
sage: os.path.isfile(sage.interfaces.gap.WORKSPACE) # long time
True
sage: sage.interfaces.gap.GAP_DIR = ORIGINAL_GAP_DIR
sage: sage.interfaces.gap.WORKSPACE = ORIGINAL_WORKSPACE
Check that the race condition from :trac:`14242` has been fixed.
We temporarily need to change the worksheet filename. ::
sage: ORIGINAL_WORKSPACE = sage.interfaces.gap.WORKSPACE
sage: sage.interfaces.gap.WORKSPACE = tmp_filename()
sage: from multiprocessing import Process
sage: import time
sage: gap = Gap() # long time (reset GAP session)
sage: P = [Process(target=gap, args=("14242",)) for i in range(4)]
sage: for p in P: # long time, indirect doctest
....: p.start()
....: time.sleep(0.2)
sage: for p in P: # long time
....: p.join()
sage: os.unlink(sage.interfaces.gap.WORKSPACE) # long time
sage: sage.interfaces.gap.WORKSPACE = ORIGINAL_WORKSPACE
"""
# Make sure GAP_DIR exists
try:
os.makedirs(GAP_DIR)
msg = "It is OK to delete all these cache files. They will be recreated as needed.\n"
open(os.path.join(GAP_DIR, 'README.txt'), 'w').write(msg)
except OSError:
if not os.path.isdir(GAP_DIR):
raise
# Delete all gap workspaces that haven't been used in the last
# week, to avoid needless cruft. I had an install on sage.math
# with 90 of these, since I run a lot of different versions of
# Sage, and it totalled 1.3GB of wasted space! See trac #4936.
# We only do this after creating a new workspace, since this cruft
# issue is only a problem if workspaces get created every so
# often. We don't want to have to do this on every startup.
now = time.time()
for F in os.listdir(GAP_DIR):
if F.startswith('workspace-'):
W = os.path.join(GAP_DIR, F)
try:
age = now - os.path.getatime(W)
if age >= 604800: # 1 week in seconds
os.unlink(W)
except OSError:
# It's not a problem if W doesn't exist, everything
# else is an error.
if os.path.exists(W):
raise
# Create new workspace with filename WORKSPACE
g = Gap(use_workspace_cache=False, max_workspace_size=None)
g.eval('SetUserPreference("HistoryMaxLines", 30)')
for pkg in ['GAPDoc', 'ctbllib', 'sonata', 'guava', 'factint', \
'gapdoc', 'grape', 'design', \
'toric', 'laguna', 'braid']:
# NOTE: Do *not* autoload hap - it screws up PolynomialRing(Rationals,2)
try:
g.load_package(pkg, verbose=verbose)
except RuntimeError as msg:
if verbose:
print('*** %s' % msg)
pass
# end for
g.save_workspace()
g.quit()
class GapElement(GapElement_generic):
def __getitem__(self, n):
"""
EXAMPLES::
sage: a = gap([1,2,3])
sage: a[1]
1
"""
self._check_valid()
if not isinstance(n, tuple):
return self.parent().new('%s[%s]'%(self._name, n))
else:
return self.parent().new('%s%s'%(self._name, ''.join(['[%s]'%x for x in n])))
def str(self, use_file=False):
"""
EXAMPLES::
sage: print(gap(2))
2
"""
if use_file:
P = self._check_valid()
return P.get(self.name(), use_file=True)
else:
return repr(self)
def _latex_(self):
r"""
EXAMPLES::
sage: s = gap("[[1,2], [3/4, 5/6]]")
sage: latex(s)
\left(\begin{array}{rr} 1&2\\ 3/4&\frac{5}{6}\\ \end{array}\right)
"""
P = self._check_valid()
try:
s = P.eval('LaTeXObj(%s)'%self.name())
s = s.replace('\\\\','\\').replace('"','')
s = s.replace('%\\n',' ')
return s
except RuntimeError:
return str(self)
@cached_method
def _tab_completion(self):
"""
Return additional tab completion entries
OUTPUT:
List of strings
EXAMPLES::
sage: s5 = gap.SymmetricGroup(5)
sage: 'Centralizer' in s5._tab_completion()
True
"""
from sage.misc.misc import uniq
P = self.parent()
v = P.eval(r'\$SAGE.OperationsAdmittingFirstArgument(%s)'%self.name())
v = v.replace('Tester(','').replace('Setter(','').replace(')','').replace('\n', '')
v = v.split(',')
v = [ oper.split('"')[1] for oper in v ]
v = [ oper for oper in v if all(ch in string.ascii_letters for ch in oper) ]
v = uniq(v)
return v
class GapFunctionElement(FunctionElement):
def _sage_doc_(self):
"""
EXAMPLES::
sage: print(gap(4).SymmetricGroup._sage_doc_())
<BLANKLINE>
50 Group Libraries
<BLANKLINE>
When you start GAP, it already knows several groups. Currently GAP initially
knows the following groups:
...
"""
M = self._obj.parent()
help = M.help(self._name, pager=False)
return help
class GapFunction(ExpectFunction):
def _sage_doc_(self):
"""
EXAMPLES::
sage: print(gap.SymmetricGroup._sage_doc_())
<BLANKLINE>
50 Group Libraries
<BLANKLINE>
When you start GAP, it already knows several groups. Currently GAP initially
knows the following groups:
...
"""
M = self._parent
help = M.help(self._name, pager=False)
return help
def is_GapElement(x):
"""
Returns True if x is a GapElement.
EXAMPLES::
sage: from sage.interfaces.gap import is_GapElement
sage: is_GapElement(gap(2))
True
sage: is_GapElement(2)
False
"""
return isinstance(x, GapElement)
def gfq_gap_to_sage(x, F):
"""
INPUT:
- ``x`` -- GAP finite field element
- ``F`` -- Sage finite field
OUTPUT: element of ``F``
EXAMPLES::
sage: x = gap('Z(13)')
sage: F = GF(13, 'a')
sage: F(x)
2
sage: F(gap('0*Z(13)'))
0
sage: F = GF(13^2, 'a')
sage: x = gap('Z(13)')
sage: F(x)
2
sage: x = gap('Z(13^2)^3')
sage: F(x)
12*a + 11
sage: F.multiplicative_generator()^3
12*a + 11
TESTS:
Check that :trac:`18048` is fixed::
sage: K.<a> = GF(16)
sage: b = a^2 + a
sage: K(b._gap_())
a^2 + a
AUTHOR:
- David Joyner and William Stein
"""
s = str(x)
if s[:2] == '0*':
return F(0)
i1 = s.index("(")
i2 = s.index(")")
q = eval(s[i1+1:i2].replace('^','**'))
if not F.cardinality().is_power_of(q):
raise ValueError('%r has no subfield of size %r' % (F, q))
if s.find(')^') == -1:
e = 1
else:
e = int(s[i2+2:])
if F.degree() == 1:
g = F(gap.eval('Int(Z(%s))' % q))
elif F.is_conway():
f = (F.cardinality() - 1) // (q - 1)
g = F.multiplicative_generator() ** f
else:
raise ValueError('%r is not prime or defined by a Conway polynomial' % F)
return g**e
def intmod_gap_to_sage(x):
r"""
INPUT:
- x -- Gap integer mod ring element
EXAMPLES::
sage: a = gap(Mod(3, 18)); a
ZmodnZObj( 3, 18 )
sage: b = sage.interfaces.gap.intmod_gap_to_sage(a); b
3
sage: b.parent()
Ring of integers modulo 18
sage: a = gap(Mod(3, 17)); a
Z(17)
sage: b = sage.interfaces.gap.intmod_gap_to_sage(a); b
3
sage: b.parent()
Finite Field of size 17
sage: a = gap(Mod(0, 17)); a
0*Z(17)
sage: b = sage.interfaces.gap.intmod_gap_to_sage(a); b
0
sage: b.parent()
Finite Field of size 17
sage: a = gap(Mod(3, 65537)); a
ZmodpZObj( 3, 65537 )
sage: b = sage.interfaces.gap.intmod_gap_to_sage(a); b
3
sage: b.parent()
Ring of integers modulo 65537
"""
from sage.rings.finite_rings.all import FiniteField
from sage.rings.finite_rings.integer_mod import Mod
s = str(x)
m = re.search(r'Z\(([0-9]*)\)', s)
if m:
return gfq_gap_to_sage(x, FiniteField(m.group(1)))
m = re.match(r'Zmod[np]ZObj\( ([0-9]*), ([0-9]*) \)', s)
if m:
return Mod(m.group(1), m.group(2))
raise ValueError("Unable to convert Gap element '%s'" % s)
#############
gap = Gap()
def reduce_load_GAP():
"""
Returns the GAP interface object defined in sage.interfaces.gap.
EXAMPLES::
sage: from sage.interfaces.gap import reduce_load_GAP
sage: reduce_load_GAP()
Gap
"""
return gap
# This is only for backwards compatibility, in order to be able
# to unpickle the invalid objects that are in the pickle jar.
def reduce_load():
"""
This is for backwards compatibility only.
To be precise, it only serves at unpickling the invalid
gap elements that are stored in the pickle jar.
EXAMPLES::
sage: from sage.interfaces.gap import reduce_load
sage: reduce_load()
doctest:...: DeprecationWarning: This function is only used to unpickle invalid objects
See http://trac.sagemath.org/18848 for details.
<repr(<sage.interfaces.gap.GapElement at ...>) failed:
ValueError: The session in which this object was defined is no longer running.>
By :trac:`18848`, pickling actually often works::
sage: loads(dumps(gap([1,2,3])))
[ 1, 2, 3 ]
"""
deprecation(18848, "This function is only used to unpickle invalid objects")
return GapElement(None, None)
def gap_console():
"""
Spawn a new GAP command-line session.
Note that in gap-4.5.7 you cannot use a workspace cache that had
no commandline to restore a gap session with commandline.
EXAMPLES::
sage: gap_console() # not tested
********* GAP, Version 4.5.7 of 14-Dec-2012 (free software, GPL)
* GAP * http://www.gap-system.org
********* Architecture: x86_64-unknown-linux-gnu-gcc-default64
Libs used: gmp, readline
Loading the library and packages ...
Packages: GAPDoc 1.5.1
Try '?help' for help. See also '?copyright' and '?authors'
gap>
TESTS::
sage: import subprocess
sage: from sage.interfaces.gap import gap_command
sage: cmd = 'echo "quit;" | ' + gap_command(use_workspace_cache=False)[0]
sage: gap_startup = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
sage: 'http://www.gap-system.org' in gap_startup
True
sage: 'Error' not in gap_startup
True
sage: 'sorry' not in gap_startup
True
"""
from sage.repl.rich_output.display_manager import get_display_manager
if not get_display_manager().is_in_terminal():
raise RuntimeError('Can use the console only in the terminal. Try %%gap magics instead.')
cmd, _ = gap_command(use_workspace_cache=False)
cmd += ' ' + os.path.join(SAGE_EXTCODE,'gap','console.g')
os.system(cmd)
|
bulktasks.py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Copyright 2019 Huawei Technologies Co.,Ltd.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy of the
# License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
import threading
from obs import const
from obs import util
if const.IS_PYTHON2:
import Queue as queue
else:
import queue
class ThreadPool(object):
def __init__(self, thread_size=const.DEFAULT_TASK_NUM, queue_size=const.DEFAULT_TASK_QUEUE_SIZE):
self.thread_size = thread_size
self._alive_threads = 0
self._task_queue = queue.Queue(queue_size)
self._threads = []
self._init_threads()
self._shutdown_lock = threading.Lock()
def _init_threads(self):
for i in range(self.thread_size):
self._alive_threads += 1
work_thread = threading.Thread(target = self._run)
self._threads.append(work_thread)
work_thread.start()
def _run(self):
task = self._task_queue.get()
while task is not None:
(func, args, kwargs, future) = task
if future is None:
result = func(*args, **kwargs)
else:
try:
result = func(*args, **kwargs)
except Exception as e:
future.set_exception(e)
else:
future.set_result(result)
del task
task = self._task_queue.get()
def execute(self, func, *args, **kwargs):
task = (func, args, kwargs, None)
self._task_queue.put(task)
def submit(self, func, *args, **kwargs):
future = Future()
task = (func, args, kwargs, future)
self._task_queue.put(task)
return future
def shutdown(self, wait=True):
with self._shutdown_lock:
while self._alive_threads:
self._task_queue.put(None)
self._alive_threads -= 1
if wait:
for t in self._threads:
t.join()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.shutdown(wait=True)
return False
class TimeoutError(Exception):
pass
PENDING = 'PENDING'
COMPLETED = 'COMPLETED'
class Future(object):
def __init__(self):
self._condition = threading.Condition()
self._state = PENDING
self._result = None
self._exception = None
self._callback = None
def set_result(self, result):
with self._condition:
self._result = result
self._state = COMPLETED
self._condition.notify_all()
if self._callback:
self._callback(self)
def set_exception(self, exception):
with self._condition:
self._exception = exception
self._state = COMPLETED
self._condition.notify_all()
if self._callback:
self._callback(self)
def set_callback(self, callback):
with self._condition:
if self._state is PENDING:
self._callback = callback
return
callback(self)
def _get_result(self):
if self._exception:
raise self._exception
else:
return self._result
def get_result(self, timeout=None):
with self._condition:
if self._state == COMPLETED:
return self._get_result()
self._condition.wait(timeout)
if self._state == COMPLETED:
return self._get_result()
else:
raise TimeoutError()
def get_exception(self, timeout=None):
with self._condition:
if self._state == COMPLETED:
return self._exception
self._condition.wait(timeout)
if self._state == COMPLETED:
return self._exception
else:
raise TimeoutError()
class ExecuteProgress(object):
def __init__(self):
self.successful_tasks = 0
self._successful_lock = threading.Lock()
self.failed_tasks = 0
self._failed_lock = threading.Lock()
self.finished_tasks = 0
self._finished_lock = threading.Lock()
self.total_tasks = 0
def _successful_increment(self):
with self._successful_lock:
self.successful_tasks += 1
return self.successful_tasks
def _failed_increment(self):
with self._failed_lock:
self.failed_tasks += 1
return self.failed_tasks
def _finished_increment(self):
with self._finished_lock:
self.finished_tasks += 1
return self.finished_tasks
def get_successful_tasks(self):
with self._successful_lock:
return self.successful_tasks
def get_failed_tasks(self):
with self._failed_lock:
return self.failed_tasks
def get_finished_tasks(self):
with self._finished_lock:
return self.finished_tasks
def get_total_tasks(self):
return self.total_tasks
def _reportProgress(progress, interval, progressCallback):
finishedTasks = progress._finished_increment()
if finishedTasks % interval == 0 or finishedTasks == progress.get_total_tasks():
successfulTasks = progress.get_successful_tasks()
failedTasks = progress.get_failed_tasks()
progressCallback(successfulTasks, failedTasks, progress.get_total_tasks())
def _checkBulkTasksPara(task_num, task_queue_size, task_interval, threshold):
origine = [task_num, task_queue_size, task_interval, threshold]
default = (const.DEFAULT_TASK_NUM, const.DEFAULT_TASK_QUEUE_SIZE, const.DEFAULT_BYTE_INTTERVAL, const.DEFAULT_MAXIMUM_SIZE)
size = len(origine)
for i in range(size):
origine[i] = util.to_int(origine[i])
if origine[i] is None or origine[i] <= 0:
origine[i] = default[i]
return tuple(origine)
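# --- Illustrative usage sketch (editor's addition; `square` is a hypothetical work
# function, not part of the SDK). submit() enqueues a task and returns a Future whose
# get_result() blocks until a worker thread sets it; the context-manager form calls
# shutdown(wait=True) on exit. ---
if __name__ == '__main__':
    def square(x):
        return x * x
    with ThreadPool(thread_size=4) as pool:
        futures = [pool.submit(square, i) for i in range(10)]
        results = [f.get_result(timeout=5) for f in futures]
    print(results)  # [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]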
|
data_utils.py
|
# Lint as python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-import-not-at-top
"""Utilities for file download and caching."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from abc import abstractmethod
from contextlib import closing
import errno
import functools
import gc
import hashlib
import multiprocessing
import multiprocessing.dummy
import os
import random
import shutil
import signal
import sys
import tarfile
import threading
import time
import weakref
import zipfile
import numpy as np
import six
from six.moves.urllib.error import HTTPError
from six.moves.urllib.error import URLError
from tensorflow.python.framework import ops
from six.moves.urllib.request import urlopen
from tensorflow.python.keras.utils.generic_utils import Progbar
from tensorflow.python.keras.utils.io_utils import path_to_string
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import deprecation
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import keras_export
try:
import queue
except ImportError:
import Queue as queue
try:
import typing
is_iterator = lambda x: isinstance(x, typing.Iterator)
except ImportError:
# Python2 uses next, and Python3 should have typing so __next__ is not needed.
is_iterator = lambda x: hasattr(x, '__iter__') and hasattr(x, 'next')
if sys.version_info[0] == 2:
def urlretrieve(url, filename, reporthook=None, data=None):
"""Replacement for `urlretrieve` for Python 2.
Under Python 2, `urlretrieve` relies on `FancyURLopener` from legacy
`urllib` module, known to have issues with proxy management.
Arguments:
url: url to retrieve.
filename: where to store the retrieved data locally.
reporthook: a hook function that will be called once on establishment of
the network connection and once after each block read thereafter. The
hook will be passed three arguments; a count of blocks transferred so
far, a block size in bytes, and the total size of the file.
data: `data` argument passed to `urlopen`.
"""
def chunk_read(response, chunk_size=8192, reporthook=None):
content_type = response.info().get('Content-Length')
total_size = -1
if content_type is not None:
total_size = int(content_type.strip())
count = 0
while True:
chunk = response.read(chunk_size)
count += 1
if reporthook is not None:
reporthook(count, chunk_size, total_size)
if chunk:
yield chunk
else:
break
response = urlopen(url, data)
with open(filename, 'wb') as fd:
for chunk in chunk_read(response, reporthook=reporthook):
fd.write(chunk)
else:
from six.moves.urllib.request import urlretrieve
def is_generator_or_sequence(x):
"""Check if `x` is a Keras generator type."""
builtin_iterators = (str, list, tuple, dict, set, frozenset)
if isinstance(x, (ops.Tensor, np.ndarray) + builtin_iterators):
return False
return tf_inspect.isgenerator(x) or isinstance(x, Sequence) or is_iterator(x)
def _extract_archive(file_path, path='.', archive_format='auto'):
"""Extracts an archive if it matches tar, tar.gz, tar.bz, or zip formats.
Arguments:
file_path: path to the archive file
path: path to extract the archive file
archive_format: Archive format to try for extracting the file.
Options are 'auto', 'tar', 'zip', and None.
'tar' includes tar, tar.gz, and tar.bz files.
The default 'auto' is ['tar', 'zip'].
None or an empty list will return no matches found.
Returns:
True if a match was found and an archive extraction was completed,
False otherwise.
"""
if archive_format is None:
return False
if archive_format == 'auto':
archive_format = ['tar', 'zip']
if isinstance(archive_format, six.string_types):
archive_format = [archive_format]
file_path = path_to_string(file_path)
path = path_to_string(path)
for archive_type in archive_format:
if archive_type == 'tar':
open_fn = tarfile.open
is_match_fn = tarfile.is_tarfile
if archive_type == 'zip':
open_fn = zipfile.ZipFile
is_match_fn = zipfile.is_zipfile
if is_match_fn(file_path):
with open_fn(file_path) as archive:
try:
archive.extractall(path)
except (tarfile.TarError, RuntimeError, KeyboardInterrupt):
if os.path.exists(path):
if os.path.isfile(path):
os.remove(path)
else:
shutil.rmtree(path)
raise
return True
return False
@keras_export('keras.utils.get_file')
def get_file(fname,
origin,
untar=False,
md5_hash=None,
file_hash=None,
cache_subdir='datasets',
hash_algorithm='auto',
extract=False,
archive_format='auto',
cache_dir=None):
"""Downloads a file from a URL if it not already in the cache.
By default the file at the url `origin` is downloaded to the
cache_dir `~/.keras`, placed in the cache_subdir `datasets`,
and given the filename `fname`. The final location of a file
`example.txt` would therefore be `~/.keras/datasets/example.txt`.
Files in tar, tar.gz, tar.bz, and zip formats can also be extracted.
Passing a hash will verify the file after download. The command line
programs `shasum` and `sha256sum` can compute the hash.
Example:
```python
path_to_downloaded_file = tf.keras.utils.get_file(
"flower_photos",
"https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz",
untar=True)
```
Arguments:
fname: Name of the file. If an absolute path `/path/to/file.txt` is
specified the file will be saved at that location.
origin: Original URL of the file.
untar: Deprecated in favor of `extract` argument.
boolean, whether the file should be decompressed
md5_hash: Deprecated in favor of `file_hash` argument.
md5 hash of the file for verification
file_hash: The expected hash string of the file after download.
The sha256 and md5 hash algorithms are both supported.
cache_subdir: Subdirectory under the Keras cache dir where the file is
saved. If an absolute path `/path/to/folder` is
specified the file will be saved at that location.
hash_algorithm: Select the hash algorithm to verify the file.
options are `'md5'`, `'sha256'`, and `'auto'`.
The default 'auto' detects the hash algorithm in use.
extract: True tries extracting the file as an Archive, like tar or zip.
archive_format: Archive format to try for extracting the file.
Options are `'auto'`, `'tar'`, `'zip'`, and `None`.
`'tar'` includes tar, tar.gz, and tar.bz files.
The default `'auto'` corresponds to `['tar', 'zip']`.
None or an empty list will return no matches found.
cache_dir: Location to store cached files, when None it
defaults to the default directory `~/.keras/`.
Returns:
Path to the downloaded file
"""
if cache_dir is None:
cache_dir = os.path.join(os.path.expanduser('~'), '.keras')
if md5_hash is not None and file_hash is None:
file_hash = md5_hash
hash_algorithm = 'md5'
datadir_base = os.path.expanduser(cache_dir)
if not os.access(datadir_base, os.W_OK):
datadir_base = os.path.join('/tmp', '.keras')
datadir = os.path.join(datadir_base, cache_subdir)
_makedirs_exist_ok(datadir)
fname = path_to_string(fname)
if untar:
untar_fpath = os.path.join(datadir, fname)
fpath = untar_fpath + '.tar.gz'
else:
fpath = os.path.join(datadir, fname)
download = False
if os.path.exists(fpath):
# File found; verify integrity if a hash was provided.
if file_hash is not None:
if not validate_file(fpath, file_hash, algorithm=hash_algorithm):
print('A local file was found, but it seems to be '
'incomplete or outdated because the ' + hash_algorithm +
' file hash does not match the original value of ' + file_hash +
' so we will re-download the data.')
download = True
else:
download = True
if download:
print('Downloading data from', origin)
class ProgressTracker(object):
# Maintain progbar for the lifetime of download.
# This design was chosen for Python 2.7 compatibility.
progbar = None
def dl_progress(count, block_size, total_size):
if ProgressTracker.progbar is None:
if total_size == -1:
total_size = None
ProgressTracker.progbar = Progbar(total_size)
else:
ProgressTracker.progbar.update(count * block_size)
error_msg = 'URL fetch failure on {}: {} -- {}'
try:
try:
urlretrieve(origin, fpath, dl_progress)
except HTTPError as e:
raise Exception(error_msg.format(origin, e.code, e.msg))
except URLError as e:
raise Exception(error_msg.format(origin, e.errno, e.reason))
except (Exception, KeyboardInterrupt) as e:
if os.path.exists(fpath):
os.remove(fpath)
raise
ProgressTracker.progbar = None
if untar:
if not os.path.exists(untar_fpath):
_extract_archive(fpath, datadir, archive_format='tar')
return untar_fpath
if extract:
_extract_archive(fpath, datadir, archive_format)
return fpath
def _makedirs_exist_ok(datadir):
if six.PY2:
# Python 2 doesn't have the exist_ok arg, so we try-except here.
try:
os.makedirs(datadir)
except OSError as e:
if e.errno != errno.EEXIST:
raise
else:
os.makedirs(datadir, exist_ok=True) # pylint: disable=unexpected-keyword-arg
def _hash_file(fpath, algorithm='sha256', chunk_size=65535):
"""Calculates a file sha256 or md5 hash.
Example:
```python
_hash_file('/path/to/file.zip')
'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
```
Arguments:
fpath: path to the file being validated
algorithm: hash algorithm, one of `'auto'`, `'sha256'`, or `'md5'`.
The default `'auto'` detects the hash algorithm in use.
chunk_size: Bytes to read at a time, important for large files.
Returns:
The file hash
"""
  if algorithm in ('sha256', 'auto'):
    # 'auto' defaults to sha256 here, since there is no expected hash string to inspect.
hasher = hashlib.sha256()
else:
hasher = hashlib.md5()
with open(fpath, 'rb') as fpath_file:
for chunk in iter(lambda: fpath_file.read(chunk_size), b''):
hasher.update(chunk)
return hasher.hexdigest()
def validate_file(fpath, file_hash, algorithm='auto', chunk_size=65535):
"""Validates a file against a sha256 or md5 hash.
Arguments:
fpath: path to the file being validated
file_hash: The expected hash string of the file.
The sha256 and md5 hash algorithms are both supported.
algorithm: Hash algorithm, one of 'auto', 'sha256', or 'md5'.
The default 'auto' detects the hash algorithm in use.
chunk_size: Bytes to read at a time, important for large files.
Returns:
Whether the file is valid
"""
if (algorithm == 'sha256') or (algorithm == 'auto' and len(file_hash) == 64):
hasher = 'sha256'
else:
hasher = 'md5'
if str(_hash_file(fpath, hasher, chunk_size)) == str(file_hash):
return True
else:
return False
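# --- Illustrative usage sketch (editor's addition; the demo file is created and removed
# on the spot). validate_file() recomputes the hash via _hash_file() and compares it to
# the expected string: a 64-character expected hash selects sha256, anything else md5. ---
if __name__ == '__main__':
  _demo_path = os.path.join(os.path.expanduser('~'), '.keras_hash_demo.txt')
  with open(_demo_path, 'w') as _f:
    _f.write('hello')
  _expected = _hash_file(_demo_path)            # sha256 hex digest by default
  print(validate_file(_demo_path, _expected))   # True: the file is unchanged
  os.remove(_demo_path)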
class ThreadsafeIter(object):
"""Wrap an iterator with a lock and propagate exceptions to all threads."""
def __init__(self, it):
self.it = it
self.lock = threading.Lock()
# After a generator throws an exception all subsequent next() calls raise a
# StopIteration Exception. This, however, presents an issue when mixing
# generators and threading because it means the order of retrieval need not
# match the order in which the generator was called. This can make it appear
# that a generator exited normally when in fact the terminating exception is
# just in a different thread. In order to provide thread safety, once
# self.it has thrown an exception we continue to throw the same exception.
self._exception = None
def __iter__(self):
return self
def __next__(self):
return self.next()
def next(self):
with self.lock:
if self._exception:
raise self._exception # pylint: disable=raising-bad-type
try:
return next(self.it)
except Exception as e:
self._exception = e
raise
def threadsafe_generator(f):
@functools.wraps(f)
def g(*a, **kw):
return ThreadsafeIter(f(*a, **kw))
return g
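# --- Illustrative usage sketch (editor's addition; `_counter` is a toy generator).
# Decorating a generator function with threadsafe_generator makes every returned
# iterator a ThreadsafeIter, so concurrent next() calls are serialized by its lock. ---
if __name__ == '__main__':
  @threadsafe_generator
  def _counter():
    for i in range(3):
      yield i
  _gen = _counter()                       # a ThreadsafeIter wrapping the generator
  print([next(_gen) for _ in range(3)])   # [0, 1, 2]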
@keras_export('keras.utils.Sequence')
class Sequence(object):
"""Base object for fitting to a sequence of data, such as a dataset.
Every `Sequence` must implement the `__getitem__` and the `__len__` methods.
If you want to modify your dataset between epochs you may implement
`on_epoch_end`.
The method `__getitem__` should return a complete batch.
Notes:
  `Sequence` is a safer way to do multiprocessing. This structure guarantees
  that the network will only train once on each sample per epoch, which is
  not the case with generators.
Examples:
```python
from skimage.io import imread
from skimage.transform import resize
import numpy as np
import math
# Here, `x_set` is list of path to the images
# and `y_set` are the associated classes.
class CIFAR10Sequence(Sequence):
def __init__(self, x_set, y_set, batch_size):
self.x, self.y = x_set, y_set
self.batch_size = batch_size
def __len__(self):
return math.ceil(len(self.x) / self.batch_size)
def __getitem__(self, idx):
batch_x = self.x[idx * self.batch_size:(idx + 1) *
self.batch_size]
batch_y = self.y[idx * self.batch_size:(idx + 1) *
self.batch_size]
return np.array([
resize(imread(file_name), (200, 200))
for file_name in batch_x]), np.array(batch_y)
```
"""
@abstractmethod
def __getitem__(self, index):
"""Gets batch at position `index`.
Arguments:
index: position of the batch in the Sequence.
Returns:
A batch
"""
raise NotImplementedError
@abstractmethod
def __len__(self):
"""Number of batch in the Sequence.
Returns:
The number of batches in the Sequence.
"""
raise NotImplementedError
def on_epoch_end(self):
"""Method called at the end of every epoch.
"""
pass
def __iter__(self):
"""Create a generator that iterate over the Sequence."""
for item in (self[i] for i in range(len(self))):
yield item
def iter_sequence_infinite(seq):
"""Iterates indefinitely over a Sequence.
Arguments:
seq: `Sequence` instance.
Yields:
Batches of data from the `Sequence`.
"""
while True:
for item in seq:
yield item
# Global variables to be shared across processes
_SHARED_SEQUENCES = {}
# We use a Value to provide unique id to different processes.
_SEQUENCE_COUNTER = None
# Because multiprocessing pools are inherently unsafe, starting from a clean
# state can be essential to avoiding deadlocks. In order to accomplish this, we
# need to be able to check on the status of Pools that we create.
_DATA_POOLS = weakref.WeakSet()
_WORKER_ID_QUEUE = None # Only created if needed.
_WORKER_IDS = set()
_FORCE_THREADPOOL = False
_FORCE_THREADPOOL_LOCK = threading.RLock()
def dont_use_multiprocessing_pool(f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
with _FORCE_THREADPOOL_LOCK:
global _FORCE_THREADPOOL
old_force_threadpool, _FORCE_THREADPOOL = _FORCE_THREADPOOL, True
out = f(*args, **kwargs)
_FORCE_THREADPOOL = old_force_threadpool
return out
return wrapped
def get_pool_class(use_multiprocessing):
global _FORCE_THREADPOOL
if not use_multiprocessing or _FORCE_THREADPOOL:
return multiprocessing.dummy.Pool # ThreadPool
logging.warning(
'multiprocessing can interact badly with TensorFlow, causing '
'nondeterministic deadlocks. For high performance data pipelines tf.data '
'is recommended.')
return multiprocessing.Pool
def get_worker_id_queue():
"""Lazily create the queue to track worker ids."""
global _WORKER_ID_QUEUE
if _WORKER_ID_QUEUE is None:
_WORKER_ID_QUEUE = multiprocessing.Queue()
return _WORKER_ID_QUEUE
def init_pool(seqs):
global _SHARED_SEQUENCES
_SHARED_SEQUENCES = seqs
@deprecation.deprecated('2020-06-07', 'Please manage pools using the standard '
'Python lib.')
@keras_export('keras.experimental.terminate_keras_multiprocessing_pools')
def terminate_keras_multiprocessing_pools(grace_period=0.1, use_sigkill=False):
"""Destroy Keras' multiprocessing pools to prevent deadlocks.
In general multiprocessing.Pool can interact quite badly with other, seemingly
unrelated, parts of a codebase due to Pool's reliance on fork. This method
cleans up all pools which are known to belong to Keras (and thus can be safely
terminated).
Args:
grace_period: Time (in seconds) to wait for process cleanup to propagate.
use_sigkill: Boolean of whether or not to perform a cleanup pass using
SIGKILL.
Returns:
A list of human readable strings describing all issues encountered. It is up
to the caller to decide whether to treat this as an error condition.
"""
errors = []
# First cleanup the pools spawned by Keras. If we start killing workers and
# a parent pool is still alive it will just spawn replacements which we don't
# want.
gc.collect()
for pool in _DATA_POOLS:
pool.close()
pool.terminate()
# We do not join the pool, because that would wait forever if a worker
# refused to exit.
# Finally, delete our reference to the pool so that we do not block garbage
# collection.
del pool
# If there were any pools, sleep for a small grace period to allow everything
# to finalize.
if _DATA_POOLS:
time.sleep(grace_period)
# Now we kill any workers which are still alive. However we must compare
# the worker identifier to the set of identifiers which are known to have been
# spawned by pools belonging to Keras to avoid deleting unrelated workers.
# First we call the .terminate() method of a worker, and then if it still
# persists we directly send a signal to the process. Certain worker tasks may
# be able to gracefully handle shutdown, so we send a SIGTERM and then
# optionally follow up with a SIGKILL.
visited_workers = set()
cleanup_passes = ['.terminate', 'SIGTERM']
if use_sigkill:
cleanup_passes.append('SIGKILL')
cleanup_passes.append('log')
for cleanup_pass in cleanup_passes:
while True:
# In rare cases, queue.qsize() overestimates the number of elements. This
# loop is designed to be more robust.
try:
_WORKER_IDS.add(get_worker_id_queue().get_nowait())
except queue.Empty:
break
gc.collect()
workers_terminated_this_pass = False
for worker in multiprocessing.active_children():
ident = worker.ident
if ident in _WORKER_IDS and worker.is_alive():
try:
if cleanup_pass == '.terminate':
# First we ask nicely.
worker.terminate()
worker.join(timeout=grace_period)
visited_workers.add(ident)
workers_terminated_this_pass = True
elif cleanup_pass in ('SIGTERM', 'SIGKILL'):
# Then we ask increasingly tersely.
os.kill(worker.pid, signal.SIGKILL if cleanup_pass == 'SIGKILL'
else signal.SIGTERM)
workers_terminated_this_pass = True
elif cleanup_pass == 'log':
# And finally we give up and log the failure.
errors.append('worker still alive: {}, pid={}, hash={}'
.format(worker.name, worker.pid, hash(worker)))
except OSError:
# Worker exited since the start of this loop.
pass
if workers_terminated_this_pass:
# There can be a small propagation delay between worker destruction and
# workers reporting False for is_alive and no longer appearing in the
# list of active children. Once again, we sleep for a small grace period.
# This prevents false positives from workers which are simply still in the
# process of spinning down.
time.sleep(grace_period)
# Finally we remove the visited worker ids to handle the edge case that a
# pid is reused.
_WORKER_IDS.difference_update(visited_workers)
gc.collect()
for pool in _DATA_POOLS:
errors.append('pool still exists: {}, hash={}'.format(pool, hash(pool)))
return errors
def get_index(uid, i):
"""Get the value from the Sequence `uid` at index `i`.
To allow multiple Sequences to be used at the same time, we use `uid` to
get a specific one. A single Sequence would cause the validation to
overwrite the training Sequence.
Arguments:
uid: int, Sequence identifier
i: index
Returns:
The value at index `i`.
"""
return _SHARED_SEQUENCES[uid][i]
@keras_export('keras.utils.SequenceEnqueuer')
class SequenceEnqueuer(object):
"""Base class to enqueue inputs.
The task of an Enqueuer is to use parallelism to speed up preprocessing.
This is done with processes or threads.
Example:
```python
enqueuer = SequenceEnqueuer(...)
enqueuer.start()
datas = enqueuer.get()
for data in datas:
# Use the inputs; training, evaluating, predicting.
# ... stop sometime.
enqueuer.stop()
```
The `enqueuer.get()` should be an infinite stream of datas.
"""
def __init__(self, sequence,
use_multiprocessing=False):
self.sequence = sequence
self.use_multiprocessing = use_multiprocessing
global _SEQUENCE_COUNTER
if _SEQUENCE_COUNTER is None:
try:
_SEQUENCE_COUNTER = multiprocessing.Value('i', 0)
except OSError:
# In this case the OS does not allow us to use
# multiprocessing. We resort to an int
# for enqueuer indexing.
_SEQUENCE_COUNTER = 0
if isinstance(_SEQUENCE_COUNTER, int):
self.uid = _SEQUENCE_COUNTER
_SEQUENCE_COUNTER += 1
else:
# Doing Multiprocessing.Value += x is not process-safe.
with _SEQUENCE_COUNTER.get_lock():
self.uid = _SEQUENCE_COUNTER.value
_SEQUENCE_COUNTER.value += 1
self.workers = 0
self.executor_fn = None
self.queue = None
self.run_thread = None
self.stop_signal = None
def is_running(self):
return self.stop_signal is not None and not self.stop_signal.is_set()
def start(self, workers=1, max_queue_size=10):
"""Starts the handler's workers.
Arguments:
workers: Number of workers.
max_queue_size: queue size
(when full, workers could block on `put()`)
"""
if self.use_multiprocessing:
self.executor_fn = self._get_executor_init(workers)
else:
# We do not need the init since it's threads.
self.executor_fn = lambda _: get_pool_class(False)(workers)
self.workers = workers
self.queue = queue.Queue(max_queue_size)
self.stop_signal = threading.Event()
self.run_thread = threading.Thread(target=self._run)
self.run_thread.daemon = True
self.run_thread.start()
def _send_sequence(self):
"""Sends current Iterable to all workers."""
# For new processes that may spawn
_SHARED_SEQUENCES[self.uid] = self.sequence
def stop(self, timeout=None):
"""Stops running threads and wait for them to exit, if necessary.
Should be called by the same thread which called `start()`.
Arguments:
timeout: maximum time to wait on `thread.join()`
"""
self.stop_signal.set()
with self.queue.mutex:
self.queue.queue.clear()
self.queue.unfinished_tasks = 0
self.queue.not_full.notify()
self.run_thread.join(timeout)
_SHARED_SEQUENCES[self.uid] = None
def __del__(self):
if self.is_running():
self.stop()
@abstractmethod
def _run(self):
"""Submits request to the executor and queue the `Future` objects."""
raise NotImplementedError
@abstractmethod
def _get_executor_init(self, workers):
"""Gets the Pool initializer for multiprocessing.
Arguments:
workers: Number of workers.
Returns:
Function, a Function to initialize the pool
"""
raise NotImplementedError
@abstractmethod
def get(self):
"""Creates a generator to extract data from the queue.
Skip the data if it is `None`.
# Returns
Generator yielding tuples `(inputs, targets)`
or `(inputs, targets, sample_weights)`.
"""
raise NotImplementedError
@keras_export('keras.utils.OrderedEnqueuer')
class OrderedEnqueuer(SequenceEnqueuer):
"""Builds a Enqueuer from a Sequence.
Used in `fit_generator`, `evaluate_generator`, `predict_generator`.
Arguments:
sequence: A `tf.keras.utils.data_utils.Sequence` object.
use_multiprocessing: use multiprocessing if True, otherwise threading
shuffle: whether to shuffle the data at the beginning of each epoch
"""
def __init__(self, sequence, use_multiprocessing=False, shuffle=False):
super(OrderedEnqueuer, self).__init__(sequence, use_multiprocessing)
self.shuffle = shuffle
def _get_executor_init(self, workers):
"""Gets the Pool initializer for multiprocessing.
Arguments:
workers: Number of workers.
Returns:
Function, a Function to initialize the pool
"""
def pool_fn(seqs):
pool = get_pool_class(True)(
workers, initializer=init_pool_generator,
initargs=(seqs, None, get_worker_id_queue()))
_DATA_POOLS.add(pool)
return pool
return pool_fn
def _wait_queue(self):
"""Wait for the queue to be empty."""
while True:
time.sleep(0.1)
if self.queue.unfinished_tasks == 0 or self.stop_signal.is_set():
return
def _run(self):
"""Submits request to the executor and queue the `Future` objects."""
sequence = list(range(len(self.sequence)))
self._send_sequence() # Share the initial sequence
while True:
if self.shuffle:
random.shuffle(sequence)
with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor:
for i in sequence:
if self.stop_signal.is_set():
return
self.queue.put(
executor.apply_async(get_index, (self.uid, i)), block=True)
# Done with the current epoch, waiting for the final batches
self._wait_queue()
if self.stop_signal.is_set():
# We're done
return
# Call the internal on epoch end.
self.sequence.on_epoch_end()
self._send_sequence() # Update the pool
def get(self):
"""Creates a generator to extract data from the queue.
Skip the data if it is `None`.
Yields:
The next element in the queue, i.e. a tuple
`(inputs, targets)` or
`(inputs, targets, sample_weights)`.
"""
while self.is_running():
try:
inputs = self.queue.get(block=True, timeout=5).get()
if self.is_running():
self.queue.task_done()
if inputs is not None:
yield inputs
except queue.Empty:
pass
except Exception: # pylint: disable=broad-except
self.stop()
six.reraise(*sys.exc_info())
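# --- Illustrative usage sketch (editor's addition; `_RangeSequence` is a toy Sequence
# defined only for this demonstration). With shuffle=False and the threaded pool
# (use_multiprocessing=False) the batches come back in index order. ---
if __name__ == '__main__':
  class _RangeSequence(Sequence):
    def __getitem__(self, index):
      return index, index * 2
    def __len__(self):
      return 5
  _enqueuer = OrderedEnqueuer(_RangeSequence(), use_multiprocessing=False)
  _enqueuer.start(workers=2, max_queue_size=4)
  _gen = _enqueuer.get()
  print([next(_gen) for _ in range(5)])   # [(0, 0), (1, 2), (2, 4), (3, 6), (4, 8)]
  _enqueuer.stop()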
def init_pool_generator(gens, random_seed=None, id_queue=None):
"""Initializer function for pool workers.
Args:
gens: State which should be made available to worker processes.
random_seed: An optional value with which to seed child processes.
id_queue: A multiprocessing Queue of worker ids. This is used to indicate
that a worker process was created by Keras and can be terminated using
the cleanup_all_keras_forkpools utility.
"""
global _SHARED_SEQUENCES
_SHARED_SEQUENCES = gens
worker_proc = multiprocessing.current_process()
# name isn't used for anything, but setting a more descriptive name is helpful
# when diagnosing orphaned processes.
worker_proc.name = 'Keras_worker_{}'.format(worker_proc.name)
if random_seed is not None:
np.random.seed(random_seed + worker_proc.ident)
if id_queue is not None:
# If a worker dies during init, the pool will just create a replacement.
id_queue.put(worker_proc.ident, block=True, timeout=0.1)
def next_sample(uid):
"""Gets the next value from the generator `uid`.
To allow multiple generators to be used at the same time, we use `uid` to
get a specific one. A single generator would cause the validation to
overwrite the training generator.
Arguments:
uid: int, generator identifier
Returns:
The next value of generator `uid`.
"""
return six.next(_SHARED_SEQUENCES[uid])
@keras_export('keras.utils.GeneratorEnqueuer')
class GeneratorEnqueuer(SequenceEnqueuer):
"""Builds a queue out of a data generator.
The provided generator can be finite in which case the class will throw
a `StopIteration` exception.
Used in `fit_generator`, `evaluate_generator`, `predict_generator`.
Arguments:
generator: a generator function which yields data
use_multiprocessing: use multiprocessing if True, otherwise threading
wait_time: time to sleep in-between calls to `put()`
random_seed: Initial seed for workers,
will be incremented by one for each worker.
"""
def __init__(self, sequence,
use_multiprocessing=False,
random_seed=None):
super(GeneratorEnqueuer, self).__init__(sequence, use_multiprocessing)
self.random_seed = random_seed
def _get_executor_init(self, workers):
"""Gets the Pool initializer for multiprocessing.
Arguments:
      workers: Number of workers.
Returns:
A Function to initialize the pool
"""
def pool_fn(seqs):
pool = get_pool_class(True)(
workers, initializer=init_pool_generator,
initargs=(seqs, self.random_seed, get_worker_id_queue()))
_DATA_POOLS.add(pool)
return pool
return pool_fn
def _run(self):
"""Submits request to the executor and queue the `Future` objects."""
self._send_sequence() # Share the initial generator
with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor:
while True:
if self.stop_signal.is_set():
return
self.queue.put(
executor.apply_async(next_sample, (self.uid,)), block=True)
def get(self):
"""Creates a generator to extract data from the queue.
Skip the data if it is `None`.
Yields:
The next element in the queue, i.e. a tuple
`(inputs, targets)` or
`(inputs, targets, sample_weights)`.
"""
try:
while self.is_running():
inputs = self.queue.get(block=True).get()
self.queue.task_done()
if inputs is not None:
yield inputs
except StopIteration:
# Special case for finite generators
last_ones = []
while self.queue.qsize() > 0:
last_ones.append(self.queue.get(block=True))
# Wait for them to complete
for f in last_ones:
f.wait()
# Keep the good ones
last_ones = [future.get() for future in last_ones if future.successful()]
for inputs in last_ones:
if inputs is not None:
yield inputs
except Exception as e: # pylint: disable=broad-except
self.stop()
if 'generator already executing' in str(e):
raise RuntimeError(
'Your generator is NOT thread-safe. '
'Keras requires a thread-safe generator when '
'`use_multiprocessing=False, workers > 1`. ')
six.reraise(*sys.exc_info())
|
probe.py
|
#!/usr/bin/env python
# Copyright (c) 2014, Paessler AG <support@paessler.com>
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions
# and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions
# and the following disclaimer in the documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse
# or promote products derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# PRTG Python Miniprobe
# Miniprobe needs at least Python 2.7 because of "importlib"
# If an older Python version is used, you will have to install "importlib"
# import general modules
import sys
import json
import time
import gc
import logging
import socket
# import own modules
sys.path.append('./')
try:
from miniprobe import MiniProbe
import sensors
import requests
import multiprocessing
except Exception as e:
print(e)
# Implemented for internal testing only. Not for public usage!
http = False
if sys.argv[1:] and sys.argv[1] == "http":
http = True
class Probe(object):
def __init__(self):
gc.enable()
self.mini_probe = MiniProbe(http)
self.config = self.mini_probe.read_config('./probe.conf')
self.probe_stop = False
self.announce = False
self.task = False
self.has_json_content = False
self.data_request_payload_json = []
self.task_request_response_json = []
self.key_sha1 = self.mini_probe.hash_access_key(self.config['key'])
self.out_queue = multiprocessing.Queue()
self.sensor_list = self.mini_probe.get_import_sensors()
self.announce_json = json.dumps(self.mini_probe.build_announce(self.sensor_list))
self.announce_data = self.mini_probe.create_parameters(self.config, self.announce_json, 'announce')
self.url_announce = self.mini_probe.create_url(self.config, 'announce', http)
self.url_task = self.mini_probe.create_url(self.config, 'tasks', http)
self.task_data = self.mini_probe.build_task(self.config)
self.url_data = self.mini_probe.create_url(self.config, 'data', http)
self.procs = []
# Set up debug logging
self.logger = logging.getLogger("")
if self.config['debug'] == "True":
self.config['debug'] = True
self.logger.setLevel(logging.DEBUG)
logging.warning("DEBUG LOGGING HAS BEEN TURNED ON!!")
logging.getLogger("requests").setLevel(logging.INFO)
else:
self.config['debug'] = False
self.logger.setLevel(logging.INFO)
logging.info("Debug logging has been turned off!!")
logging.getLogger("requests").setLevel(logging.WARNING)
if self.config['cleanmem'] == "True":
self.config['cleanmem'] = True
else:
self.config['cleanmem'] = False
def send_announce(self):
"""
send announce request to core
"""
try:
announce_request = self.mini_probe.request_to_core("announce", self.announce_data, self.config)
if announce_request.status_code == requests.codes.ok:
self.announce = True
logging.info("Announce success.")
logging.debug("Announce success. Details: HTTP Status %s, Message: %s"
% (announce_request.status_code, announce_request.text))
else:
logging.info("Announce pending. Trying again in %s seconds"
% str(int(self.config['baseinterval']) / 2))
logging.debug("Announce pending. Details: HTTP Status %s, Message: %s"
% (announce_request.status_code, announce_request.text))
time.sleep(int(self.config['baseinterval']) / 2)
except Exception as request_exception:
logging.error(request_exception)
time.sleep(int(self.config['baseinterval']) / 2)
def get_tasks(self):
"""
get tasks from core
"""
self.data_request_payload_json = []
self.has_json_content = False
try:
task_request = self.mini_probe.request_to_core("tasks", self.task_data, self.config)
try:
if str(task_request.json()) != "[]":
self.has_json_content = True
self.task = True
logging.info("Task success.")
logging.debug("Task success. HTTP Status %s, Message: %s"
% (task_request.status_code, task_request.text))
return task_request
else:
logging.info("Task has no JSON content. Trying again in %s seconds"
% (int(self.config['baseinterval']) / 2))
logging.debug("Task has no JSON content. Details: HTTP Status %s, Message: %s"
% (task_request.status_code, task_request.text))
return None
except Exception as json_exception:
logging.info(json_exception)
logging.info("No JSON. HTTP Status: %s, Message: %s" % (task_request.status_code, task_request.text))
return None
except Exception as request_exception:
logging.error(request_exception)
logging.error("Exception. Trying again in %s seconds." % str(int(self.config['baseinterval']) / 3))
return None
def send_data(self):
"""
send processed data to the core
"""
try:
data_request = self.mini_probe.request_to_core("data", json.dumps(self.data_request_payload_json),
self.config)
if data_request.status_code == requests.codes.ok:
logging.info("Data success.")
logging.debug("Data success. Details: HTTP Status %s, Message: %s"
% (data_request.status_code, data_request.text))
self.data_request_payload_json = []
else:
logging.info("Data issue. Current data might be dropped, please turn on debug logging")
logging.debug("Data issue. Details: HTTP Status %s, Message: %s"
% (data_request.status_code, data_request.text))
except Exception as request_exception:
logging.error(request_exception)
def kill_procs(self):
"""
killing processes in worker pool when finished
"""
for p in self.procs:
if not p.is_alive():
p.join()
p.terminate()
del p
def main(self):
"""
Main routine for MiniProbe (Python)
"""
# Doing some startup logging
logging.info("PRTG Small Probe '%s' starting on '%s'" % (self.config['name'], socket.gethostname()))
logging.info("Connecting to PRTG Core Server at %s:%s" % (self.config['server'], self.config['port']))
while not self.announce:
self.send_announce()
while not self.probe_stop:
self.task = False
while not self.task:
task_request = self.get_tasks()
if not task_request:
time.sleep(int(self.config['baseinterval']) / 2)
gc.collect()
if task_request.status_code == requests.codes.ok and self.has_json_content:
self.task_request_response_json = task_request.json()
logging.debug("JSON response: %s" % self.task_request_response_json)
if self.config['subprocs']:
json_response_chunks = self.mini_probe.split_json_response(self.task_request_response_json,
self.config['subprocs'])
else:
json_response_chunks = self.mini_probe.split_json_response(self.task_request_response_json)
for element in json_response_chunks:
for part in element:
logging.debug(part)
for sensor in self.sensor_list:
if part['kind'] == sensor.get_kind():
p = multiprocessing.Process(target=sensor.get_data, args=(part, self.out_queue),
name=part['kind'])
self.procs.append(p)
logging.debug("Spawning sensor %s." % p.name)
p.start()
else:
pass
gc.collect()
try:
while len(self.data_request_payload_json) < len(element):
out = self.out_queue.get()
self.data_request_payload_json.append(out)
except Exception as data_queue_exception:
logging.error(data_queue_exception)
pass
self.send_data()
if len(self.task_request_response_json) > 10:
time.sleep((int(self.config['baseinterval']) * (9 / len(self.task_request_response_json))))
else:
time.sleep(int(self.config['baseinterval']) / 2)
elif task_request.status_code != requests.codes.ok:
logging.info("Task issue. Request returning incorrect status code. Turn on debugging for details")
logging.debug("Task issue. Details: HTTP Status %s, Message: %s"
% (task_request.status_code, task_request.text))
else:
logging.info("Task has no JSON content. Nothing to do. Waiting for %s seconds."
% str(int(self.config['baseinterval']) / 3))
time.sleep(int(self.config['baseinterval']) / 3)
self.kill_procs()
gc.collect()
if self.config['cleanmem']:
                # The clean-memory option was chosen during install, so call the method to flush memory.
self.mini_probe.clean_mem()
sys.exit()
if __name__ == "__main__":
probe = Probe()
probe.main()
|
InMemoryTransport.py
|
# Copyright 2015 Ufora Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import ufora.networking.Transport as Transport
import ufora.util.ManagedThread as ManagedThread
import ufora.util.Deferred as Deferred
class InMemoryTransport(Transport.Transport):
ConnectMessage = 'CONNECT:'
LogMessage = 'LOG:'
DisconnectMessage = 'DISCONNECT'
def __init__(self, channel):
self.channel = channel
self.onMessageReceived = None
self.onDisconnected = None
self.inputThread = None
self.isShuttingDown = False
def connect(self, credentials):
assert self.onMessageReceived is not None, \
"onMessageReceived callback must be set before connecting."
assert self.onDisconnected is not None, \
"onDisconnected callback must be set before connecting."
assert not self.isInInputThread(), \
"connect was called on an already connected transport."
deferred = Deferred.Deferred()
self.startInputLoop(deferred)
if credentials is not None:
self.channel.write('CONNECT:%s,%s' % credentials)
return deferred
def startInputLoop(self, deferred):
assert self.inputThread is None or not self.inputThread.is_alive()
self.inputThread = ManagedThread.ManagedThread(target=self.inputLoop, args=(deferred,))
self.inputThread.start()
def send(self, content):
self.channel.write(content)
def disconnect(self):
self.isShuttingDown = True
self.channel.write(InMemoryTransport.DisconnectMessage)
if not self.isInInputThread():
self.blockUntilFullyDisconnected()
def blockUntilFullyDisconnected(self):
self.inputThread.join()
def isInInputThread(self):
return self.inputThread is not None and \
threading.currentThread().ident == self.inputThread.ident
def inputLoop(self, connectDeferred):
try:
if connectDeferred and not self.waitForConnection(connectDeferred):
return
# this call doesn't return until the transport is shut down or gets disconnected
self.processIncomingMessages()
finally:
self.isShuttingDown = False
def processIncomingMessages(self):
while not self.isShuttingDown:
message = self.channel.get()
if message == InMemoryTransport.DisconnectMessage:
self.onDisconnected()
return
try:
self.onMessageReceived(message)
except TypeError as e:
                print('Error decoding message: %s\nMessage: %s' % (e, message))
def waitForConnection(self, connectDeferred):
message = None
while True:
message = self.channel.get()
if message.startswith(InMemoryTransport.ConnectMessage):
break
result = message[len(InMemoryTransport.ConnectMessage):].split(',')
if result[0] == 'OK':
connectDeferred.callback({
'login' : result[1],
'sharedStateId': result[2],
'displayName': result[3],
'sharedStateToken' : result[4]
})
return True
else:
connectDeferred.errback('Failed to connect')
return False
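# --- Illustrative usage sketch (editor's addition). `_LoopbackChannel` is a hypothetical
# stand-in for the duplex in-memory channel this transport expects: it answers the
# CONNECT handshake and echoes everything else back to the transport. ---
if __name__ == '__main__':
    try:
        import Queue as _queue_module   # Python 2
    except ImportError:
        import queue as _queue_module   # Python 3
    class _LoopbackChannel(object):
        def __init__(self):
            self._incoming = _queue_module.Queue()
        def write(self, content):
            if content.startswith(InMemoryTransport.ConnectMessage):
                # Pretend the peer accepted the handshake.
                self._incoming.put('CONNECT:OK,someLogin,someStateId,someName,someToken')
            else:
                self._incoming.put(content)
        def get(self):
            return self._incoming.get()
    transport = InMemoryTransport(_LoopbackChannel())
    transport.onMessageReceived = lambda message: None   # both callbacks must be set before connect()
    transport.onDisconnected = lambda: None
    transport.connect(('someUser', 'somePassword'))       # returns a Deferred fired with the login info
    transport.send('hello')
    transport.disconnect()                                 # blocks until the input thread exits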
|
test_datasets.py
|
# coding: utf-8
import threading
import attr
import irl.exploration.datasets as D
def test_Trajectories():
Trans = attr.make_class("Transition", ("val", "done"))
ds = D.Trajectories(lambda traj: [x.val ** 2 for x in traj])
ds.concat([Trans(1, False), Trans(2, False), Trans(3, True)])
assert len(ds) == len(ds.data) == 3
assert ds[0] == 1
ds.append(Trans(4, False))
assert len(ds) == 3
assert len(ds.partial_trajectory) == 1
ds.append(Trans(5, True))
assert len(ds.partial_trajectory) == 2
assert len(ds) == 3
ds.terminate_trajectory()
assert len(ds) == 5
assert len(ds.partial_trajectory) == 0
ds.append(Trans(6, False))
ds.clear()
assert len(ds) == len(ds.partial_trajectory) == 0
def test_parallel_Trajectories():
ds = D.Trajectories()
class DummyWrite(threading.Thread):
def __init__(self):
super().__init__()
self.ds = ds.new_shared_trajectories()
def run(self):
self.ds.append(1)
threads = [DummyWrite() for _ in range(3)]
with ds.data_lock.writer():
for t in threads:
t.start()
for t in threads:
t.join()
assert len(ds) == 0
assert len(ds.partial_trajectory) == 0
for t in threads:
t.ds.terminate_trajectory()
assert len(ds) == 3
assert len(ds.partial_trajectory) == 0
def test_MemoryReplay():
ds = D.MemoryReplay(lambda x: x - 1, capacity=3)
for i in range(3):
ds.append(i)
assert ds[0] == -1
assert len(ds) == len(ds.data) == 3
ds.append(3)
assert ds[0] == 0
assert len(ds) == len(ds.data) == 3
ds.clear()
assert len(ds) == 0
def test_parallel_MemoryReplay():
ds = D.MemoryReplay()
def dummy_write():
ds.append(1)
threads = []
with ds.lock.writer():
for _ in range(3):
t = threading.Thread(target=dummy_write)
t.start()
threads.append(t)
assert len(ds.data) == 0
for t in threads:
t.join()
assert len(ds) == 3
|
BotAmino.py
|
from time import sleep as slp
from sys import exit
from json import dumps, load, loads
from pathlib import Path
from threading import Thread
from contextlib import suppress
from unicodedata import normalize
from string import punctuation
from random import choice
# from datetime import datetime
from .local_amino import Client, SubClient, ACM, objects
from uuid import uuid4
from inspect import getfullargspec
from urllib.request import urlopen
# from zipfile import ZipFile
import requests
import time
# this is Slimakoi's API with some of my patches
# API made by ThePhoenix78
# Big optimisation thanks to SempreLEGIT#1378 ♥
path_utilities = "utilities"
path_amino = f'{path_utilities}/amino_list'
path_client = "client.txt"
NoneType = type(None)
with suppress(Exception):
for i in (path_utilities, path_amino):
Path(i).mkdir(exist_ok=True)
def print_exception(exc):
print(repr(exc))
class Command:
def __init__(self):
self.commands = {}
self.conditions = {}
def execute(self, commande, data, type: str = "command"):
com = self.commands[type][commande]
arg = getfullargspec(com).args
arg.pop(0)
s = len(arg)
dico = {}
if s:
dico = {key: value for key, value in zip(arg, data.message.split()[0:s])}
if self.conditions[type].get(commande, None):
if self.conditions[type][commande](data):
return self.commands[type][commande](data, **dico)
return
return self.commands[type][commande](data, **dico)
def categorie_exist(self, type: str):
return type in self.commands.keys()
def add_categorie(self, type):
if type not in self.commands.keys():
self.commands[type] = {}
def add_condition(self, type):
if type not in self.conditions.keys():
self.conditions[type] = {}
def commands_list(self):
return [command for command in self.commands["command"].keys()]
def answer_list(self):
return [command for command in self.commands["answser"].keys()]
def command(self, name=None, condition=None):
type = "command"
self.add_categorie(type)
self.add_condition(type)
if isinstance(name, str):
name = [name]
elif not name:
name = []
def add_command(command_funct):
name.append(command_funct.__name__)
if callable(condition):
for command in name:
self.conditions[type][command] = condition
for command in name:
self.commands[type][command.lower()] = command_funct
return command_funct
return add_command
def answer(self, name, condition=None):
type = "answer"
self.add_categorie(type)
self.add_condition(type)
if isinstance(name, str):
name = [name]
elif not name:
name = []
def add_command(command_funct):
# name.append(command_funct.__name__)
if callable(condition):
for command in name:
self.conditions[type][command] = condition
for command in name:
self.commands[type][command.lower()] = command_funct
return command_funct
return add_command
def on_member_join_chat(self, condition=None):
type = "on_member_join_chat"
self.add_categorie(type)
self.add_condition(type)
if callable(condition):
self.conditions[type][type] = condition
def add_command(command_funct):
self.commands[type][type] = command_funct
return command_funct
return add_command
def on_member_leave_chat(self, condition=None):
type = "on_member_leave_chat"
self.add_categorie(type)
self.add_condition(type)
if callable(condition):
self.conditions[type][type] = condition
def add_command(command_funct):
self.commands[type][type] = command_funct
return command_funct
return add_command
def on_message(self, condition=None):
type = "on_message"
self.add_categorie(type)
self.add_condition(type)
if callable(condition):
self.conditions[type][type] = condition
def add_command(command_funct):
self.commands[type][type] = command_funct
return command_funct
return add_command
def on_other(self, condition=None):
type = "on_other"
self.add_categorie(type)
self.add_condition(type)
if callable(condition):
self.conditions[type][type] = condition
def add_command(command_funct):
self.commands[type][type] = command_funct
return command_funct
return add_command
def on_delete(self, condition=None):
type = "on_delete"
self.add_categorie(type)
self.add_condition(type)
if callable(condition):
self.conditions[type][type] = condition
def add_command(command_funct):
self.commands[type][type] = command_funct
return command_funct
return add_command
def on_remove(self, condition=None):
type = "on_remove"
self.add_categorie(type)
self.add_condition(type)
if callable(condition):
self.conditions[type][type] = condition
def add_command(command_funct):
self.commands[type][type] = command_funct
return command_funct
return add_command
def on_all(self, condition=None):
type = "on_all"
self.add_categorie(type)
self.add_condition(type)
if callable(condition):
self.conditions[type][type] = condition
def add_command(command_funct):
self.commands[type][type] = command_funct
return command_funct
return add_command
def on_event(self, name, condition=None):
type = "on_event"
self.add_categorie(type)
self.add_condition(type)
if isinstance(name, str):
name = [name]
elif not name:
name = []
def add_command(command_funct):
# name.append(command_funct.__name__)
if callable(condition):
for command in name:
self.conditions[type][command] = condition
for command in name:
self.commands[type][command] = command_funct
return command_funct
return add_command
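# --- Illustrative usage sketch (editor's addition): how the Command registry above
# dispatches. `_Msg` is a hypothetical stand-in for the Parameters object defined
# further below; only its `message` attribute is used. ---
if __name__ == "__main__":
    demo = Command()
    @demo.command("add")
    def add(data, a, b):
        # `a` and `b` are filled from the first words of data.message by execute().
        return int(a) + int(b)
    class _Msg:
        message = "2 3"
    print(demo.execute("add", _Msg()))   # 5
    print(demo.commands_list())          # ['add']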
class TimeOut:
users_dict = {}
def time_user(self, uid, end: int = 5):
if uid not in self.users_dict.keys():
self.users_dict[uid] = {"start": 0, "end": end}
Thread(target=self.timer, args=[uid]).start()
def timer(self, uid):
while self.users_dict[uid]["start"] <= self.users_dict[uid]["end"]:
self.users_dict[uid]["start"] += 1
slp(1)
del self.users_dict[uid]
def timed_out(self, uid):
if uid in self.users_dict.keys():
return self.users_dict[uid]["start"] >= self.users_dict[uid]["end"]
return True
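# --- Illustrative usage sketch (editor's addition): the per-user cooldown above.
# timed_out() is True for unknown users and again once roughly `end` seconds have passed. ---
if __name__ == "__main__":
    cooldown = TimeOut()
    cooldown.time_user("1234", end=3)     # start a ~3 second cooldown thread for this user
    print(cooldown.timed_out("1234"))     # False: still cooling down
    slp(5)
    print(cooldown.timed_out("1234"))     # True: the timer expired and removed the entry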
class BannedWords:
def filtre_message(self, message, code):
para = normalize('NFD', message).encode(code, 'ignore').decode("utf8").strip().lower()
para = para.translate(str.maketrans("", "", punctuation))
return para
def check_banned_words(self, args, staff=True):
for word in ("ascii", "utf8"):
with suppress(Exception):
para = self.filtre_message(args.message, word).split()
if para != [""]:
with suppress(Exception):
checkme = [elem for elem in para if elem in args.subClient.banned_words]
if len(checkme) > 1 and staff:
args.subClient.delete_message(args.chatId, args.messageId, reason=f"Banned word : {choice(checkme)}", asStaff=staff)
if len(checkme) > 1:
args.subClient.delete_message(args.chatId, args.messageId, asStaff=staff)
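# --- Illustrative usage sketch (editor's addition): filtre_message() strips accents,
# punctuation and case before the banned-word comparison above. ---
if __name__ == "__main__":
    _bw = BannedWords()
    print(_bw.filtre_message("Héllo, WORLD!!", "ascii"))   # "hello world"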
class Parameters:
__slots__ = (
"subClient", "chatId", "authorId", "author", "message", "messageId",
"authorIcon", "comId", "replySrc", "replyMsg", "replyId", "info"
)
def __init__(self, data: objects.Event, subClient):
self.subClient = subClient
self.chatId = data.message.chatId
self.authorId = data.message.author.userId
self.author = data.message.author.nickname
self.message = data.message.content
self.messageId = data.message.messageId
self.authorIcon = data.message.author.icon
self.comId = data.comId
self.replySrc = None
self.replyId = None
if data.message.extensions and data.message.extensions.get('replyMessage', None) and data.message.extensions['replyMessage'].get('mediaValue', None):
self.replySrc = data.message.extensions['replyMessage']['mediaValue'].replace('_00.', '_hq.')
self.replyId = data.message.extensions['replyMessage']['messageId']
self.replyMsg = None
if data.message.extensions and data.message.extensions.get('replyMessage', None) and data.message.extensions['replyMessage'].get('content', None):
self.replyMsg = data.message.extensions['replyMessage']['content']
self.replyId = data.message.extensions['replyMessage']['messageId']
self.info: objects.Event = data
class BotAmino(Command, Client, TimeOut, BannedWords):
def __init__(self, email: str = None, password: str = None, sid: str = None, deviceId: str = None, proxies: str = None, certificatePath: str = None):
Command.__init__(self)
Client.__init__(self, deviceId=deviceId, certificatePath=certificatePath, proxies=proxies)
if email and password:
self.login(email=email, password=password)
elif sid:
self.login_sid(SID=sid)
else:
try:
with open(path_client, "r") as file_:
para = file_.readlines()
self.login(email=para[0].strip(), password=para[1].strip())
except FileNotFoundError:
with open(path_client, 'w') as file_:
file_.write('email\npassword')
print("Please enter your email and password in the file client.txt")
print("-----end-----")
exit(1)
self.communaute = {}
self.botId = self.userId
self.len_community = 0
self.perms_list = []
self.prefix = "!"
self.activity = False
self.wait = 0
self.bio = None
self.self_callable = False
self.no_command_message = ""
self.spam_message = "You are spamming, be careful"
self.lock_message = "Command locked sorry"
self.launched = False
def tradlist(self, sub):
sublist = []
for elem in sub:
with suppress(Exception):
val = self.get_from_code(f"http://aminoapps.com/u/{elem}").objectId
sublist.append(val)
continue
sublist.append(elem)
return sublist
def add_community(self, comId):
self.communaute[comId] = Bot(self, comId, self.prefix, self.bio, self.activity)
def get_community(self, comId):
return self.communaute[comId]
def is_it_bot(self, uid):
return uid == self.botId and not self.self_callable
def is_it_admin(self, uid):
return uid in self.perms_list
def get_wallet_amount(self):
return self.get_wallet_info().totalCoins
def generate_transaction_id(self):
return str(uuid4())
def start_video_chat(self, comId: str, chatId: str, joinType: int = 1):
data = {
"o": {
"ndcId": comId,
"threadId": chatId,
"joinRole": joinType,
"id": "2154531" # Need to change?
},
"t": 112
}
data = dumps(data)
self.send(data)
data = {
"o": {
"ndcId": int(comId),
"threadId": chatId,
"joinRole": joinType,
"channelType": 4,
"id": "2154531" # Need to change?
},
"t": 108
}
data = dumps(data)
self.send(data)
def start_screen_room(self, comId: str, chatId: str, joinType: int = 1):
data = {
"o": {
"ndcId": comId,
"threadId": chatId,
"joinRole": joinType,
"id": "2154531" # Need to change?
},
"t": 112
}
data = dumps(data)
self.send(data)
data = {
"o": {
"ndcId": int(comId),
"threadId": chatId,
"joinRole": joinType,
"channelType": 5,
"id": "2154531" # Need to change?
},
"t": 108
}
data = dumps(data)
self.send(data)
def join_screen_room(self, comId: str, chatId: str, joinType: int = 1):
data = {
"o":
{
"ndcId": int(comId),
"threadId": chatId,
"joinRole": 2,
"id": "72446"
},
"t": 112
}
data = dumps(data)
self.send(data)
def start_voice_room(self, comId: str, chatId: str, joinType: int = 1):
data = {
"o": {
"ndcId": comId,
"threadId": chatId,
"joinRole": joinType,
"id": "2154531" # Need to change?
},
"t": 112
}
data = dumps(data)
self.send(data)
data = {
"o": {
"ndcId": comId,
"threadId": chatId,
"channelType": 1,
"id": "2154531" # Need to change?
},
"t": 108
}
data = dumps(data)
self.send(data)
def end_voice_room(self, comId: str, chatId: str, joinType: int = 2):
data = {
"o": {
"ndcId": comId,
"threadId": chatId,
"joinRole": joinType,
"id": "2154531" # Need to change?
},
"t": 112
}
data = dumps(data)
self.send(data)
def show_online(self, comId):
data = {
"o": {
"actions": ["Browsing"],
"target": f"ndc://x{comId}/",
"ndcId": int(comId),
"id": "82333"
},
"t":304}
data = dumps(data)
slp(2)
self.send(data)
def check(self, args, *can, id_=None):
id_ = id_ if id_ else args.authorId
foo = {'staff': args.subClient.is_in_staff,
'bot': self.is_it_bot}
for i in can:
if foo[i](id_):
return True
def check_all(self):
amino_list = self.sub_clients()
for com in amino_list.comId:
try:
self.communaute[com].check_in()
except Exception:
pass
def threadLaunch(self, commu, passive: bool = False):
self.communaute[commu] = Bot(self, commu, self.prefix, self.bio, passive)
slp(30)
if passive:
self.communaute[commu].passive()
def launch(self, passive: bool = False):
amino_list = self.sub_clients()
self.len_community = len(amino_list.comId)
[Thread(target=self.threadLaunch, args=[commu, passive]).start() for commu in amino_list.comId]
if self.launched:
return
if self.categorie_exist("command") or self.categorie_exist("answer"):
self.launch_text_message()
if self.categorie_exist("on_member_join_chat"):
self.launch_on_member_join_chat()
if self.categorie_exist("on_member_leave_chat"):
self.launch_on_member_leave_chat()
if self.categorie_exist("on_other"):
self.launch_other_message()
if self.categorie_exist("on_remove"):
self.launch_removed_message()
if self.categorie_exist("on_delete"):
self.launch_delete_message()
if self.categorie_exist("on_all"):
self.launch_all_message()
self.launched = True
def single_launch(self, commu, passive: bool = False):
amino_list = self.sub_clients()
self.len_community = len(amino_list.comId)
Thread(target=self.threadLaunch, args=[commu, passive]).start()
if self.launched:
return
if self.categorie_exist("command") or self.categorie_exist("answer"):
self.launch_text_message()
if self.categorie_exist("on_member_join_chat"):
self.launch_on_member_join_chat()
if self.categorie_exist("on_member_leave_chat"):
self.launch_on_member_leave_chat()
if self.categorie_exist("on_other"):
self.launch_other_message()
if self.categorie_exist("on_remove"):
self.launch_removed_message()
if self.categorie_exist("on_delete"):
self.launch_delete_message()
if self.categorie_exist("on_all"):
self.launch_all_message()
self.launched = True
def message_analyse(self, data, type):
try:
commuId = data.comId
subClient = self.get_community(commuId)
except Exception:
return
args = Parameters(data, subClient)
Thread(target=self.execute, args=[type, args, type]).start()
def on_member_event(self, data, type):
try:
commuId = data.comId
subClient = self.get_community(commuId)
except Exception:
return
args = Parameters(data, subClient)
if not self.check(args, "bot"):
Thread(target=self.execute, args=[type, args, type]).start()
def launch_text_message(self):
def text_message(data):
try:
commuId = data.comId
subClient = self.get_community(commuId)
except Exception:
return
args = Parameters(data, subClient)
if "on_message" in self.commands.keys():
Thread(target=self.execute, args=["on_message", args, "on_message"]).start()
if self.check(args, 'staff', 'bot') and subClient.banned_words:
self.check_banned_words(args)
# elif subClient.banned_words:
# self.check_banned_words(args, False)
if not self.timed_out(args.authorId) and args.message.startswith(subClient.prefix) and not self.check(args, "bot"):
subClient.send_message(args.chatId, self.spam_message)
return
elif "command" in self.commands.keys() and args.message.startswith(subClient.prefix) and not self.check(args, "bot"):
print(f"{args.author} : {args.message}")
command = args.message.lower().split()[0][len(subClient.prefix):]
if command in subClient.locked_command:
subClient.send_message(args.chatId, self.lock_message)
return
args.message = ' '.join(args.message.split()[1:])
self.time_user(args.authorId, self.wait)
if command.lower() in self.commands["command"].keys():
Thread(target=self.execute, args=[command, args]).start()
elif self.no_command_message:
subClient.send_message(args.chatId, self.no_command_message)
return
elif "answer" in self.commands.keys() and args.message.lower() in self.commands["answer"] and not self.check(args, "bot"):
print(f"{args.author} : {args.message}")
self.time_user(args.authorId, self.wait)
Thread(target=self.execute, args=[args.message.lower(), args, "answer"]).start()
return
try:
@self.callbacks.event("on_text_message")
def on_text_message(data):
text_message(data)
except Exception:
@self.event("on_text_message")
def on_text_message(data):
text_message(data)
def launch_other_message(self):
for type_name in ("on_strike_message", "on_voice_chat_not_answered",
"on_voice_chat_not_cancelled", "on_voice_chat_not_declined",
"on_video_chat_not_answered", "on_video_chat_not_cancelled",
"on_video_chat_not_declined", "on_voice_chat_start", "on_video_chat_start",
"on_voice_chat_end", "on_video_chat_end", "on_screen_room_start",
"on_screen_room_end", "on_avatar_chat_start", "on_avatar_chat_end"):
try:
@self.callbacks.event(type_name)
def on_other_message(data):
self.message_analyse(data, "on_other")
except AttributeError:
@self.event(type_name)
def on_other_message(data):
self.message_analyse(data, "on_other")
def launch_all_message(self):
try:
for x in (self.chat_methods):
@self.event(self.chat_methods[x].__name__)
def on_all_message(data):
self.message_analyse(data, "on_all")
except AttributeError:
for x in (self.callbacks.chat_methods):
@self.callbacks.event(self.callbacks.chat_methods[x].__name__)
def on_all_message(data):
self.message_analyse(data, "on_all")
def launch_delete_message(self):
try:
@self.callbacks.event("on_delete_message")
def on_delete_message(data):
self.message_analyse(data, "on_delete")
except AttributeError:
@self.event("on_delete_message")
def on_delete_message(data):
self.message_analyse(data, "on_delete")
def launch_removed_message(self):
for type_name in ("on_chat_removed_message", "on_text_message_force_removed", "on_text_message_removed_by_admin", "on_delete_message"):
try:
@self.callbacks.event(type_name)
def on_chat_removed(data):
self.message_analyse(data, "on_remove")
except AttributeError:
@self.event(type_name)
def on_chat_removed(data):
self.message_analyse(data, "on_remove")
def launch_on_member_join_chat(self):
try:
@self.callbacks.event("on_group_member_join")
def on_group_member_join(data):
self.on_member_event(data, "on_member_join_chat")
except AttributeError:
@self.event("on_group_member_join")
def on_group_member_join(data):
self.on_member_event(data, "on_member_join_chat")
def launch_on_member_leave_chat(self):
try:
@self.callbacks.event("on_group_member_leave")
def on_group_member_leave(data):
self.on_member_event(data, "on_member_leave_chat")
except AttributeError:
@self.event("on_group_member_leave")
def on_group_member_leave(data):
self.on_member_event(data, "on_member_leave_chat")
class Bot(SubClient, ACM):
def __init__(self, client, community, prefix: str = "!", bio=None, activity=False) -> None:
self.client = client
self.marche = True
self.prefix = prefix
self.bio_contents = bio
self.activity = activity
if isinstance(community, int):
self.community_id = community
self.community = self.client.get_community_info(comId=self.community_id)
self.community_amino_id = self.community.aminoId
else:
self.community_amino_id = community
self.informations = self.client.get_from_code(f"http://aminoapps.com/c/{community}")
self.community_id = self.informations.json["extensions"]["community"]["ndcId"]
self.community = self.client.get_community_info(comId=self.community_id)
self.community_name = self.community.name
super().__init__(comId=self.community_id, profile=self.client.profile)
try:
self.community_leader_agent_id = self.community.json["agent"]["uid"]
except Exception:
self.community_leader_agent_id = "-"
try:
self.community_staff_list = self.community.json["communityHeadList"]
except Exception:
self.community_staff_list = ""
if self.community_staff_list:
self.community_leaders = [elem["uid"] for elem in self.community_staff_list if elem["role"] in (100, 102)]
self.community_curators = [elem["uid"] for elem in self.community_staff_list if elem["role"] == 101]
self.community_staff = [elem["uid"] for elem in self.community_staff_list]
if not Path(f'{path_amino}/{self.community_amino_id}.json').exists():
self.create_community_file()
old_dict = self.get_file_dict()
new_dict = self.create_dict()
def do(k, v): old_dict[k] = v
def undo(k): del old_dict[k]
[do(k, v) for k, v in new_dict.items() if k not in old_dict]
[undo(k) for k in list(old_dict.keys()) if k not in new_dict]
self.update_file(old_dict)
# self.subclient = SubClient(comId=self.community_id, profile=client.profile)
self.banned_words = self.get_file_info("banned_words")
self.locked_command = self.get_file_info("locked_command")
self.message_bvn = self.get_file_info("welcome")
self.welcome_chat = self.get_file_info("welcome_chat")
self.prefix = self.get_file_info("prefix")
self.favorite_users = self.get_file_info("favorite_users")
self.favorite_chats = self.get_file_info("favorite_chats")
self.update_file()
self.activity_status("on")
new_users = self.get_all_users(start=0, size=30, type="recent")
self.new_users = [elem["uid"] for elem in new_users.json["userProfileList"]]
def create_community_file(self):
with open(f'{path_amino}/{self.community_amino_id}.json', 'w', encoding='utf8') as file:
dict = self.create_dict()
file.write(dumps(dict, sort_keys=False, indent=4))
def create_dict(self):
return {"welcome": "", "prefix": self.prefix, "welcome_chat": "", "locked_command": [], "favorite_users": [], "favorite_chats": [], "banned_words": []}
def get_dict(self):
return {"welcome": self.message_bvn, "prefix": self.prefix, "welcome_chat": self.welcome_chat, "locked_command": self.locked_command,
"favorite_users": self.favorite_users, "favorite_chats": self.favorite_chats, "banned_words": self.banned_words}
def update_file(self, dict=None):
if not dict:
dict = self.get_dict()
with open(f"{path_amino}/{self.community_amino_id}.json", "w", encoding="utf8") as file:
file.write(dumps(dict, sort_keys=False, indent=4))
def get_file_info(self, info: str = None):
with open(f"{path_amino}/{self.community_amino_id}.json", "r", encoding="utf8") as file:
return load(file)[info]
def get_file_dict(self, info: str = None):
with open(f"{path_amino}/{self.community_amino_id}.json", "r", encoding="utf8") as file:
return load(file)
def get_banned_words(self):
return self.banned_words
def set_prefix(self, prefix: str):
self.prefix = prefix
self.update_file()
def set_welcome_message(self, message: str):
self.message_bvn = message.replace('"', '“')
self.update_file()
def set_welcome_chat(self, chatId: str):
self.welcome_chat = chatId
self.update_file()
def add_favorite_users(self, value: str):
self.favorite_users.append(value)
self.update_file()
def add_favorite_chats(self, value: str):
self.favorite_chats.append(value)
self.update_file()
def add_banned_words(self, liste: list):
self.banned_words.extend(liste)
self.update_file()
def add_locked_command(self, liste: list):
self.locked_command.extend(liste)
self.update_file()
def remove_favorite_users(self, value: str):
liste = [value]
[self.favorite_users.remove(elem) for elem in liste if elem in self.favorite_users]
self.update_file()
def remove_favorite_chats(self, value: str):
liste = [value]
[self.favorite_chats.remove(elem) for elem in liste if elem in self.favorite_chats]
self.update_file()
def remove_banned_words(self, liste: list):
[self.banned_words.remove(elem) for elem in liste if elem in self.banned_words]
self.update_file()
def remove_locked_command(self, liste: list):
[self.locked_command.remove(elem) for elem in liste if elem in self.locked_command]
self.update_file()
def unset_welcome_chat(self):
self.welcome_chat = ""
self.update_file()
def is_in_staff(self, uid):
return uid in self.community_staff
def is_leader(self, uid):
return uid in self.community_leaders
def is_curator(self, uid):
return uid in self.community_curators
def is_agent(self, uid):
return uid == self.community_leader_agent_id
def copy_bubble(self, chatId: str, replyId: str, comId: str = None):
if not comId:
comId = self.community_id
header = {
'Accept-Language': 'en-US',
'Content-Type': 'application/octet-stream',
'User-Agent': 'Dalvik/2.1.0 (Linux; U; Android 7.1; LG-UK495 Build/MRA58K; com.narvii.amino.master/3.3.33180)',
'Host': 'service.narvii.com',
'Accept-Encoding': 'gzip',
'Connection': 'Keep-Alive',
}
a = self.get_message_info(chatId=chatId, messageId=replyId).json["chatBubble"]["resourceUrl"]
with urlopen(a) as zipresp:
yo = zipresp.read()
response = requests.post(f"https://service.narvii.com/api/v1/x{comId}/s/chat/chat-bubble/templates/107147e9-05c5-405f-8553-af65d2823457/generate", data=yo, headers=header)
bid = loads(response.text)['chatBubble']['bubbleId']
response = requests.post(f"https://service.narvii.com/api/v1/{comId}/s/chat/chat-bubble/{bid}", data=yo, headers=header)
def accept_role(self, rid: str = None):
with suppress(Exception):
self.accept_organizer(rid)
return True
with suppress(Exception):
self.promotion(noticeId=rid)
return True
return False
def get_staff(self, community):
if isinstance(community, int):
with suppress(Exception):
community = self.client.get_community_info(comId=community)
else:
try:
informations = self.client.get_from_code(f"http://aminoapps.com/c/{community}")
except Exception:
return False
community_id = informations.json["extensions"]["community"]["ndcId"]
community = self.client.get_community_info(comId=community_id)
try:
community_staff_list = community.json["communityHeadList"]
community_staff = [elem["uid"] for elem in community_staff_list]
except Exception:
community_staff_list = ""
else:
return community_staff
def get_user_id(self, name_or_id):
members = self.get_all_users(size=1).json['userProfileCount']
start = 0
lower_name = None
while start <= members:
users = self.get_all_users(start=start, size=100).json['userProfileList']
for user in users:
name = user['nickname']
uid = user['uid']
if name_or_id == name or name_or_id == uid:
return (name, uid)
if not lower_name and name_or_id.lower() in name.lower():
lower_name = (name, uid)
start += 100
return lower_name if lower_name else None
def ask_all_members(self, message, lvl: int = 20, type_bool: int = 1):
def ask(uid):
print(uid)
try:
self.start_chat(userId=[uid], message=message)
except Exception:
self.start_chat(userId=[uid], message=message)
size = self.get_all_users(start=0, size=1, type="recent").json['userProfileCount']
st = 0
while size > 0:
value = size
if value > 100:
value = 100
users = self.get_all_users(start=st, size=value)
if type_bool == 1:
[ask(user["uid"]) for user in users.json['userProfileList'] if user['level'] == lvl]
elif type_bool == 2:
[ask(user["uid"]) for user in users.json['userProfileList'] if user['level'] <= lvl]
elif type_bool == 3:
[ask(user["uid"]) for user in users.json['userProfileList'] if user['level'] >= lvl]
size -= 100
st += 100
def ask_amino_staff(self, message):
self.start_chat(userId=self.community_staff, message=message)
def get_chat_id(self, chat: str = None):
with suppress(Exception):
return self.get_from_code(f"http://aminoapps.com/c/{chat}").objectId
val = self.get_public_chat_threads()
for title, chat_id in zip(val.title, val.chatId):
if chat == title:
return chat_id
for title, chat_id in zip(val.title, val.chatId):
if chat.lower() in title.lower() or chat == chat_id:
return chat_id
return False
def stop_instance(self):
self.marche = False
def start_instance(self):
self.marche = True
Thread(target=self.passive).start()
def leave_amino(self):
self.marche = False
for elem in self.get_public_chat_threads().chatId:
with suppress(Exception):
self.leave_chat(elem)
self.client.leave_community(comId=self.community_id)
def check_new_member(self):
if not (self.message_bvn or self.welcome_chat):
return
new_list = self.get_all_users(start=0, size=25, type="recent")
new_member = [(elem["nickname"], elem["uid"]) for elem in new_list.json["userProfileList"]]
for elem in new_member:
name, uid = elem[0], elem[1]
val = self.get_wall_comments(userId=uid, sorting='newest').commentId
if not val and self.message_bvn:
with suppress(Exception):
self.comment(message=self.message_bvn, userId=uid)
if not val and self.welcome_chat:
with suppress(Exception):
self.send_message(chatId=self.welcome_chat, message=f"Welcome here @{name}!", mentionUserIds=[uid])
new_users = self.get_all_users(start=0, size=30, type="recent")
self.new_users = [elem["uid"] for elem in new_users.json["userProfileList"]]
def welcome_new_member(self):
new_list = self.get_all_users(start=0, size=25, type="recent")
new_member = [(elem["nickname"], elem["uid"]) for elem in new_list.json["userProfileList"]]
for elem in new_member:
name, uid = elem[0], elem[1]
val = self.get_wall_comments(userId=uid, sorting='newest').commentId
if not val and uid not in self.new_users and self.message_bvn:
with suppress(Exception):
self.comment(message=self.message_bvn, userId=uid)
if uid not in self.new_users and self.welcome_chat:
with suppress(Exception):
self.send_message(chatId=self.welcome_chat, message=f"Welcome here @{name}!", mentionUserIds=[uid])
new_users = self.get_all_users(start=0, size=30, type="recent")
self.new_users = [elem["uid"] for elem in new_users.json["userProfileList"]]
def feature_chats(self):
for elem in self.favorite_chats:
with suppress(Exception):
self.favorite(time=2, chatId=elem)
def feature_users(self):
featured = [elem["uid"] for elem in self.get_featured_users().json["userProfileList"]]
for elem in self.favorite_users:
if elem not in featured:
with suppress(Exception):
self.favorite(time=1, userId=elem)
def get_member_level(self, uid):
return self.get_user_info(userId=uid).level
def get_member_titles(self, uid):
with suppress(Exception):
return self.get_user_info(userId=uid).customTitles
return False
def get_wallet_amount(self):
return self.client.get_wallet_info().totalCoins
def generate_transaction_id(self):
return str(uuid4())
def pay(self, coins: int = 0, blogId: str = None, chatId: str = None, objectId: str = None, transactionId: str = None):
if not transactionId:
transactionId = self.generate_transaction_id()
self.send_coins(coins=coins, blogId=blogId, chatId=chatId, objectId=objectId, transactionId=transactionId)
def favorite(self, time: int = 1, userId: str = None, chatId: str = None, blogId: str = None, wikiId: str = None):
self.feature(time=time, userId=userId, chatId=chatId, blogId=blogId, wikiId=wikiId)
def unfavorite(self, userId: str = None, chatId: str = None, blogId: str = None, wikiId: str = None):
self.unfeature(userId=userId, chatId=chatId, blogId=blogId, wikiId=wikiId)
def join_chatroom(self, chat: str = None, chatId: str = None):
if not chat:
with suppress(Exception):
self.join_chat(chatId)
return ""
with suppress(Exception):
chati = self.get_from_code(f"{chat}").objectId
self.join_chat(chati)
return chat
chats = self.get_public_chat_threads()
for title, chat_id in zip(chats.title, chats.chatId):
if chat == title:
self.join_chat(chat_id)
return title
chats = self.get_public_chat_threads()
for title, chat_id in zip(chats.title, chats.chatId):
if chat.lower() in title.lower() or chat == chat_id:
self.join_chat(chat_id)
return title
return False
def start_screen_room(self, chatId: str, joinType: int = 1):
self.client.join_video_chat(comId=self.community_id, chatId=chatId, joinType=joinType)
def start_video_chat(self, chatId: str, joinType: int = 1):
self.client.join_video_chat(comId=self.community_id, chatId=chatId, joinType=joinType)
def start_voice_room(self, chatId: str, joinType: int = 1):
self.client.join_voice_chat(comId=self.community_id, chatId=chatId, joinType=joinType)
def join_screen_room(self, chatId: str, joinType: int = 1):
self.client.join_video_chat_as_viewer(comId=self.community_id, chatId=chatId, joinType=joinType)
def get_chats(self):
return self.get_public_chat_threads()
def join_all_chat(self):
for elem in self.get_public_chat_threads(type="recommended", start=0, size=100).chatId:
with suppress(Exception):
self.join_chat(elem)
def leave_all_chats(self):
for elem in self.get_public_chat_threads(type="recommended", start=0, size=100).chatId:
with suppress(Exception):
self.leave_chat(elem)
def follow_user(self, uid):
self.follow(userId=[uid])
def unfollow_user(self, uid):
self.unfollow(userId=uid)
def add_title(self, uid: str, title: str, color: str = None):
member = self.get_member_titles(uid)
try:
titles = [i['title'] for i in member] + [title]
colors = [i['color'] for i in member] + [color]
except TypeError:
titles = [title]
colors = [color]
self.edit_titles(uid, titles, colors)
return True
def remove_title(self, uid: str, title: str):
member = self.get_member_titles(uid)
tlist = []
clist = []
for t in member:
if t["title"] != title:
tlist.append(t["title"])
clist.append(t["color"])
self.edit_titles(uid, tlist, clist)
return True
def passive(self):
def upt_activity():
timeNow = int(time.time())
timeEnd = timeNow + 300
try:
self.send_active_obj(startTime=timeNow, endTime=timeEnd)
except Exception:
pass
def change_bio_and_welcome_members():
if self.welcome_chat or self.message_bvn:
Thread(target=self.welcome_new_member).start()
try:
self.activity_status('on')
if isinstance(self.bio_contents, list):
self.edit_profile(content=choice(self.bio_contents))
elif isinstance(self.bio_contents, str):
self.edit_profile(content=self.bio_contents)
except Exception as e:
print_exception(e)
def feature_chats():
try:
Thread(target=self.feature_chats).start()
except Exception as e:
print_exception(e)
def feature_users():
try:
Thread(target=self.feature_users).start()
except Exception as e:
print_exception(e)
feature_chats()
feature_users()
j = 0
k = 0
while self.marche:
change_bio_and_welcome_members()
if j >= 24:
feature_chats()
j = 0
if k >= 288:
feature_users()
k = 0
if self.activity:
upt_activity()
slp(300)
j += 1
k += 1
|
FileEmulator.py
|
#!/usr/bin/python
# Written by: Aaron Baker
from classes.RepeatedTimer import RepeatedTimer
from classes.HttpRequestHandler import HttpRequestHandler
from http.server import BaseHTTPRequestHandler, HTTPServer
import time
import threading
import urllib.request
import colorama # pip install
from colorama import Fore, Back, Style
global Get_Count
# This is used for getting information from the server
def Get_From_Server():
global Get_Count
global GetFromServer
print(Fore.LIGHTYELLOW_EX + "[+] Client Requesting: {%s}" % Get_Count)
url = 'http://127.0.0.1:8085/index.html/'
response = urllib.request.urlopen(url)
data = response.read()
text = data.decode('utf-8')
Get_Count += 1
if Get_Count % 15 == 0:
GetFromServer.stop()
time.sleep(5)
GetFromServer = RepeatedTimer(0.20, Get_From_Server)
return
# This is used for setting up and starting the server
def StartServer():
# Server settings
server_address = ('127.0.0.1', 8085)
httpd = HTTPServer(server_address, HttpRequestHandler)
print(Fore.LIGHTGREEN_EX + 'running server...\n')
print(Style.RESET_ALL + Fore.YELLOW, end='')
httpd.serve_forever()
return
# Default main
def main():
global GetFromServer
global Serverthread
global Get_Count
Get_Count = 0
colorama.init()
print(Fore.LIGHTRED_EX + "Starting server")
Serverthread = threading.Thread(target=StartServer)
Serverthread.daemon = True
Serverthread.start()
GetFromServer = RepeatedTimer(0.20, Get_From_Server)
while True:
time.sleep(1000)
return
# Exiting the console and stopping HTTP Server
def exit_gracefully():
global GetFromServer
GetFromServer.stop()
print("Stopping...")
return
# Default main call
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
finally:
exit_gracefully()
|
inetstat.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import pwd
import glob
import re
import socket
import time
import struct
from collections import OrderedDict
from multiprocessing import Process, Manager, cpu_count
try:
import _pickle as pickle # Python 3.5 import of cPickle
except ImportError:
import pickle
python_exec = sys.executable
python_version = sys.version
current_path = os.getcwd()
script_path = os.path.dirname(os.path.realpath(__file__))
python_path = os.path.dirname(python_exec)
running_arch = "{0} bits".format(8 * struct.calcsize("P"))
threads_available = int(cpu_count() / 2) + 1
system_pid_pattern = '/proc/[0-9]*/fd/[0-9]*'
process_state_pattern = re.compile(r"\(([A-Za-z0-9_]+)\)")
sc_clk_tck = os.sysconf_names['SC_CLK_TCK']
clock_tick = os.sysconf(sc_clk_tck)
kernel_tcp4_info = '/proc/net/tcp'
tcp_timers = {'00': 'z_no_timer', '01': 'retransmit', '02': 'keep_alive', '03': 'time_wait', '04': 'window_probe'}
tcp_states = {'01': 'ESTABLISHED', '02': 'SYN_SENT', '03': 'SYN_RECV', '04': 'FIN_WAIT1', '05': 'FIN_WAIT2',
'06': 'TIME_WAIT', '07': 'CLOSE', '08': 'CLOSE_WAIT', '09': 'LAST_ACK', '0A': 'LISTEN', '0B': 'CLOSING'}
umask_octal_codes = {'0': 'rwx', '1': 'rw-', '2': 'r-x', '3': 'r--', '4': '-wx', '5': '-w-', '6': '--x', '7': '---'}
umask_special_bits = {'0': '', '1': 'Sticky', '2': 'SGID', '4': 'SUID'}
def split_lists(original_list, max_slices):
""" Split a list into a list of small lists given the desired number of sub lists """
slices = max(max_slices - 1, 1)  # guard against max_slices == 1
original_list_size = len(original_list)
split_index = int(original_list_size / slices)
return [original_list[x:x + split_index] for x in range(0, len(original_list), split_index)]
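# Illustrative example (my addition): the chunk size is len(list) // (max_slices - 1),
# so split_lists(list(range(8)), 3) -> [[0, 1, 2, 3], [4, 5, 6, 7]] -- at most
# max_slices sub-lists come back, sometimes fewer.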
def hex2dec(hex_value):
""" Returns a decimal representation of a given hex value"""
return str(int(hex_value, 16))
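# Worked example (added for clarity): hex2dec('0016') -> '22', i.e. the SSH port as it
# appears in the hex port column of /proc/net/tcp.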
def reversed_endian2octets(hex_ip):
""" IPs on /proc/net/tcp are stored as big-endian value interpreted as per the machine "endianness", which means it
ends up reversed on little-endian machines """
reversed_bytes = [hex_ip[6:8], hex_ip[4:6], hex_ip[2:4], hex_ip[0:2]]
octets = [hex2dec(_oct) for _oct in reversed_bytes]
return '.'.join(octets)
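# Worked example (added for clarity): '0100007F' is read back to front as 7F.00.00.01,
# so reversed_endian2octets('0100007F') -> '127.0.0.1'.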
def get_pid_of_inode(inode):
""" Check which running PID is using the given inode """
for inode_pid in glob.glob(system_pid_pattern):
try:
if re.search(inode, os.readlink(inode_pid)):
return inode_pid.split('/')[2]
except FileNotFoundError:
continue  # the fd vanished while we were scanning; keep looking
return '-NA-'
def umask_human_representation(umask):
""" Returns a string with a human readable representation of a given umask """
_machine_reading_umask = str(umask)[::-1]
_other = umask_octal_codes[_machine_reading_umask[0]]
_group = umask_octal_codes[_machine_reading_umask[1]]
_user = umask_octal_codes[_machine_reading_umask[2]]
try:
_special = umask_special_bits[_machine_reading_umask[3]]
except IndexError:
_special = ''
human_readable_umask = "{0}{1}{2}{3}".format(_special, _user, _group, _other)
return human_readable_umask
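# Worked example (added for clarity): for the common value '0022' from /proc/<pid>/status
# the digits are read in reverse, so umask_human_representation('0022') -> 'rwxr-xr-x'
# (write masked out for group and other, no special bits).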
def get_process_info(pid_number):
""" Check relevant data about a given process using it's Kernel representation on /proc filesystem. It returns the
process Name, State, Threads owned by it, VmRSS memory taken by it and it's permissions """
process_status_file = "/proc/{0}/status".format(str(pid_number))
process_status_dict = dict()
try:
with open(process_status_file, 'r') as proc_status:
_status = proc_status.readlines()
for _item in _status:
_item, _value = [i.lstrip().rstrip() for i in _item.split(":")]
process_status_dict.update({_item: _value})
except IOError:
return {'pname': '---', 'pumask': '---', 'pstate': '---', 'th': '---', 'pmem': '---'}
_name = process_status_dict['Name']
_umask = umask_human_representation(process_status_dict['Umask'])
if "(" and ")" in process_status_dict['State']:
_state = re.findall(process_state_pattern, process_status_dict['State'])[0]
else:
_state = process_status_dict['State']
_threads = process_status_dict['Threads']
_mem = process_status_dict['VmRSS']
return {'pname': _name, 'pumask': _umask, 'pstate': _state, 'th': _threads, 'pmem': _mem}
def timers_and_jiffies(tcp_timer, jiffy):
""" Use Kernel constant values for clock in Hz and the jiffy values given by /proc/net/tcp to describe the type of
timer (tcp_timer_type) associated with a connection and its current countdown in seconds (tcp_timer) """
tcp_timer_type = tcp_timers[tcp_timer]
_time = int(int(hex2dec(jiffy)) / clock_tick) # int int to round secs (human-readable value)
tcp_timer = _time if _time > 0 else 0
return tcp_timer_type, tcp_timer
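# Worked example (added for clarity, assuming the usual SC_CLK_TCK of 100): a timer field
# of '01:000003E8' means a retransmit timer with 0x3E8 = 1000 jiffies left, so
# timers_and_jiffies('01', '000003E8') -> ('retransmit', 10).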
class MinimalWhois(object):
def __init__(self):
""" This is my minimalistic whois implementation using sockets. It's inherited by INetStat class, and it's
purpose is to return ASN related information against a given IP address """
self.whois_host = "whois.cymru.com"
self.whois_port, self.ipcheck_port = 43, 80
self.ipcheck_address = "8.8.8.8"
self.timeout = 2
self.object_flags = " -v {0}\r\n"
self.socket = None
self.sock_family, self.sock_type = socket.AF_INET, socket.SOCK_STREAM
self.sock_type_2 = socket.SOCK_DGRAM
self.local_ip = self.check_local_ip()
def check_local_ip(self):
""" As long as I'm already using sockets, let's use a socket connection to 8.8.8.8 to get our local IP address
as any other method will return 127.0.0.1 as per all other Linux methods characteristics """
self.socket = socket.socket(self.sock_family, self.sock_type_2)
try:
self.socket.connect((self.ipcheck_address, self.ipcheck_port))
return self.socket.getsockname()[0]
except socket.error as socket_error:
print('Socket Error:', socket_error)
sys.exit(1)
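# Note (added for clarity): connect() on a UDP socket sends no packets to 8.8.8.8; it only
# selects a route, which is why getsockname() can report the outward-facing local address
# without generating any traffic.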
def lookup(self, ip_address):
""" Performs socket connection with "whois.cymru.com" passing the given IP as flag and returns response """
self.socket = socket.socket(self.sock_family, self.sock_type)
_response = b''
try:
self.socket.settimeout(self.timeout)
self.socket.connect((self.whois_host, self.whois_port))
self.socket.send(bytes(self.object_flags.format(ip_address).encode()))
while True:
_data = self.socket.recv(4096)
_response += _data
if not _data:
break
self.socket.close()
except socket.error:
return None
_response = _response.decode('utf-8', 'replace')
return _response
def parse_data(self, dictionary, ip_address):
""" Receives a multiprocessing managed dictionary and an IP address to perform a lookup method and parse
all the returned information concerning the IP's ASN information. Retries 3 times in case of a timeout """
_retries = 3
_whois_data = self.lookup(ip_address)
while _whois_data is None and _retries > 0:
_retries -= 1
_whois_data = self.lookup(ip_address)
if _whois_data and isinstance(_whois_data, str):  # lookup may still return None after all retries
_lines = [_line for _line in _whois_data.splitlines()[:2]]
_keys, _values = [[_item.lstrip().rstrip() for _item in _line.split('|')] for _line in _lines]
_keys = [_key.lower().replace(' ', '_') for _key in _keys]
_values = [_value.split(',')[0] for _value in _values]
dictionary.update({ip_address: dict(zip(_keys, _values))})
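# Rough usage sketch (my addition; the reply layout is an assumption based on Team Cymru's
# verbose whois format, which parse_data consumes as a header line plus a data line):
#   whois, asn = MinimalWhois(), {}
#   whois.parse_data(asn, '8.8.8.8')
#   # asn['8.8.8.8'] would then look like
#   # {'as': '15169', 'ip': '8.8.8.8', 'bgp_prefix': '8.8.8.0/24', 'cc': 'US',
#   #  'registry': 'arin', 'allocated': '...', 'as_name': 'GOOGLE'}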
class MinimalNetstat(MinimalWhois):
def __init__(self):
""" This is my Python 3 netstat implementation. My intention here is not reinvent the wheel. Instead of the
default Linux netstat's behaviour, my implementation will describe and monitor states and timers. We're
inheriting my whois implementation to have proper access to my local ip """
super(MinimalNetstat, self).__init__()
self.tcp4 = kernel_tcp4_info
self.states = tcp_states
self.tcp4_data = self.parse_tcp4_data()
if self.tcp4_data is not None:
self.netstat = self.tcp4_data
else:
print("Could not retrieve TCP data.")
sys.exit(1)
def read_proc_tcp4(self):
""" Reads the data on /proc/net/tcp to get all currently available IPv4 TCP connections """
try:
with open(self.tcp4, 'r') as _proc:
return [_line.replace('\n', '') for _line in _proc.readlines()[1:] if len(_line)]
except IOError:
return None
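# For reference (added note, following the kernel's /proc/net/tcp layout): each returned
# line looks roughly like
#   '0: 0100007F:0016 00000000:0000 0A 00000000:00000000 00:00000000 00000000  0 0 12345 ...'
# i.e. slot, local addr:port, remote addr:port, state, tx/rx queues, timer:jiffies,
# retransmits, uid, timeout, inode -- the fields parse_tcp4_data() indexes below.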
def parse_tcp4_data(self):
""" Get information about all currently available IPv4 TCP connections using the read_proc_tcp4 method and
parse the information through some conversion methods as per Linux Kernel conventions """
_status_keys = ['pname', 'pumask', 'pstate', 'th', 'pmem']
_tcp4_data = dict()
_data = self.read_proc_tcp4()
if _data is None:
return _data
for _entry in _data:
_cells = _entry.split()
_id = _cells[0].replace(':', '')
_hex_local_host, _hex_local_port = _cells[1].split(':')
_local_host, _local_port = reversed_endian2octets(_hex_local_host), hex2dec(_hex_local_port)
_hex_remote_host, _hex_remote_port = _cells[2].split(':')
_remote_host, _remote_port = reversed_endian2octets(_hex_remote_host), hex2dec(_hex_remote_port)
if _remote_host != '0.0.0.0':
_layer = 'secure' if _remote_port == '443' else 'insecure'
_cstate = self.states[_cells[3]]
_timer, _jiffy = _cells[5].split(':')
_timer_type, _timer = timers_and_jiffies(_timer, _jiffy)
_uid = pwd.getpwuid(int(_cells[7]))[0]
_inode = _cells[9]
_inode_pid = get_pid_of_inode(_inode)
_pid_status = get_process_info(_inode_pid)
_pname, _pumask, _pstate, _th, _pmem = [_pid_status[_ps_key] for _ps_key in _status_keys]
_pname = _pname[:11] if len(_pname) > 11 else _pname
try:
_app_path = os.readlink("/proc/{0}/exe".format(_inode_pid)) # .split(os.path.sep)[-1]
except FileNotFoundError:
_app_path = '--NA--'
_tcp4_entry = {'id': _id, 'cstate': _cstate, 'localhost': _local_host, 'lport': _local_port,
'remotehost': _remote_host, 'rport': _remote_port, 'time': _timer, 'timer': _timer_type,
'user': _uid, 'inode': _inode, 'pid': _inode_pid, 'name': _pname, 'app_path': _app_path,
'umask': _pumask, 'pstate': _pstate, 'th': _th, 'mem': _pmem, 'layer': _layer,
'ipv': 'IPv4'}
_tcp4_data.update({_remote_host: _tcp4_entry})
return _tcp4_data
class PickleDict:
def __init__(self):
""" Handles storage dictionaries to dist through cpickle and reading them to act as a cache on disk for ASN
information concerning IP addresses already queried. TODO: store timestamp to query again if the stored record
is older than X days (to check if a block of IPs now belongs to a different company """
self.pickle_file = "asn_info.pickle"
self.pickle_path = script_path
self.my_pickle = os.path.join(self.pickle_path, self.pickle_file)
def touch(self):
""" This method is only being used while I write this code, to "reset" our cache for testing purposes """
try:
open(self.my_pickle, 'w').close()
except IOError:
print("Can't touch {0} file!".format(self.my_pickle))
sys.exit(1)
def read(self):
""" Read the cache file from disk """
if os.path.isfile(self.my_pickle):
try:
with open(self.my_pickle, 'rb') as _pickle:
return pickle.load(_pickle)
except IOError:
return False
else:
return False
def write(self, pickle_data):
""" Writes the given dictionary (pickle_data) to disk """
try:
with open(self.my_pickle, 'wb') as _pickle:
pickle.dump(pickle_data, _pickle)
except IOError:
print("Can't write {0} file!".format(self.my_pickle))
sys.exit(1)
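# Minimal usage sketch (my addition): the class is just a thin pickle wrapper, e.g.
#   cache = PickleDict()
#   known = cache.read() or {}
#   known['8.8.8.8'] = {'as_name': 'GOOGLE'}
#   cache.write(known)
# round-trips the ASN dictionary through asn_info.pickle next to this script.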
class INetstat(MinimalNetstat):
def __init__(self):
""" We're inheriting MinimalNetstat Class (which inherits MinimalWhois) and we'll also store a timestamp right
at the initialization so we can further check the execution time (for testing purposes) """
self.start = time.time()
super(INetstat, self).__init__()
self.pickle_dict = PickleDict()
self.open_connections = len(self.netstat)
self.asn_data = self.get_asn_data()
for mutual_key, values in self.netstat.items():
self.netstat[mutual_key] = {**self.netstat[mutual_key], **self.asn_data[mutual_key]}
def sort_items(dictionary):
return dictionary[1]['ipv'], dictionary[1]['rport'], dictionary[1]['cstate'], dictionary[1]['timer'],\
dictionary[1]['time'], dictionary[1]['as_name'], dictionary[1]['cc'], dictionary[1]['allocated'],\
dictionary[1]['remotehost']
_netstat_sorted_items = sorted(self.netstat.items(), key=sort_items)
self.ordered_netstat = OrderedDict(_netstat_sorted_items)
def read_asn_data_from_disk(self):
""" Return our ASN cache from disk (cpickle stored file) or None if we could not find a cache file """
_asn_data = self.pickle_dict.read()
return _asn_data if _asn_data else None
def get_asn_data(self):
""" This method identifies which IP addresses are unknown to our cache file and prepare them to be queried.
The list of IPs to query are divided into smaller lists concerning the number of available threads on the
system, as we're multiprocessing the method that perform que queries to optimize execution time"""
_ips_list = [_ip_address for _ip_address in self.netstat.keys()]
_unknown_asn_data = list()
_known_asn_data = self.read_asn_data_from_disk()
if _known_asn_data and isinstance(_known_asn_data, dict):
_unknown_asn_data = [_ip_address for _ip_address in _ips_list if _ip_address not in _known_asn_data]
else:
_known_asn_data = dict()
_unknown_asn_data = list(_ips_list)
manager = Manager()
asn_dictionary = manager.dict()
if len(_unknown_asn_data) > threads_available:
_chunks_to_process = split_lists(_unknown_asn_data, threads_available) # divides the list into smaller ones
for _chunk in _chunks_to_process: # start the query processes in chunks (concerning lists to be queried)
job = [Process(target=self.parse_data, args=(asn_dictionary, _ip)) for _ip in _chunk]
_ = [p.start() for p in job]
_ = [p.join() for p in job]
else:
job = [Process(target=self.parse_data, args=(asn_dictionary, _ip)) for _ip in _unknown_asn_data]
_ = [p.start() for p in job]
_ = [p.join() for p in job]
_complete_asn_data = {**asn_dictionary, **_known_asn_data} # merge the previous known data (cache) with new one
self.pickle_dict.write(_complete_asn_data)
return _complete_asn_data
def dict_values_len(dictionary, minimum_column_size=5):
""" Reads the given dictionary and return a new one containing each one of it's keys with it's correspondent length,
which represents whe length of the largest value attributed to that same key"""
_values_len_dict = dict()
for k, v in dictionary.items():
for _k, _v in v.items():
_v = str(_v)
if _k not in _values_len_dict or _values_len_dict[_k] < len(_v):
_length = len(_v) if len(_v) >= minimum_column_size else minimum_column_size
_values_len_dict.update({_k: _length})
return _values_len_dict
def pretty_print(string, string_type=None):
""" Take care of determining which fields should be justified to each side to improve readability """
string = str(string)
_string_length = pprint_dict[string_type] if string_type is not None else pprint_dict[string]
_right_justified_strings = ['localhost', 'remotehost', 'mem', 'timer', 'bgp_prefix']
if string in _right_justified_strings or string_type in _right_justified_strings:
return string.rjust(_string_length)
else:
return string.ljust(_string_length)
def print_inetstat():
""" Print inetstat results to the terminal """
keys_to_print = ['localhost', 'lport', 'cstate', 'remotehost', 'rport', 'layer', 'ipv', 'pid', 'name', 'umask',
'pstate', 'th', 'mem', 'timer', 'time', 'cc', 'allocated', 'bgp_prefix', 'as_name']
for key in keys_to_print:
print(pretty_print(key), end=' ')
print()
for key, value in inetstat_dict.items():
for _key in keys_to_print:
print(pretty_print(value[_key], _key), end=' ')
print()
if __name__ == "__main__":
inetstat = INetstat()
inetstat_dict = inetstat.ordered_netstat
pprint_dict = dict_values_len(inetstat_dict)
print_inetstat()
end = time.time()
print("exec time: {0:.2f}s".format(end - inetstat.start))
|
mp3_dl.py
|
import youtube_dl, spotipy, os, requests, threading, queue
from spotipy.oauth2 import SpotifyOAuth
from mutagen.mp3 import MP3
from mutagen.id3 import ID3, APIC, TIT2, TPE1, TALB, TRCK
from mutagen.id3 import ID3NoHeaderError
from youtube_dl.utils import DownloadError
from requests.exceptions import HTTPError
number_of_threads = 10
root = os.getcwd()
q = queue.Queue()
sp = None
count = 0
total = 0
ytdl_options = {
'format': 'bestaudio/best',
'postprocessors': [{
'key': 'FFmpegExtractAudio',
'preferredcodec': 'mp3',
'preferredquality': '192',
}],
'extractaudio': True,
'audioformat': 'mp3',
'ffmpeg_location': os.getcwd() + '/ffmpeg/ffmpeg.exe',
'hls_prefer_ffmpeg': True,
'outtmpl': '%(id)s.%(ext)s',
'restrictfilenames': True,
'noplaylist': True,
'nocheckcertificate': True,
'quiet': True,
'no_warnings': True,
'default_search': 'auto',
'source_address': '0.0.0.0'
}
ffmpeg_options = {
'options': '-vn',
}
ytdl = youtube_dl.YoutubeDL(ytdl_options)
def dl_yt_playlist(link, silent=False):
print('Gathering Youtube playlist data...')
try:
result = ytdl.extract_info(link, download=False)
except DownloadError as e:
if 'This video is DRM protected' in str(e):
print('ERROR: Invalid Youtube playlist URL')
return
playlist_name = result['title']
print('Downloading Youtube playlist: \"{}\"'.format(playlist_name))
playlist_name = legalize_chars(playlist_name)
if not os.path.exists(playlist_name):
os.mkdir(playlist_name)
os.chdir(playlist_name)
global count, total
total = len(result['entries'])
count = 0
for i in range(number_of_threads):
t = threading.Thread(target=yt_playlist_worker)
t.daemon = True
t.start()
for video in result['entries']:
q.put(video)
q.join()
os.chdir(root + '/out')
if not silent:
print('Playlist download complete!')
count = 0
total = 0
return True
def dl_yt_video(link, silent=True, recurse=0):
try:
if not silent:
print('Downloading Youtube video...')
result = ytdl.extract_info(link)
filename = '{}.mp3'.format(result['id'])
new_name = result['title']
new_name = '{}.mp3'.format(legalize_chars(new_name))
if not silent:
print('Audio from: \"{}\" Download complete!'.format(result['title']))
if os.path.exists(new_name):
os.remove(new_name)
os.rename(filename, new_name)
except (DownloadError, HTTPError) as e:
if 'ffprobe/avprobe and ffmpeg/avconv not found' in str(e):
return
if 'Incomplete YouTube ID' in str(e):
return
elif 'Video unavailable' in str(e):
return
if recurse >= 4:
print('Retry unsuccessful!')
return
else:
print('ERROR: Download of: \"{}\" failed. Retrying...'.format(link))
dl_yt_video(link, silent=True, recurse=recurse+1)
if recurse:
print('Retry successful!')
try:
return filename
except UnboundLocalError as e:
if str(e) == 'local variable \'filename\' referenced before assignment':
return
def dl_query(query, silent=True, duration=None, recurse=0):
try:
if duration:
lyric_vids = ytdl.extract_info('ytsearch:{} lyrics'.format(query), download=False, extra_info={'duration', 'id'})['entries']
best_diff = abs(duration - lyric_vids[0]['duration'])
best_option = lyric_vids[0]
for result in reversed(lyric_vids):
diff = abs(duration - result['duration'])
if (diff <= best_diff + 5):
best_option = result
best_diff = diff
ytdl.download([best_option['webpage_url']])
filename = '{}.mp3'.format(best_option['id'])
else:
if not silent:
print('Querying Youtube search for \"{}\"...'.format(query))
result = ytdl.extract_info('ytsearch:{}'.format(query))
filename = '{}.mp3'.format(result['entries'][0]['id'])
new_name = result['entries'][0]['title']
if not silent:
print('Audio from: \"{}\" Download complete!'.format(new_name))
new_name = '{}.mp3'.format(legalize_chars(new_name))
if os.path.exists(new_name):
os.remove(new_name)
os.rename(filename, new_name)
except (DownloadError, HTTPError) as e:
if 'ffprobe/avprobe and ffmpeg/avconv not found' in str(e):
return
if recurse >= 4:
print('Retry unsuccessful! Please try again later.')
return None
else:
print('ERROR: Download for query: \"', query, '\" failed. Retrying...')
filename = dl_query(query, silent=True, duration=duration, recurse=recurse+1)
if recurse:
print('Retry successful, Download complete!')
return filename
def dl_spotify(input_link, silent=False):
if not sp:
spotipy_initialize()
playlist_name = input_link['name']
if len(input_link['tracks']['items'][0]) == 6:
playlist_type = 'playlist'
else:
playlist_type = 'album'
album = []
album.append(input_link['images'][0]['url'])
album.append(playlist_name)
print('Downloading Spotify {}: \"{}\"...'.format(playlist_type, playlist_name))
playlist_name = legalize_chars(playlist_name)
if not os.path.exists(playlist_name):
os.mkdir(playlist_name)
os.chdir(playlist_name)
playlist = input_link['tracks']
global total, count
while playlist['next']:
total = total + 100
playlist = sp.next(playlist)
tracks = playlist['items']
total = total + len(tracks)
for i in range(number_of_threads):
t = threading.Thread(target=sp_playlist_worker)
t.daemon = True
t.start()
playlist = input_link['tracks']
while playlist['next']:
tracks = playlist['items']
for track in tracks:
if playlist_type == 'playlist':
track = track['track']
if playlist_type == 'playlist':
args = [track, True, None]
else:
args = [track, True, album]
q.put(args)
playlist = sp.next(playlist)
tracks = playlist['items']
for track in tracks:
if playlist_type == 'playlist':
track = track['track']
if playlist_type == 'playlist':
args = [track, True, None]
else:
args = [track, True, album]
q.put(args)
q.join()
if not silent:
print('Playlist download complete!')
os.chdir(root + '/out')
count = 0
total = 0
return True
def dl_sp_track(track, silent=True, album=None):
if not sp:
spotipy_initialize
title = track['name']
artist = track['artists'][0]['name']
if not silent:
print('Downloading Spotify track: \"{} - {}\".'.format(title, artist))
query = '{} {}'.format(title, artist)
new_name = '{} - {}.mp3'.format(title, artist)
new_name = legalize_chars(new_name)
if os.path.isfile(new_name):
return
duration = int(track['duration_ms'] / 1000)
filename = dl_query(query, duration=duration)
if not filename:
return track
if not os.path.isfile(new_name):
os.rename(filename, new_name)
album_name = album[1] if album else track['album']['name']
track_number = str(track['track_number'])
thumbnail_name = '{}-{}.jpg'.format(legalize_chars(album_name), track_number)
with open(thumbnail_name, 'wb') as handle:
try:
if album:
thumbnail = requests.get(album[0]).content
else:
thumbnail = requests.get(track['album']['images'][0]['url']).content
handle.write(thumbnail)
except Exception as e:
if isinstance(e, KeyboardInterrupt):
raise e
print('ERROR: Processing of thumbnail for \"{} - {}\" failed.'.format(title, artist))
try:
audio = MP3(new_name, ID3=ID3)
audio.tags.add(
APIC(
encoding=3,
mime='image/jpg',
type=3,
desc=u'Cover',
data=open(thumbnail_name, 'rb').read()
)
)
audio.save(v2_version=3)
try:
tags = ID3(new_name)
except ID3NoHeaderError:
print("Adding ID3 header")
tags = ID3()
tags["TIT2"] = TIT2(encoding=3, text=title)
tags["TALB"] = TALB(encoding=3, text=album_name)
tags["TPE1"] = TPE1(encoding=3, text=artist)
tags["TRCK"] = TRCK(encoding=3, text=track_number)
tags.save(new_name)
except Exception as e:
if isinstance(e, KeyboardInterrupt):
raise e
if not ('Errno 13' in str(e)): print('ERROR: ID3 tags unable to be written.')
os.remove(thumbnail_name)
if not silent:
print('Download complete!')
def progress(count=0, total=1, song=''):
percentage = int(count * 1000 / total)
print(
'{}/{} '.format(count, total),
percentage / 10.0, '% ',
'\"{}\" Download Complete!'.format(song),
)
def legalize_chars(filename):
illegal_chars = ['<', '>', ':', '\"', '/', '\\', '|', '?', '*']
for char in illegal_chars:
if char in filename:
filename = filename.replace(char, '')
return filename
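# Worked example (added for clarity): characters Windows rejects in filenames are simply
# dropped, e.g. legalize_chars('AC/DC: "Back In Black"?') -> 'ACDC Back In Black'.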
def spotipy_initialize(SPOTIPY_CLIENT_ID, SPOTIPY_CLIENT_SECRET):
global sp
sp = spotipy.Spotify(
auth_manager=SpotifyOAuth(
client_id=SPOTIPY_CLIENT_ID,
client_secret=SPOTIPY_CLIENT_SECRET,
redirect_uri='http://localhost:8000',
scope='user-library-read',
cache_path='{}/OAuthCache.txt'.format(root)
)
)
def sp_playlist_worker():
while True:
global count, total
args = q.get()
dl_sp_track(args[0], args[1], args[2])
count = count + 1
progress(count, total, '{} - {}'.format(
args[0]['name'],
args[0]['artists'][0]['name']
))
q.task_done()
def yt_playlist_worker():
while True:
global count, total
video = q.get()
dl_yt_video(video['webpage_url'])
count = count + 1
progress(count, total, video['title'])
q.task_done()
|
webcam.py
|
# https://github.com/aiortc/aiortc/tree/master/examples/webcam
# read frames from a webcam and send them to a browser.
# video autostarts
# can be used in an iFrame
# use WebcamVideoStream to run the camera in a thread - reduces wait time for next frame
import argparse
from collections import namedtuple
from threading import Thread
import asyncio
import json
import logging
import os
import platform
import ssl
import datetime
import math
import dlib
import cv2
import numpy
from av import VideoFrame
import paho.mqtt.client as mqtt
from aiohttp import web
from aiortc import RTCPeerConnection, RTCSessionDescription, VideoStreamTrack
# from aiortc.contrib.media import MediaPlayer
from pyimagesearch.directioncounter import DirectionCounter
from pyimagesearch.centroidtracker import CentroidTracker
from pyimagesearch.trackableobject import TrackableObject
ROOT = os.path.dirname(__file__)
async def index(request):
content = open(os.path.join(ROOT, "index.html"), "r").read()
return web.Response(content_type="text/html", text=content)
async def javascript(request):
content = open(os.path.join(ROOT, "client.js"), "r").read()
return web.Response(content_type="application/javascript", text=content)
# This class originally from
# https://github.com/aiortc/aiortc/tree/master/examples/videostream-cli
class OutputVideoStreamTrack(VideoStreamTrack):
"""
A video track that returns camera video.
"""
def __init__(self):
super().__init__() # don't forget this!
self.counter = 0
# generate flag
data_bgr = numpy.hstack(
[
self._create_rectangle(
width=imageHeigth//3, height=imageHeigth, color=(255, 0, 0)
), # blue
self._create_rectangle(
width=imageHeigth//3+1, height=imageHeigth, color=(255, 255, 255)
), # white
self._create_rectangle(width=imageHeigth//3, height=imageHeigth, color=(0, 0, 255)), # red
]
)
# shrink and center it
M = numpy.float32([[0.5, 0, imageWidth / 4], [0, 0.5, imageHeigth / 4]])
data_bgr = cv2.warpAffine(data_bgr, M, (imageWidth, imageHeigth))
# compute animation
omega = 2 * math.pi / imageHeigth
id_x = numpy.tile(numpy.array(range(imageWidth), dtype=numpy.float32), (imageHeigth, 1))
id_y = numpy.tile(
numpy.array(range(imageHeigth), dtype=numpy.float32), (imageWidth, 1)
).transpose()
self.frames = []
for k in range(30):
phase = 2 * k * math.pi / 30
map_x = id_x + 10 * numpy.cos(omega * id_x + phase)
map_y = id_y + 10 * numpy.sin(omega * id_x + phase)
self.frames.append(
VideoFrame.from_ndarray(
cv2.remap(data_bgr, map_x, map_y, cv2.INTER_LINEAR), format="bgr24"
)
)
async def recv(self):
pts, time_base = await self.next_timestamp()
frame = self.frames[self.counter % 30]
frame.pts = pts
frame.time_base = time_base
self.counter += 1
if (trafficDetector.ready):
# there's an image we haven't used yet
image = trafficDetector.read()
elif (webcam.grabbed):
# check if camera has a frame ready
# get camera frame
image = webcam.read()
else:
# use a blank frame
image = create_blank(imageWidth, imageHeigth, rgb_color=(88,111,88))
# perform edge detection
#image = frame.to_ndarray(format="bgr24")
#image = cv2.cvtColor(cv2.Canny(image, 100, 200), cv2.COLOR_GRAY2BGR)
# add date/time
font = cv2.FONT_HERSHEY_SIMPLEX
bottomLeftCornerOfText = (5,imageHeigth -5)
fontScale = .4
fontColor = (255,0,0)
#fontColor = (0,0,0)
lineType = 1
cv2.putText(image,str(datetime.datetime.now()),
bottomLeftCornerOfText,
font,
fontScale,
fontColor,
lineType)
#flood the MQTT messaging: test only
#mqttClient.publish("TrafficCounter/TEST", str(datetime.datetime.now()))
# rebuild a VideoFrame, preserving timing information
new_frame = VideoFrame.from_ndarray(image, format="bgr24")
new_frame.pts = frame.pts
new_frame.time_base = frame.time_base
return new_frame
def _create_rectangle(self, width, height, color):
data_bgr = numpy.zeros((height, width, 3), numpy.uint8)
data_bgr[:, :] = color
return data_bgr
async def offer(request):
params = await request.json()
offer = RTCSessionDescription(sdp=params["sdp"], type=params["type"])
pc = RTCPeerConnection()
pcs.add(pc)
@pc.on("iceconnectionstatechange")
async def on_iceconnectionstatechange():
print("ICE connection state is %s" % pc.iceConnectionState)
if pc.iceConnectionState == "failed":
await pc.close()
pcs.discard(pc)
# # open media source
# if args.play_from:
# player = MediaPlayer(args.play_from)
# else:
# options = {"framerate": "30", "video_size": "640x480"}
# if platform.system() == "Darwin":
# player = MediaPlayer("default:none", format="avfoundation", options=options)
# else:
# player = MediaPlayer("/dev/video0", format="v4l2", options=options)
await pc.setRemoteDescription(offer)
for t in pc.getTransceivers():
# if t.kind == "audio" and player.audio:
# pc.addTrack(player.audio)
# elif t.kind == "video" and player.video:
# #pc.addTrack(player.video)
# pc.addTrack(OutputVideoStreamTrack())
if t.kind == "video":
pc.addTrack(OutputVideoStreamTrack())
answer = await pc.createAnswer()
await pc.setLocalDescription(answer)
return web.Response(
content_type="application/json",
text=json.dumps(
{"sdp": pc.localDescription.sdp, "type": pc.localDescription.type}
),
)
async def on_shutdown(app):
# halt camera
webcam.stop()
# halt image processor
trafficDetector.stop()
# close peer connections
coros = [pc.close() for pc in pcs]
await asyncio.gather(*coros)
pcs.clear()
class WebcamVideoStream:
#https://github.com/jrosebr1/imutils/blob/master/imutils/video/webcamvideostream.py
def __init__(self, src=0, name="WebcamVideoStream"):
# initialize the video camera stream and read the first frame
# from the stream
self.stream = cv2.VideoCapture(src)
# https://techoverflow.net/2018/12/18/how-to-set-cv2-videocapture-image-size/
self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, imageWidth)
self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, imageHeigth)
(self.grabbed, self.frame) = self.stream.read()
# initialize the thread name
self.name = name
# initialize the variable used to indicate if the thread should
# be stopped
self.stopped = False
def start(self):
# start the thread to read frames from the video stream
t = Thread(target=self.update, name=self.name, args=())
t.daemon = True
t.start()
return self
def update(self):
# keep looping infinitely until the thread is stopped
while True:
# if the thread indicator variable is set, stop the thread
if self.stopped:
return
# otherwise, read the next frame from the stream
(self.grabbed, self.frame) = self.stream.read()
def read(self):
# return the frame most recently read
return self.frame
def stop(self):
# indicate that the thread should be stopped
self.stopped = True
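# Typical usage (my addition; assumes the imageWidth/imageHeigth globals are set by the
# startup code before the camera is opened): start the capture thread once and poll the
# most recent frame without blocking on the device, e.g.
#   webcam = WebcamVideoStream(src=0).start()
#   frame = webcam.read()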
class detector:
#https://github.com/jrosebr1/imutils/blob/master/imutils/video/webcamvideostream.py
# based on the WebcamVideoStream class; which this reads from
# creates a thread to process current frame looking for
# object crossing a line.
# counts movement, and provides image marked with detected objects
def __init__(self):
# initialize the thread name
self.name = "TrafficCounter"
# initialize the variable used to indicate if the thread should
# be stopped
self.stopped = False
# initialize the variable that determines if the frame is ready to be consumed
self.ready = False
# initialize the variable that holds the processed image
self.frame = ''
#initialize variable for status of the detector
self.status = "Initializing"
# total number of frames processed
# used to determine if it's time for a 'deep dive' detection
self.totalFrames = 0
# instantiate our centroid tracker, then initialize a list to store
# each of our dlib correlation trackers, followed by a dictionary to
# map each unique object ID to a trackable object
self.centroid_Tracker = CentroidTracker(maxDisappeared=20, maxDistance=30)
self.trackers = []
self.trackableObjects = {}
# initialize the direction info variable (used to store information
# such as up/down or left/right people count)
self.directionInfo = None
# number of frames to skip between detections
self.deepDedectionOnFrame = 30
#a fake system message to make the demo look good
mqttClient.publish("{}/{}".format(mqtt_client_name,"environment/cpu_temp"), "72C")
def start(self):
# start the thread to process frames
t = Thread(target=self.update, name=self.name, args=())
t.daemon = True
t.start()
return self
def update(self):
# keep looping infinitely until the thread is stopped
while True:
# if the thread indicator variable is set, stop the thread
if self.stopped:
return
# otherwise, process next frame
(self.ready, self.frame) = self.trackMovement()
def read(self):
# indicate that image isn't yet ready. avoid pulling an old image
#self.ready = False
# return the frame most recently read
return self.frame
def stop(self):
# indicate that the thread should be stopped
self.stopped = True
def trackMovement(self):
# this function is the entire purpose of this script
# using, as a base, "\HackerBundle_Code\chapter13-object_detection_ncs\people_counter_openvino.py"
# examine frame for movement to count
#image = create_blank(imageWidth, imageHeigth, rgb_color=(88,111,88))
#print("tracking")
image = webcam.read()
"""
#convert into a cartoon
# prepare color
img_color = cv2.pyrDown(cv2.pyrDown(image))
for _ in range(6):
img_color = cv2.bilateralFilter(img_color, 9, 9, 7)
img_color = cv2.pyrUp(cv2.pyrUp(img_color))
# prepare edges
img_edges = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
img_edges = cv2.adaptiveThreshold(
cv2.medianBlur(img_edges, 1),
255,
cv2.ADAPTIVE_THRESH_MEAN_C,
cv2.THRESH_BINARY,
9,
2,
)
img_edges = cv2.cvtColor(img_edges, cv2.COLOR_GRAY2RGB)
# combine color and edges
imageCartoon = cv2.bitwise_and(img_color, img_edges)
"""
# perform edge detection to anonymize the video
imageOut = cv2.cvtColor(cv2.Canny(image, 200, 300), cv2.COLOR_GRAY2BGR)
imageOut = cv2.bitwise_not(imageOut)
#imageOut = image
# convert the frame from BGR to RGB for dlib
rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# initialize the current status along with our list of bounding
# box rectangles returned by object detector
self.status = "Waiting"
rects = []
# every so often, run a more computationally expensive
# object detection method to aid our tracker
if self.totalFrames % self.deepDetectionOnFrame == 0:
# set the status and initialize our new set of object trackers
self.status = "Detecting"
self.trackers = []
# convert the frame to a blob and pass the blob through the
# network and obtain the detections
blob = cv2.dnn.blobFromImage(image, size=(300, 300), ddepth=cv2.CV_8U)
net.setInput(blob, scalefactor=1.0/127.5, mean=[127.5, 127.5, 127.5])
detections = net.forward()
# loop over the detections
for i in numpy.arange(0, detections.shape[2]):
# extract the confidence (i.e., probability) associated
# with the prediction
confidence = detections[0, 0, i, 2]
# filter out weak detections by requiring a minimum
# confidence
desiredConfidence = 0.4
if confidence > desiredConfidence:
# extract the index of the class label from the detections list
idx = int(detections[0, 0, i, 1])
# if the class label is not a person, ignore it
if CLASSES[idx] != "person":
#print("no persons found")
continue
else:
#print("Person")
pass
# compute the (x, y)-coordinates of the bounding box
# for the object
box = detections[0, 0, i, 3:7] * numpy.array(
[imageWidth, imageHeigth, imageWidth, imageHeigth])
(startX, startY, endX, endY) = box.astype("int")
# construct a dlib rectangle object from the bounding
# box coordinates and then start the dlib correlation
# tracker
tracker = dlib.correlation_tracker()
rect = dlib.rectangle(startX, startY, endX, endY)
tracker.start_track(rgb, rect)
# add the tracker to our list of trackers so we can
# utilize it during skip frames
self.trackers.append(tracker)
# otherwise, we should utilize our object *trackers* rather than
# object *detectors* to obtain a higher frame processing
# throughput
else:
# loop over the trackers
for tracker in self.trackers:
# set the status of our system to be 'tracking' rather
# than 'waiting' or 'detecting'
self.status = "Tracking"
# update the tracker and grab the updated position
tracker.update(rgb)
pos = tracker.get_position()
# unpack the position object
startX = int(pos.left())
startY = int(pos.top())
endX = int(pos.right())
endY = int(pos.bottom())
# add the bounding box coordinates to the rectangles list
rects.append((startX, startY, endX, endY))
#print(startX, startY, endX, endY)
# check if the direction is *vertical*
if directionMode == "vertical":
# draw a horizontal line in the center of the frame -- once an
# object crosses this line we will determine whether they were
# moving 'up' or 'down'
cv2.line(imageOut, (0, imageHeigth // 2), (imageWidth, imageHeigth // 2), (0, 255, 255), 2)
# otherwise, the direction is *horizontal*
else:
# draw a vertical line in the center of the frame -- once an
# object crosses this line we will determine whether they were
# moving 'left' or 'right'
cv2.line(imageOut, (imageWidth // 2, 0), (imageWidth // 2, imageHeigth), (0, 255, 255), 2)
# use the centroid tracker to associate the (1) old object
# centroids with (2) the newly computed object centroids
objects = self.centroid_Tracker.update(rects)
# loop over the tracked objects
info = '' # a placeholder so text created within this loop can be used later
for (objectID, centroid) in objects.items():
# grab the trackable object via its object ID
tracked_Object = self.trackableObjects.get(objectID, None)
#print("ObID ", objectID, self.trackableObjects)
if tracked_Object is None:
# create a new trackable object if needed
tracked_Object = TrackableObject(objectID, centroid)
#print("new tracked object")
else:
# otherwise, there is a trackable object so we can utilize it
# to determine direction
# find the direction and update the list of centroids
#print("there is a trackable object")
direction_Counter.find_direction(tracked_Object, centroid)
tracked_Object.centroids.append(centroid)
# check to see if the object has been counted or not
if not tracked_Object.counted:
# find the direction of motion of the people
self.directionInfo = direction_Counter.count_object(tracked_Object, centroid)
# store the trackable object in our dictionary
self.trackableObjects[objectID] = tracked_Object
# draw both the ID of the object and the centroid of the
# object on the output frame
text = "ID {}".format(objectID)
color = (0, 255, 0) if tracked_Object.counted else (0, 0, 255)
cv2.putText(imageOut, text, (centroid[0] - 10, centroid[1] - 10),
cv2.FONT_HERSHEY_SIMPLEX, 2, color, 2)
cv2.circle(imageOut, (centroid[0], centroid[1]),6, color, -1)
# check if there is any direction info available
if self.directionInfo is not None:
# construct a list of information as a combination of
# direction info and status info
info = self.directionInfo + [("Status", self.status)]
# send MQTT message
# for (key, value) in info:
# mqttClient.publish("{}/{}".format(mqtt_topic_Detected,key),value)
# mqttClient.publish("{}/{}".format(mqtt_topic_Detected,"time"),str(datetime.datetime.now()))
else:
# otherwise, there is no direction info available yet
# construct a list of information as status info since we
# don't have any direction info available yet
info = [("Status", self.status)]
# loop over the info tuples and draw them on our frame
for (i, (k, v)) in enumerate(info):
#print(i, (k, v))
text = "{}: {}".format(k, v)
cv2.putText(imageOut, text, (20, imageHeigth - ((i * 20) + 20)),
cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
# send MQTT message - only if there is something to send
if (info != ''):
for (key, value) in info:
mqttClient.publish("{}/{}".format(mqtt_topic_Detected,key),value)
mqttClient.publish("{}/{}".format(mqtt_topic_Detected,"time"),str(datetime.datetime.now()))
# increment the total number of frames processed thus far
self.totalFrames += 1
return True, imageOut #imageCartoon
#****************************************************************
#https://stackoverflow.com/questions/9710520/opencv-createimage-function-isnt-working
def create_blank(width, height, rgb_color=(0, 0, 0)):
"""Create new image(numpy array) filled with certain color in RGB"""
# Create black blank image
image = numpy.zeros((height, width, 3), numpy.uint8)
# Since OpenCV uses BGR, convert the color first
color = tuple(reversed(rgb_color))
# Fill image with color
image[:] = color
return image
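# Hedged usage sketch (editor's addition): create_blank builds a solid-color BGR
# frame; the commented-out call in detector.trackMovement() uses it as a
# placeholder image matching the camera resolution.
#
#   placeholder = create_blank(imageWidth, imageHeigth, rgb_color=(88, 111, 88))
#   assert placeholder.shape == (imageHeigth, imageWidth, 3)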
#****************************************************************
def mqtt_on_connect(client, userdata, flags, rc):
if rc==0:
client.connected_flag=True #set flag
print("MQTT connected OK Returned code=",rc, flush=True) # the 'flush' empties the stfio buffer so the line appears in the journalctl log https://askubuntu.com/questions/620219/systemd-on-15-04-wont-log-stdout-of-unit
else:
client.connected_flag=False #set flag
print("Bad connection Returned code=",rc, flush=True)
def mqtt_on_publish(client,userdata,msgID): #create function for callback
#print("data published \n")
pass
def mqtt_on_disconnect(client, userdata, rc):
logging.info("disconnecting reason " +str(rc))
print("DISCONNECTED Returned code=" +str(rc), flush=True)
client.connected_flag=False
###############################################################
# start of app
###############################################################
# can only grab images/frames that the camera supports
# list valid formats of a USB camera => v4l2-ctl --list-formats-ext --device=0
# Resolution names: https://en.wikipedia.org/wiki/Display_resolution#/media/File:Vector_Video_Standards8.svg
cameraFormat = namedtuple('cameraFormat', 'width height fps')
cameraXGA = cameraFormat(width=1024, height=600, fps=30)
cameraVGA = cameraFormat(width=640, height=480, fps=30)
cameraQVGA = cameraFormat(width=320, height=240, fps=30)
imageWidth = cameraVGA.width
imageHeigth = cameraVGA.height
#parameters for MQTT
mqtt_broker_address="10.0.3.139"
mqtt_port=1883
mqtt_client_name = "TrafficCounter"
mqtt_topic_Detected = '{}/TrafficDetected'.format(mqtt_client_name)
# initialize the list of class labels MobileNet SSD detects
CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
"bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
"dog", "horse", "motorbike", "person", "pottedplant", "sheep",
"sofa", "train", "tvmonitor"]
# load our serialized model from disk
net = cv2.dnn.readNetFromCaffe(
"mobilenet_ssd/MobileNetSSD_deploy.prototxt",
"mobilenet_ssd/MobileNetSSD_deploy.caffemodel")
# set the preferable target processor to CPU (since Movidious is not installed)
# and preferable backend to OpenCV
net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)
net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)
print("[INFO] model loaded")
pcs = set()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="WebRTC webcam demo")
parser.add_argument("--cert-file", help="SSL certificate file (for HTTPS)", default="fullchain.pem")
parser.add_argument("--key-file", help="SSL key file (for HTTPS)" , default="privkey.pem")
parser.add_argument("--play-from", help="Read the media from a file and sent it."),
parser.add_argument(
"--host", default="0.0.0.0", help="Host for HTTP server (default: 0.0.0.0)"
)
parser.add_argument(
"--port", type=int, default=8888, help="Port for HTTP server (default: 8080)"
)
parser.add_argument("--verbose", "-v", action="count")
args = parser.parse_args()
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
if args.cert_file:
ssl_context = ssl.SSLContext()
ssl_context.load_cert_chain(args.cert_file, args.key_file)
else:
ssl_context = None
#initialize the MQTT Client
# http://www.steves-internet-guide.com/client-objects-python-mqtt/
mqtt.Client.connected_flag=False #create flag in class
mqttClient = mqtt.Client(mqtt_client_name) #create new instance
mqttClient.on_connect=mqtt_on_connect #bind call back function
mqttClient.on_publish = mqtt_on_publish #assign function to callback
mqttClient.on_disconnect=mqtt_on_disconnect #bind call back function
mqttClient.connect(mqtt_broker_address,mqtt_port) #establish connection
mqttClient.loop_start() #start the loop
# initialize camera; start grabbing frames
webcam = WebcamVideoStream().start()
# initialize the traffic detector; start processing frames
trafficDetector = detector().start()
# instantiate our direction counter
directionMode = "horizontal" # "vertical"
direction_Counter = DirectionCounter(directionMode, imageHeigth, imageWidth)
app = web.Application()
app.on_shutdown.append(on_shutdown)
app.router.add_get("/", index)
app.router.add_get("/client.js", javascript)
app.router.add_post("/offer", offer)
web.run_app(app, host=args.host, port=args.port, ssl_context=ssl_context)
|
web.py
|
# coding=utf-8
import os
import re
import sys
import functools
import threading
import cherrypy
import json
import subprocess
import traceback
import webbrowser
import argparse
from mako.template import Template
from uuid import uuid4
import telenium
from telenium.client import TeleniumHttpClient
from ws4py.server.cherrypyserver import WebSocketPlugin, WebSocketTool
from ws4py.websocket import WebSocket
from os.path import dirname, join, realpath
from time import time, sleep
SESSION_FN = ".telenium.dat"
TPL_EXPORT_UNITTEST = u"""<%!
def capitalize(text):
return text.capitalize()
def camelcase(text):
return "".join([x.strip().capitalize() for x in text.split()])
def funcname(text):
if text == "init":
return "init"
import re
suffix = re.sub(r"[^a-z0-9_]", "_", text.lower().strip())
return "test_{}".format(suffix)
def getarg(text):
import re
return re.match(r"^(\w+)", text).groups()[0]
%># coding=utf-8
import time
from telenium.tests import TeleniumTestCase
class ${settings["project"]|camelcase}TestCase(TeleniumTestCase):
% if env:
cmd_env = ${ env }
% endif
cmd_entrypoint = [u'${ settings["entrypoint"] }']
% for test in tests:
% if test["name"] == "setUpClass":
<% vself = "cls" %>
@classmethod
def setUpClass(cls):
super(${settings["project"]|camelcase}TestCase, cls).setUpClass()
% else:
<% vself = "self" %>
def ${test["name"]|funcname}(self):
% if not test["steps"]:
pass
% endif
% endif
% for key, value, arg1, arg2 in test["steps"]:
% if key == "wait":
${vself}.cli.wait('${value}', timeout=${settings["command-timeout"]})
% elif key == "wait_click":
${vself}.cli.wait_click('${value}', timeout=${settings["command-timeout"]})
% elif key == "assertExists":
${vself}.assertExists('${value}', timeout=${settings["command-timeout"]})
% elif key == "assertNotExists":
${vself}.assertNotExists('${value}', timeout=${settings["command-timeout"]})
% elif key == "assertAttributeValue":
attr_name = '${arg1|getarg}'
attr_value = ${vself}.cli.getattr('${value}', attr_name)
${vself}.assertTrue(eval('${arg1}', {attr_name: attr_value}))
% elif key == "setAttribute":
${vself}.cli.setattr('${value}', '${arg1}', ${arg2})
% elif key == "sendKeycode":
${vself}.cli.send_keycode('${value}')
% elif key == "sleep":
time.sleep(${value})
% endif
% endfor
% endfor
"""
FILE_API_VERSION = 3
local_filename = None
def threaded(f):
@functools.wraps(f)
def _threaded(*args, **kwargs):
thread = threading.Thread(target=f, args=args, kwargs=kwargs)
thread.daemon = True
thread.start()
return thread
return _threaded
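# Hedged note (editor's addition): `threaded` runs the wrapped callable in a
# daemon thread and returns the Thread object, so callers get control back
# immediately. The names below are hypothetical.
#
#   @threaded
#   def long_task(ws):
#       ws.send_object(["done"])
#
#   t = long_task(some_socket)  # returns at once; work happens in the background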
def funcname(text):
return text.lower().replace(" ", "_").strip()
def getarg(text):
return re.match(r"^(\w+)", text).groups()[0]
class ApiWebSocket(WebSocket):
t_process = None
cli = None
progress_count = 0
progress_total = 0
session = {
"settings": {
"project": "Test",
"entrypoint": "main.py",
"application-timeout": "10",
"command-timeout": "5"
},
"env": {},
"tests": [{
"id": str(uuid4()),
"name": "New test",
"steps": []
}]
}
def opened(self):
super(ApiWebSocket, self).opened()
self.load()
def closed(self, code, reason=None):
pass
def received_message(self, message):
msg = json.loads(message.data)
try:
getattr(self, "cmd_{}".format(msg["cmd"]))(msg["options"])
except:
traceback.print_exc()
def send_object(self, obj):
data = json.dumps(obj)
self.send(data, False)
def save(self):
self.session["version_format"] = FILE_API_VERSION
# check if the file changed
if local_filename is not None:
changed = False
try:
with open(local_filename) as fd:
data = json.loads(fd.read())
changed = data != self.session
except:
changed = True
self.send_object(["changed", changed])
with open(SESSION_FN, "w") as fd:
fd.write(json.dumps(self.session))
def load(self):
try:
with open(SESSION_FN) as fd:
session = json.loads(fd.read())
session = upgrade_version(session)
self.session.update(session)
except:
pass
def get_test(self, test_id):
for test in self.session["tests"]:
if test["id"] == test_id:
return test
def get_test_by_name(self, name):
for test in self.session["tests"]:
if test["name"] == name:
return test
@property
def is_running(self):
return self.t_process is not None
# command implementation
def cmd_recover(self, options):
if local_filename:
self.send_object(["is_local", True])
self.send_object(["settings", self.session["settings"]])
self.send_object(["env", dict(self.session["env"].items())])
tests = [{
"name": x["name"],
"id": x["id"]
} for x in self.session["tests"]]
self.send_object(["tests", tests])
if self.t_process is not None:
self.send_object(["status", "running"])
def cmd_save_local(self, options):
try:
assert local_filename is not None
# save json source
data = self.export("json")
with open(local_filename, "w") as fd:
fd.write(data)
# auto export to python
filename = local_filename.replace(".json", ".py")
data = self.export("python")
with open(filename, "w") as fd:
fd.write(data)
self.send_object(["save_local", "ok"])
self.send_object(["changed", False])
except Exception as e:
traceback.print_exc()
self.send_object(["save_local", "error", repr(e)])
def cmd_sync_env(self, options):
while self.session["env"]:
self.session["env"].pop(next(iter(self.session["env"])))
for key, value in options.get("env", {}).items():
self.session["env"][key] = value
self.save()
def cmd_sync_settings(self, options):
self.session["settings"] = options["settings"]
self.save()
def cmd_sync_test(self, options):
uid = options["id"]
for test in self.session["tests"]:
if test["id"] == uid:
test["name"] = options["name"]
test["steps"] = options["steps"]
self.save()
def cmd_add_test(self, options):
self.session["tests"].append({
"id": str(uuid4()),
"name": "New test",
"steps": []
})
self.save()
self.send_object(["tests", self.session["tests"]])
def cmd_clone_test(self, options):
for test in self.session["tests"]:
if test["id"] != options["test_id"]:
continue
clone_test = test.copy()
clone_test["id"] = str(uuid4())
clone_test["name"] += " (1)"
self.session["tests"].append(clone_test)
break
self.save()
self.send_object(["tests", self.session["tests"]])
def cmd_delete_test(self, options):
for test in self.session["tests"][:]:
if test["id"] == options["id"]:
self.session["tests"].remove(test)
if not self.session["tests"]:
self.cmd_add_test(None)
self.save()
self.send_object(["tests", self.session["tests"]])
def cmd_select(self, options):
if not self.cli:
status = "error"
results = "Application not running"
else:
try:
results = self.cli.highlight(options["selector"])
status = "ok"
except Exception as e:
status = "error"
results = u"{}".format(e)
self.send_object(["select", options["selector"], status, results])
def cmd_select_test(self, options):
test = self.get_test(options["id"])
self.send_object(["test", test])
@threaded
def cmd_pick(self, options):
if not self.cli:
return self.send_object(["pick", "error", "App is not started"])
objs = self.cli.pick(all=True)
return self.send_object(["pick", "success", objs])
@threaded
def cmd_execute(self, options):
self.execute()
def cmd_run_step(self, options):
self.run_step(options["id"], options["index"])
@threaded
def cmd_run_steps(self, options):
test = self.get_test(options["id"])
if test is None:
self.send_object(["alert", "Test not found"])
return
if not self.is_running:
ev_start, ev_stop = self.execute()
ev_start.wait()
if ev_stop.is_set():
return
self.run_test(test)
@threaded
def cmd_run_tests(self, options):
# restart always from scratch
self.send_object(["progress", "started"])
# precalculate the number of steps to run
count = sum([len(x["steps"]) for x in self.session["tests"]])
self.progress_count = 0
self.progress_total = count
try:
ev_start, ev_stop = self.execute()
ev_start.wait()
if ev_stop.is_set():
return
setup = self.get_test_by_name("setUpClass")
if setup:
if not self.run_test(setup):
return
setup = self.get_test_by_name("init")
if setup:
if not self.run_test(setup):
return
for test in self.session["tests"]:
if test["name"] in ("setUpClass", "init"):
continue
if not self.run_test(test):
return
finally:
self.send_object(["progress", "finished"])
def cmd_stop(self, options):
if self.t_process:
self.t_process.terminate()
def cmd_export(self, options):
try:
dtype = options["type"]
mimetype = {
"python": "text/plain",
"json": "application/json"
}[dtype]
ext = {"python": "py", "json": "json"}[dtype]
key = funcname(self.session["settings"]["project"])
filename = "test_ui_{}.{}".format(key, ext)
export = self.export(options["type"])
self.send_object(["export", export, mimetype, filename, dtype])
except Exception as e:
self.send_object(["export", "error", u"{}".format(e)])
def export(self, kind):
if kind == "python":
return Template(TPL_EXPORT_UNITTEST).render(
session=self.session, **self.session)
elif kind == "json":
self.session["version_format"] = FILE_API_VERSION
return json.dumps(
self.session, sort_keys=True, indent=4, separators=(',', ': '))
def execute(self):
ev_start = threading.Event()
ev_stop = threading.Event()
self._execute(ev_start=ev_start, ev_stop=ev_stop)
return ev_start, ev_stop
@threaded
def _execute(self, ev_start, ev_stop):
self.t_process = None
try:
self.start_process()
ev_start.set()
self.t_process.communicate()
self.send_object(["status", "stopped", None])
except Exception as e:
try:
self.t_process.terminate()
except:
pass
try:
self.send_object(["status", "stopped", u"{}".format(e)])
except:
pass
finally:
self.t_process = None
ev_stop.set()
def start_process(self):
url = "http://localhost:9901/jsonrpc"
process_start_timeout = 10
telenium_token = str(uuid4())
self.cli = cli = TeleniumHttpClient(url=url, timeout=10)
# ensure no previous telenium application is running
try:
cli.app_quit()
sleep(2)
except:
pass
# prepare the application
entrypoint = self.session["settings"]["entrypoint"]
cmd = [sys.executable, "-m", "telenium.execute", entrypoint]
cwd = dirname(entrypoint)
if not os.path.isabs(cwd):
cwd = os.getcwd()
env = os.environ.copy()
env.update(self.session["env"])
env["TELENIUM_TOKEN"] = telenium_token
# start the application
self.t_process = subprocess.Popen(cmd, env=env, cwd=cwd)
# wait for telenium server to be online
start = time()
while True:
try:
if cli.app_ready():
break
except Exception:
if time() - start > process_start_timeout:
raise Exception("timeout")
sleep(1)
# ensure the telenium server we are connected to is the same as the one
# we launched here
if cli.get_token() != telenium_token:
raise Exception("Connected to another telenium server")
self.send_object(["status", "running"])
def run_test(self, test):
test_id = test["id"]
try:
self.send_object(["test", test])
self.send_object(["run_test", test_id, "running"])
for index, step in enumerate(test["steps"]):
if not self.run_step(test_id, index):
return
return True
except Exception as e:
self.send_object(["run_test", test_id, "error", str(e)])
else:
self.send_object(["run_test", test_id, "finished"])
def run_step(self, test_id, index):
self.progress_count += 1
self.send_object(
["progress", "update", self.progress_count, self.progress_total])
try:
self.send_object(["run_step", test_id, index, "running"])
success = self._run_step(test_id, index)
if success:
self.send_object(["run_step", test_id, index, "success"])
return True
else:
self.send_object(["run_step", test_id, index, "error"])
except Exception as e:
self.send_object(["run_step", test_id, index, "error", str(e)])
def _run_step(self, test_id, index):
test = self.get_test(test_id)
if not test:
raise Exception("Unknown test")
cmd, selector, arg1, arg2 = test["steps"][index]
timeout = 5
if cmd == "wait":
return self.cli.wait(selector, timeout=timeout)
elif cmd == "wait_click":
self.cli.wait_click(selector, timeout=timeout)
return True
elif cmd == "assertExists":
return self.cli.wait(selector, timeout=timeout) is True
elif cmd == "assertNotExists":
return self.assertNotExists(self.cli, selector, timeout=timeout)
elif cmd == "assertAttributeValue":
attr_name = getarg(arg1)
attr_value = self.cli.getattr(selector, attr_name)
return bool(eval(arg1, {attr_name: attr_value}))
elif cmd == "setAttribute":
return self.cli.setattr(selector, arg1, eval(arg2))
elif cmd == "sendKeycode":
self.cli.send_keycode(selector)
return True
elif cmd == "sleep":
sleep(float(selector))
return True
def assertNotExists(self, cli, selector, timeout=-1):
start = time()
while True:
matches = cli.select(selector)
if not matches:
return True
if timeout == -1:
raise AssertionError("selector matched elements")
if timeout > 0 and time() - start > timeout:
raise Exception("Timeout")
sleep(0.1)
class Root(object):
@cherrypy.expose
def index(self):
raise cherrypy.HTTPRedirect("/static/index.html")
@cherrypy.expose
def ws(self):
pass
class WebSocketServer(object):
def __init__(self, host="0.0.0.0", port=8080, open_webbrowser=True):
super(WebSocketServer, self).__init__()
self.host = host
self.port = port
self.daemon = True
self.open_webbrowser = open_webbrowser
def run(self):
cherrypy.config.update({
"global": {
"environment": "production"
},
"server.socket_port": self.port,
"server.socket_host": self.host,
})
cherrypy.tree.mount(
Root(),
"/",
config={
"/": {
"tools.sessions.on": True
},
"/ws": {
"tools.websocket.on": True,
"tools.websocket.handler_cls": ApiWebSocket
},
"/static": {
"tools.staticdir.on":
True,
"tools.staticdir.dir":
join(realpath(dirname(__file__)), "static"),
"tools.staticdir.index":
"index.html"
}
})
cherrypy.engine.start()
url = "http://{}:{}/".format(self.host, self.port)
print("Telenium {} ready at {}".format(telenium.__version__, url))
if self.open_webbrowser:
webbrowser.open(url)
cherrypy.engine.block()
def stop(self):
cherrypy.engine.exit()
cherrypy.server.stop()
def preload_session(filename):
global local_filename
local_filename = filename
if not local_filename.endswith(".json"):
print("You can load only telenium-json files.")
sys.exit(1)
if not os.path.exists(filename):
print("Create new file at {}".format(local_filename))
if os.path.exists(SESSION_FN):
os.unlink(SESSION_FN)
else:
with open(filename) as fd:
session = json.loads(fd.read())
session = upgrade_version(session)
with open(SESSION_FN, "w") as fd:
fd.write(json.dumps(session))
def upgrade_version(session):
# automatically upgrade to latest version
version_format = session.get("version_format")
if version_format is None or version_format == FILE_API_VERSION:
return session
session["version_format"] += 1
version_format = session["version_format"]
print("Upgrade to version {}".format(version_format))
if version_format == 2:
# arg added in steps, so steps must have 3 arguments not 2.
for test in session["tests"]:
for step in test["steps"]:
if len(step) == 2:
step.append(None)
elif version_format == 3:
# arg added in steps, so steps must have 4 arguments not 3.
for test in session["tests"]:
for step in test["steps"]:
if len(step) == 3:
step.append(None)
return session
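# Hedged example (editor's sketch, not part of the original module): upgrading a
# version-2 session pads each 3-element step with a trailing None so it matches
# the 4-element format of FILE_API_VERSION == 3. The dict is illustrative only.
#
#   old = {"version_format": 2, "tests": [{"steps": [["wait", "//Label", None]]}]}
#   new = upgrade_version(old)
#   assert new["version_format"] == 3
#   assert new["tests"][0]["steps"][0] == ["wait", "//Label", None, None]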
WebSocketPlugin(cherrypy.engine).subscribe()
cherrypy.tools.websocket = WebSocketTool()
def run():
parser = argparse.ArgumentParser(description="Telenium IDE")
parser.add_argument(
"filename",
type=str,
default=None,
nargs="?",
help="Telenium JSON file")
parser.add_argument(
"--new", action="store_true", help="Start a new session")
parser.add_argument(
"--port", type=int, default=8080, help="Telenium IDE port")
parser.add_argument(
"--notab",
action="store_true",
help="Prevent opening the IDE in the browser")
args = parser.parse_args()
if args.new:
if os.path.exists(SESSION_FN):
os.unlink(SESSION_FN)
if args.filename:
preload_session(args.filename)
server = WebSocketServer(port=args.port, open_webbrowser=not args.notab)
server.run()
server.stop()
if __name__ == "__main__":
run()
|
web.py
|
import os
import sys
import inspect
import tempfile
import shutil
import jinja2
import time
# web content
if sys.version_info[0] >= 3:
# python 3.x
import http.server as SimpleHTTPServer
import socketserver as SocketServer
else:
# python 2.x
import SimpleHTTPServer
import SocketServer
import threading
import webbrowser
import logging
log = logging.getLogger(__name__)
from ..utils import working_dir
from .environment import map_environment, DisplayEnvironment
# Get this file's location
_this_path = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
# Set template web-content directory
# note: can be changed prior to calling web_display()
#
# >>> from cqparts.display import web
# >>> web.TEMPLATE_CONTENT_DIR = './my/alternative/template'
# >>> web.web_display(some_thing)
#
# This would typically be used for testing, or development purposes.
TEMPLATE_CONTENT_DIR = os.path.join(_this_path, 'web-template')
SocketServer.TCPServer.allow_reuse_address = True # stops crash on re-use of port
@map_environment(
# named 'cmdline'?
# This is a fallback display environment if no other method is available.
# Therefore it's assumed that the environment that's been detected is a
# no-frills command line.
name='cmdline',
order=99,
condition=lambda: True, # fallback
)
class WebDisplayEnv(DisplayEnvironment):
"""
Display given component in a browser window
This display exports the model, then exposes a http service on *localhost*
for a browser to use.
The http service does not know when the browser window has been closed, so
it will continue to serve the model's data until the user halts the
process with a :class:`KeyboardInterrupt` (by pressing ``Ctrl+C``)
When run, you should see output similar to::
>>> from cqparts.display import WebDisplayEnv
>>> from cqparts_misc.basic.primatives import Cube
>>> WebDisplayEnv().display(Cube())
press [ctrl+c] to stop server
127.0.0.1 - - [27/Dec/2017 16:06:37] "GET / HTTP/1.1" 200 -
Created new window in existing browser session.
127.0.0.1 - - [27/Dec/2017 16:06:39] "GET /model/out.gltf HTTP/1.1" 200 -
127.0.0.1 - - [27/Dec/2017 16:06:39] "GET /model/out.bin HTTP/1.1" 200 -
A new browser window should appear with a render that looks like:
.. image:: /_static/img/web_display.cube.png
Then, when you press ``Ctrl+C``, you should see::
^C[server shutdown successfully]
and any further request on the opened browser window will return
an errorcode 404 (file not found), because the http service has stopped.
"""
def display_callback(self, component, **kwargs):
"""
:param component: the component to render
:type component: :class:`Component <cqparts.Component>`
:param port: port to expose http service on
:type port: :class:`int`
:param autorotate: if ``True``, rendered component will rotate
as if on a turntable.
:type autorotate: :class:`bool`
"""
# Verify Parameter(s)
from .. import Component
if not isinstance(component, Component):
raise TypeError("given component must be a %r, not a %r" % (
Component, type(component)
))
# Parameter defaults
port = kwargs.get('port', 9041)
autorotate = kwargs.get('autorotate', False)
# Create temporary file to host files
temp_dir = tempfile.mkdtemp()
host_dir = os.path.join(temp_dir, 'html')
print("host temp folder: %s" % host_dir)
# Copy template content to temporary location
shutil.copytree(TEMPLATE_CONTENT_DIR, host_dir)
# Export model
exporter = component.exporter('gltf')
exporter(
filename=os.path.join(host_dir, 'model', 'out.gltf'),
embed=False,
)
# Modify templated content
# index.html
with open(os.path.join(host_dir, 'index.html'), 'r') as fh:
index_template = jinja2.Template(fh.read())
with open(os.path.join(host_dir, 'index.html'), 'w') as fh:
# camera location & target
cam_t = [
(((a + b) / 2.0) / 1000) # midpoint (unit: meters)
for (a, b) in zip(exporter.scene_min, exporter.scene_max)
]
cam_p = [
(((b - a) * 1.0) / 1000) + t # max point * 200% (unit: meters)
for (a, b, t) in zip(exporter.scene_min, exporter.scene_max, cam_t)
]
# write
xzy = lambda a: (a[0], a[2], -a[1]) # x,z,y coordinates (not x,y,z)
fh.write(index_template.render(
model_filename='model/out.gltf',
autorotate = str(autorotate).lower(),
camera_target=','.join("%g" % (val) for val in xzy(cam_t)),
camera_pos=','.join("%g" % (val) for val in xzy(cam_p)),
))
try:
# Start web-service (loop forever)
server = SocketServer.ThreadingTCPServer(
server_address=("0.0.0.0", port),
RequestHandlerClass=SimpleHTTPServer.SimpleHTTPRequestHandler,
)
server_addr = "http://%s:%i/" % server.server_address
def thread_target():
with working_dir(host_dir):
server.serve_forever()
print("serving: %s" % server_addr)
sys.stdout.flush()
server_thread = threading.Thread(target=thread_target)
server_thread.daemon = True
server_thread.start()
# Open in browser
print("opening in browser: %s" % server_addr)
sys.stdout.flush()
webbrowser.open(server_addr)
# workaround for https://github.com/dcowden/cadquery/issues/211
import signal
def _handler_sigint(signum, frame):
raise KeyboardInterrupt()
signal.signal(signal.SIGINT, _handler_sigint)
print("press [ctrl+c] to stop server")
sys.stdout.flush()
while True: # wait for Ctrl+C
time.sleep(1)
except KeyboardInterrupt:
log.info("\n[keyboard interrupt]")
sys.stdout.flush()
finally:
# Stop web-service
server.shutdown()
server.server_close()
server_thread.join()
print("[server shutdown successfully]")
# Delete temporary content
if os.path.exists(os.path.join(host_dir, 'cqparts-display.txt')):
# just making sure we're deleting the right folder
shutil.rmtree(temp_dir)
|
punctuation_capitalization_dataset.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
'BertPunctuationCapitalizationDataset',
'LABEL_ID_DIR_FOR_NEMO_CHECKPOINT',
'Progress',
'PunctuationCapitalizationEvalDataConfig',
'PunctuationCapitalizationTrainDataConfig',
'create_label_ids',
'create_masks_and_segment_ids',
'is_legacy_data_config',
'legacy_data_config_to_new_data_config',
'load_label_ids',
'raise_not_equal_labels_error',
'save_label_ids',
]
import itertools
import multiprocessing as mp
import os
import pickle
import random
from dataclasses import dataclass
from math import ceil
from pathlib import Path
from queue import Empty
from time import sleep
from typing import Any, Dict, List, Optional, Set, Tuple, Union
import numpy as np
import torch
from omegaconf import MISSING, DictConfig, OmegaConf
from tqdm import tqdm
from nemo.collections.common.tokenizers.tokenizer_spec import TokenizerSpec
from nemo.collections.nlp.data.data_utils.data_preprocessing import get_label_stats, get_stats
from nemo.core.classes import Dataset
from nemo.core.neural_types import ChannelType, LabelsType, MaskType, NeuralType
from nemo.utils import logging
from nemo.utils.get_rank import is_global_rank_zero
MAX_NUM_QUERIES_IN_SPLIT = 10 ** 4
TOKENIZATION_PROGRESS_REPORT_PERIOD = 10 ** 3
BATCH_MARK_UP_PROGRESS_REPORT_PERIOD = 10 ** 4
BATCH_BUILDING_PROGRESS_REPORT_PERIOD = 10 ** 4
LABEL_ID_DIR_FOR_NEMO_CHECKPOINT = "label_id_files_for_nemo_checkpoint"
@dataclass
class PunctuationCapitalizationDataConfigBase:
"""A base class for punctuation and capitalization data configs. This class does not define ``ds_item``
attribute which works differently for train and evaluation data."""
###################################################
# PARAMETERS COMMON FOR REGULAR AND TARRED DATASETS
###################################################
use_tarred_dataset: bool = MISSING
"""Whether to use tarred dataset. If True, then you should provide ``tar_metadata_file``. Otherwise, you should
provide ``text_file``, ``labels_file``, ``tokens_in_batch``."""
label_info_save_dir: Optional[str] = None
"""A path to a directory where files created during dataset processing are stored. These files include label id
files and label stats files. By default, it is a directory containing ``text_file`` or ``tar_metadata_file``.
You may need this parameter if dataset directory is read-only and thus does not allow saving anything near dataset
files"""
#################################################
# REGULAR DATASET PARAMETERS
#################################################
text_file: Optional[str] = None
"""A path to a file with source text data without punctuation and capitalization."""
labels_file: Optional[str] = None
"""A path to a file with punctuation and capitalization labels in NeMo format. NeMo format is described in
`documentation
<https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/punctuation_and_capitalization.html#nemo-data-format>`_
"""
tokens_in_batch: Optional[int] = None
"""Number of tokens in a batch including paddings and special tokens ([CLS], [SEP], [UNK]). This config does
not have ``batch_size`` parameter."""
max_seq_length: int = 512
"""Max number of tokens in a source sequence. ``max_seq_length`` includes [CLS] and [SEP] tokens. Sequences
which are too long will be clipped by removal of tokens from the end of a sequence."""
num_samples: int = -1
"""A number of samples loaded from ``text_file`` and ``labels_file`` which are used in the dataset. If this
parameter equals ``-1``, then all samples are used."""
use_cache: bool = True
"""Whether to use pickled features. If pickled features file does not exist or ``use_cache=False``, then features
are pickled in ``cache_dir``. Pickled features include input ids, subtokens mask (mask of first tokens in words),
encoded punctuation and capitalization labels, label ids. Features creation consumes considerable time and this
``use_cache=True`` significantly speeds up training starting. Pickled features are also used for sharing features
between processes if data parallel training is used."""
cache_dir: Optional[str] = None
"""A path to a directory containing cache or directory where newly created cache is saved. By default, it is
a directory containing ``text_file``. You may need this parameter if cache for a dataset is going to be created
and the dataset directory is read-only.
``cache_dir`` and ``label_info_save_dir`` are separate parameters for the case when a cache is ready and this cache
is stored in a read only directory. In this case you may need to set a separate ``label_info_save_dir``."""
get_label_frequencies: bool = False
"""Whether to show and save label frequencies. Frequencies are showed if ``verbose`` parameter is ``True``. If
``get_label_frequencies=True``, then frequencies are saved into ``label_info_save_dir``"""
verbose: bool = True
"""If ``True`` dataset instance will print progress messages and examples of acquired features."""
n_jobs: Optional[int] = 0
"""Number of workers used for features creation (tokenization, label encoding, and clipping). If 0, then
multiprocessing is not used; if ``None``, then n_jobs is equal to the number of CPU cores.
There can be weird deadlocking errors with some tokenizers (e.g. SentencePiece) if ``n_jobs`` is greater than zero.
"""
#################################################
# TARRED DATASET PARAMETERS
#################################################
tar_metadata_file: Optional[str] = None
"""A path to tarred dataset metadata file. Tarred metadata file and other parts of tarred dataset are usually
created by the script
`examples/nlp/token_classification/data/create_punctuation_capitalization_tarred_dataset.py
<https://github.com/NVIDIA/NeMo/blob/main/examples/nlp/token_classification/data/create_punctuation_capitalization_tarred_dataset.py>`_
"""
tar_shuffle_n: int = 1
"""The size of shuffle buffer of `webdataset`. The number of batches which are permuted."""
#################################################
# PYTORCH DATALOADER PARAMETERS
#################################################
shuffle: bool = True
"""Shuffle batches every epoch. For regular training datasets, the parameter also activates batch repacking every
epoch. For tarred dataset, it would be only batches permutation."""
drop_last: bool = False
"""In cases when data parallelism is used, ``drop_last`` defines the way data pipeline behaves when some replicas
are out of data and some are not. If ``drop_last`` is ``True``, then epoch ends in the moment when any replica runs
out of data. If ``drop_last`` is ``False``, then the replica will replace missing batch with a batch from a pool of
batches that the replica has already processed. If data parallelism is not used, then parameter ``drop_last`` does
not do anything. For more information see ``torch.utils.data.distributed.DistributedSampler``"""
pin_memory: bool = True
"""See ``torch.utils.data.DataLoader`` documentation."""
num_workers: int = 8
"""See ``torch.utils.data.DataLoader`` documentation."""
persistent_workers: bool = True
"""See ``torch.utils.data.DataLoader`` documentation."""
@dataclass
class PunctuationCapitalizationTrainDataConfig(PunctuationCapitalizationDataConfigBase):
ds_item: Optional[str] = MISSING
"""Path to a directory where `tar_metadata_file` or `text_file` and `labels_file` lay."""
@dataclass
class PunctuationCapitalizationEvalDataConfig(PunctuationCapitalizationDataConfigBase):
ds_item: Optional[Any] = MISSING
"""Path to a directory where `tar_metadata_file` or `text_file` and `labels_file` lay. ``Any`` = ``str`` or
``List[str]``. If a ``List[str]``, then the model is tested or validated on several datasets."""
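# Hedged sketch (editor's addition): building a regular (non-tarred) train data
# config programmatically; the paths and batch size below are placeholders.
#
#   train_cfg = OmegaConf.structured(
#       PunctuationCapitalizationTrainDataConfig(
#           use_tarred_dataset=False,
#           ds_item="/data/punct",
#           text_file="text_train.txt",
#           labels_file="labels_train.txt",
#           tokens_in_batch=2048,
#       )
#   )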
def is_legacy_data_config(ds_section: DictConfig) -> bool:
return 'use_tarred_dataset' not in ds_section
def legacy_data_config_to_new_data_config(
ds_section: DictConfig, legacy_dataset_section: DictConfig, train: bool
) -> DictConfig:
"""
Transform old style dataset to new format dataset.
Args:
ds_section: a ds section (``train_ds``, or ``validation_ds``, or ``test_ds``) from old style config. Such
section contain ``batch_size`` parameter.
legacy_dataset_section: a ``model.dataset`` section. ``model.dataset`` section contains ``data_dir`` parameter
train: ``True`` if ``train_ds`` is transformed and ``False`` otherwise
Returns:
New format dataset based on either ``PunctuationCapitalizationTrainDataConfig`` (``train=True``) or
``PunctuationCapitalizationEvalDataConfig`` (``train=False``)
"""
if train:
cls = PunctuationCapitalizationTrainDataConfig
ds_item = legacy_dataset_section.get('data_dir')
else:
cls = PunctuationCapitalizationEvalDataConfig
ds_item = ds_section.get('ds_item')
ds_item = legacy_dataset_section.get('data_dir') if ds_item is None else ds_item
if ds_item is None:
raise ValueError(
f"Data directory was not found in legacy config.\nspecific dataset configuration:\n"
f"{OmegaConf.to_yaml(ds_section)}\nmodel.dataset:\n{OmegaConf.to_yaml(legacy_dataset_section)}"
)
new_config = OmegaConf.structured(
cls(
use_tarred_dataset=False,
text_file=ds_section.text_file,
labels_file=ds_section.labels_file,
ds_item=ds_item,
max_seq_length=legacy_dataset_section.get(
'max_seq_length', PunctuationCapitalizationDataConfigBase.max_seq_length
),
)
)
return new_config
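# Hedged usage sketch (editor's addition): converting an old-style train section
# plus the legacy ``model.dataset`` section into the new config format. All
# field values below are made up for illustration.
#
#   legacy_ds = OmegaConf.create({"data_dir": "/data/punct", "max_seq_length": 128})
#   old_train_ds = OmegaConf.create(
#       {"text_file": "text_train.txt", "labels_file": "labels_train.txt", "batch_size": 32}
#   )
#   new_train_ds = legacy_data_config_to_new_data_config(old_train_ds, legacy_ds, train=True)
#   assert new_train_ds.ds_item == "/data/punct" and new_train_ds.use_tarred_dataset is False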
def _check_number_of_labels(
words: List[str],
query: str,
qi: int,
split_i: int,
punctuation_labels: List[str],
capitalization_labels: List[str],
) -> None:
if len(words) != len(punctuation_labels):
raise ValueError(
f"Number of punctuation labels for a query number {qi} in a split number {split_i} is not equal to "
f"number of words. Number of words: {len(words)}, number of punctuation labels: "
f"{len(punctuation_labels)}. First 100 characters of the query: '{query[:100]}', punctuation labels: "
f"'{punctuation_labels}'"
)
if len(words) != len(capitalization_labels):
raise ValueError(
f"Number of capitalization labels for a query number {qi} in a split number {split_i} is not equal to "
f"number of words. Number of words: {len(words)}, number of capitalization labels: "
f"{len(capitalization_labels)}. First 100 characters of the query: '{query[:100]}', "
f"capitalization labels: '{capitalization_labels}'"
)
def _show_prog(queues: Tuple[mp.Queue, ...], totals: List[int], descriptions: List[str], units: List[str]) -> None:
"""
Show several ``tqdm`` progress bars.
Args:
queues: a list of queues by which progress is delivered into this function. Each queue is responsible for one
progress bar. ``show_prog`` function extracts integers from ``queues`` elements and adds them to progress
bars. If value extracted from a queue equals ``-1``, then corresponding progress bar is closed. When all
progress bars are closed, this function returns.
totals: list of values 100% of progress bars. See more in a description of ``total`` parameter of
``tqdm.tqdm`` function
descriptions: list of descriptions of progress bars. See more in a description of ``desc`` parameter of
``tqdm.tqdm`` function
units: list of progress bar units. See more in a description of ``unit`` parameter of ``tqdm.tqdm`` function
"""
if not all([len(queues) == len(v) for v in [totals, descriptions, units]]):
raise ValueError(
f"All of parameters `queues`, `total_num_lines`, `descriptions`, `units` have to have equal lengths. "
f"len(queues)={len(queues)}, len(total_num_lines)={len(totals)}, "
f"len(descriptions)={len(descriptions)}, len(units)={len(units)}."
)
prog = [
tqdm(total=tt, desc=dd, unit=uu, unit_scale=True, position=i)
for i, (tt, dd, uu) in enumerate(zip(totals, descriptions, units))
]
finished = [False] * len(queues)
while True:
for i, queue in enumerate(queues):
stop = False
to_add = 0
try:
v = queue.get(block=False)
while v != -1:
to_add += v
v = queue.get(block=False)
stop = True
except Empty:
if to_add == 0 and not stop:
continue
prog[i].n += to_add
prog[i].update(0)
if prog[i].n >= totals[i]:
finished[i] = True
prog[i].close()
if stop:
if prog[i].n < totals[i]:
logging.warning(
f"Progress with description '{descriptions[i]}' terminated before progress bar "
f"reached 100%. prog.n={prog[i].n}, total_num_lines={totals[i]}"
)
finished[i] = True
prog[i].close()
if all(finished):
break
sleep(0.1)
class Progress:
"""
Manages several ``tqdm`` progress bars for multi process tasks. This class can be used as context manager.
The class starts separate process which creates and updates progress bars. Information to progress process is
passed via multiprocessing queues. There is a separate queue for every progress bar.
You can use it as context manager:
.. code-block:: python
with Progress([10, 20], ["progress bar 1", "progress bar 2"], ["parrot", "frog"]) as progress_queues:
num_processes = 10
with multiprocessing.Pool(num_processes) as pool:
data = list(zip(my_data, [progress_queues[0]] * num_processes, [progress_queues[1]] * num_processes))
pool.starmap(worker_func, data)
Or without context manager:
.. code-block:: python
progress = Progress([10, 20], ["progress bar 1", "progress bar 2"], ["parrot", "frog"])
progress_queues = progress.get_queue()
num_processes = 10
with multiprocessing.Pool(num_processes) as pool:
data = list(zip(my_data, [progress_queues[0]] * num_processes, [progress_queues[1]] * num_processes))
pool.starmap(worker_func, data)
progress.finish()
In a worker function you will have to put number of processed items into the progress queues. For example:
.. code-block:: python
def worker_func(my_datum, parrot_progress_queue, frog_progress_queue):
...
for i in range(10):
parrot_progress_queue.put(1)
frog_progress_queue.put(2)
Progress bars and progress process are closed when ``finish`` or ``__exit__`` methods are called.
"""
def __init__(self, total: Union[int, List[int]], desc: Union[str, List[str]], unit: Union[str, List[str]]) -> None:
"""
Starts progress process and creates queues for passing information to the progress process. Number of progress
bars is equal to the max length of lists ``total``, ``desc``, ``unit``. If none of these parameters is a list,
then 1 progress bar is created.
Args:
total: a list of ``int`` which length is equal to the number of progress bars OR an ``int`` OR a list of
one ``int``. Number which comprises 100% of progress bar. When sum of values passed through the
corresponding queue equals ``total`` corresponding progress bar reaches 100%. If ``total`` is an
``int`` or a list of one element, then all progress bars have equal ``total`` parameter.
desc: a list of ``str`` which length is equal to the number of progress bars OR a ``str`` OR a list of one
``str``. Description of a progress bar which is showed as a prefix. See more in description of
parameter ``desc`` of function ``tqdm.tqdm``.
unit: a list of ``str`` which length is equal to the number of progress bars OR a ``str`` OR a list of one
``str``. A unit of a progress bar. See more in description of parameter ``unit`` of function
``tqdm.tqdm``.
"""
if not isinstance(total, list):
total = [total]
if not isinstance(desc, list):
desc = [desc]
if not isinstance(unit, list):
unit = [unit]
num_processes = max([len(total), len(desc), len(unit)])
for param in [total, desc, unit]:
if len(param) not in [num_processes, 1]:
raise ValueError(
f"If parameter of `Progress.__init__` method is a list, then it has to be the same length as other "
f"parameters which are lists"
)
if len(param) == 1:
param *= num_processes
manager = mp.Manager()
self.progress_queues = tuple(manager.Queue() for _ in range(num_processes))
self.progress_process = mp.Process(target=_show_prog, args=(self.progress_queues, total, desc, unit))
self.progress_process.start()
def __enter__(self) -> Tuple[mp.Queue, ...]:
return self.get_queues()
def __exit__(self, exc_type, exc_val, exc_tb) -> None:
self.finish()
def get_queues(self) -> Tuple[mp.Queue, ...]:
return self.progress_queues
def finish(self) -> None:
for q in self.progress_queues:
q.put(-1)
self.progress_process.join()
class TokenizeCreateMasksClipWorker:
"""A worker for tokenization, encoding labels, creating masks for first token in a word, sequence clipping"""
def __init__(
self,
max_seq_length: int,
tokenizer: TokenizerSpec,
punct_label_ids: Optional[Dict[str, int]],
capit_label_ids: Optional[Dict[str, int]],
pad_label: str,
verbose: bool,
progress_queue: mp.Queue,
) -> None:
"""
Args:
max_seq_length: max number of tokens in an input sequence including [CLS] and [SEP] tokens. If number of
tokens in a sequence exceeds ``max_seq_length``, then excess tokens in the end of the sequence
are removed
tokenizer: a tokenizer instance which has properties ``cls_id``, ``pad_id``, ``sep_id``, ``unk_id``
punct_label_ids: dict to map punctuation labels to label ids. Starts with pad_label->0.
capit_label_ids: dict to map capitalization labels to label ids. Starts with pad_label->0.
pad_label: pad value use for labels. By default, it's the neutral label for punctuation and capitalization.
Its id in ``punct_label_ids`` and ``capit_label_ids`` has to be ``0``
verbose: whether to report when the worker finishes its job
progress_queue: a multiprocessing queue used for reporting progress. Useful for creating tarred dataset
"""
self.max_seq_length = max_seq_length
self.tokenizer = tokenizer
self.punct_label_ids = punct_label_ids
self.capit_label_ids = capit_label_ids
self.pad_label = pad_label
self.verbose = verbose
self.progress_queue = progress_queue
def _maybe_clip(self, values: List[int], append_value: int) -> List[int]:
if len(values) > self.max_seq_length:
return values[: self.max_seq_length - 1] + [append_value]
return values
def __call__(
self,
queries: List[str],
punct_label_lines: Optional[Union[List[str], Tuple[str, ...]]],
capit_label_lines: Optional[Union[List[str], Tuple[str, ...]]],
split_i: int,
) -> Tuple[List[np.ndarray], List[np.ndarray], List[np.ndarray], List[np.ndarray]]:
"""
Tokenize, clip, encode labels, and create masks of first tokens in words.
Args:
queries: text sequences
punct_label_lines: a list or a tuple of labels for every word in a sequence (str)
capit_label_lines: a list or a tuple of labels for every word in a sequence (str)
split_i: number of a split which is processed. Used for logging
Returns:
input_ids: a list of 1D int32 arrays. Each array contains token ids of the corresponding query
subtokens_mask: a list of 1D boolean arrays. An array element is ``True`` if corresponding token is the
first token in a word
punct_labels: a list of 1D int32 arrays. Encoded punctuation labels for every token in a query. Tokens in
one word have identical labels
capit_labels: a list of 1D int32 arrays. Encoded capitalization labels for every token in a query. Tokens
in one word have identical labels
"""
all_input_ids, all_subtokens_mask, punct_all_labels, capit_all_labels = [], [], [], []
progress_made = 0
for i, query in enumerate(queries):
words = query.split()
input_ids, subtokens_mask = [self.tokenizer.cls_id], [0]
_check_number_of_labels(words, query, i, split_i, punct_label_lines[i], capit_label_lines[i])
pad_id = self.punct_label_ids[self.pad_label]
punct_labels = [pad_id]
punct_query_labels = [self.punct_label_ids[lab] for lab in punct_label_lines[i]]
capit_labels = [pad_id]
capit_query_labels = [self.capit_label_ids[lab] for lab in capit_label_lines[i]]
for j, word in enumerate(words):
word_ids = self.tokenizer.text_to_ids(word)
if not word_ids and len(word):
word_ids = [self.tokenizer.unk_id]
input_ids.extend(word_ids)
subtokens_mask.append(1)
subtokens_mask.extend([0] * (len(word_ids) - 1))
punct_labels.extend([punct_query_labels[j]] * len(word_ids))
capit_labels.extend([capit_query_labels[j]] * len(word_ids))
# add eos token
input_ids.append(self.tokenizer.sep_id)
subtokens_mask.append(0)
all_input_ids.append(np.array(self._maybe_clip(input_ids, self.tokenizer.sep_id), dtype=np.int32))
all_subtokens_mask.append(np.array(self._maybe_clip(subtokens_mask, 0), dtype=bool))
punct_labels.append(pad_id)
punct_all_labels.append(np.array(self._maybe_clip(punct_labels, pad_id), dtype=np.int32))
capit_labels.append(pad_id)
capit_all_labels.append(np.array(self._maybe_clip(capit_labels, pad_id), dtype=np.int32))
progress_made += 1
if progress_made >= TOKENIZATION_PROGRESS_REPORT_PERIOD:
self.progress_queue.put(progress_made)
progress_made = 0
self.progress_queue.put(progress_made)
if self.verbose:
logging.info(f"Finished processing data split number {split_i}")
return all_input_ids, all_subtokens_mask, punct_all_labels, capit_all_labels
def _get_features(
queries: Union[List[str], Tuple[str, ...]],
punct_label_lines: Union[List[str], Tuple[str, ...]],
capit_label_lines: Union[List[str], Tuple[str, ...]],
max_seq_length: int,
tokenizer: TokenizerSpec,
punct_label_ids: Dict[str, int] = None,
capit_label_ids: Dict[str, int] = None,
pad_label: str = 'O',
verbose: bool = True,
n_jobs: Optional[int] = 0,
progress_queue: Optional[mp.Queue] = None,
) -> Tuple[List[np.ndarray], List[np.ndarray], List[np.ndarray], List[np.ndarray]]:
"""
Tokenizes data, encodes labels, creates masks of first tokens in words, clips sequences by number of tokens.
Args:
queries: text sequences
max_seq_length: max number of tokens in an input sequence including [CLS] and [SEP] tokens. If number of tokens
in a sequence exceeds ``max_seq_length``, then excess tokens in the end of the sequence are removed
tokenizer: a tokenizer instance which has properties ``cls_id``, ``pad_id``, ``sep_id``, ``unk_id``
punct_label_ids: dict to map punctuation labels to label ids. Starts with pad_label->0.
capit_label_ids: dict to map capitalization labels to label ids. Starts with pad_label->0.
pad_label: pad value use for labels. By default, it's the neutral label for punctuation and capitalization.
Its id in ``punct_label_ids`` and ``capit_label_ids`` has to be ``0``
punct_label_lines: a list or a tuple of labels for every word in a sequence (str)
capit_label_lines: a list or a tuple of labels for every word in a sequence (str)
verbose: whether to show examples of tokenized data and various progress information
n_jobs: a number of workers used for preparing features. If ``n_jobs <= 0``, then do not use multiprocessing
and run features creation in this process. If not set, number of workers will be equal to the number of
CPUs.
!!WARNING!!
There can be deadlocking problems with some tokenizers (e.g. SentencePiece, HuggingFace AlBERT)
if ``n_jobs > 0``.
progress_queue: a multiprocessing queue used for reporting progress. Useful for creating tarred dataset
Returns:
input_ids: a list of 1D int32 arrays. Each array contains token ids of corresponding query
subtokens_mask: a list of 1D boolean arrays. An array element is ``True`` if corresponding token is the
first token in a word
punct_labels: a list of 1D int32 arrays. Encoded punctuation labels for every token in a query. Tokens in one
word have identical labels.
capit_labels: a list of 1D int32 arrays. Encoded capitalization labels for every token in a query. Tokens in
one word have identical labels
"""
if verbose:
logging.info("Start initial tokenization.")
create_progress_process = progress_queue is None
if n_jobs is None:
n_jobs = min(mp.cpu_count(), len(queries))
if verbose:
logging.info(f"Running tokenization with {n_jobs} jobs.")
# Number of queries in split
split_size = min(len(queries) // max(n_jobs, 1), MAX_NUM_QUERIES_IN_SPLIT)
n_split = len(queries) // split_size
split_queries = [queries[split_size * i : split_size * (i + 1)] for i in range(n_split - 1)] + [
queries[split_size * (n_split - 1) :]
]
split_punct_labels_lines = [
punct_label_lines[split_size * i : split_size * (i + 1)] for i in range(n_split - 1)
] + [punct_label_lines[split_size * (n_split - 1) :]]
split_capit_labels_lines = [
capit_label_lines[split_size * i : split_size * (i + 1)] for i in range(n_split - 1)
] + [capit_label_lines[split_size * (n_split - 1) :]]
args = list(zip(split_queries, split_punct_labels_lines, split_capit_labels_lines, range(n_split)))
if create_progress_process:
progress = Progress(len(queries), "Tokenization", "query")
progress_queue = progress.get_queues()[0]
if n_jobs > 0:
with mp.Pool(n_jobs) as pool:
result = pool.starmap(
TokenizeCreateMasksClipWorker(
max_seq_length, tokenizer, punct_label_ids, capit_label_ids, pad_label, verbose, progress_queue
),
args,
)
else:
result = []
for x in args:
result.append(
TokenizeCreateMasksClipWorker(
max_seq_length, tokenizer, punct_label_ids, capit_label_ids, pad_label, verbose, progress_queue,
)(*x)
)
if create_progress_process:
progress.finish()
input_ids, subtokens_mask, punct_labels, capit_labels = tuple(list(itertools.chain(*e)) for e in zip(*result))
if verbose:
logging.info("Finished initial tokenization.")
get_stats([len(inp) for inp in input_ids])
logging.info(f"Finished clipping and padding.")
for i in range(min(len(input_ids), 5)):
logging.info("*** Example ***")
logging.info("i: %s" % (i))
logging.info("subtokens: %s" % " ".join(list(map(str, input_ids[i]))))
logging.info("subtokens_mask: %s" % " ".join(list(map(str, subtokens_mask[i]))))
logging.info("punct_labels: %s" % " ".join(list(map(str, punct_labels[i]))))
logging.info("capit_labels: %s" % " ".join(list(map(str, capit_labels[i]))))
return input_ids, subtokens_mask, punct_labels, capit_labels
def create_masks_and_segment_ids(
input_ids: np.ndarray,
subtokens_mask: np.ndarray,
pad_id: int,
cls_id: int,
sep_id: int,
ignore_start_end: bool,
ignore_extra_tokens: bool,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""
Creates segment ids array, input mask, loss mask.
Segment ids array is BERT token type ids in HuggingFace terminology. It is a zeros array for punctuation
and capitalization task.
Input mask element is ``True`` if an element of ``input_ids`` is not padding and ``False`` otherwise.
Loss mask element is ``True`` for the first token in a word. If ``ignore_start_end=False``, then loss mask
element is ``True`` for [CLS] and [SEP] tokens. If ``ignore_extra_tokens=False``, then loss mask element is ``True``
for all word tokens. In all other cases loss mask elements are ``False``.
Args:
input_ids: an integer array of shape ``[Batch, Time]`` containing ids of source token ids
subtokens_mask: a boolean array of shape ``[Batch, Time]`` which elements are ``True`` if they correspond to
the first token of some word
pad_id: an id of padding token
cls_id: an id of [CLS] token
sep_id: an id of [SEP] token
ignore_start_end: whether to compute loss for [CLS] and [SEP] tokens
ignore_extra_tokens: whether to compute loss for not first tokens in words
Returns:
segment_ids: int8 array of shape [Batch, Time]
input_mask: boolean array of shape [Batch, Time]
loss_mask: boolean array of shape [Batch, Time]
"""
segment_ids = np.zeros_like(input_ids, dtype=np.int8)
input_mask = np.not_equal(input_ids, pad_id)
    special_mask = np.equal(input_ids, cls_id) | np.equal(input_ids, sep_id)
if ignore_start_end:
if ignore_extra_tokens:
loss_mask = subtokens_mask
else:
loss_mask = input_mask & ~special_mask
else:
if ignore_extra_tokens:
loss_mask = subtokens_mask | special_mask
else:
loss_mask = input_mask
return segment_ids, input_mask, loss_mask
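# Illustrative sketch (not part of the original module): expected behavior of ``create_masks_and_segment_ids``
# on a tiny hand-made batch. The token ids 101/102/0 standing in for [CLS]/[SEP]/pad are assumptions.
#
# >>> ids = np.array([[101, 7, 8, 102, 0]])
# >>> sub = np.array([[False, True, True, False, False]])
# >>> seg, inp, loss = create_masks_and_segment_ids(ids, sub, pad_id=0, cls_id=101, sep_id=102,
# ...                                               ignore_start_end=True, ignore_extra_tokens=True)
# >>> seg.tolist(), inp.tolist(), loss.tolist()
# ([[0, 0, 0, 0, 0]], [[True, True, True, True, False]], [[False, True, True, False, False]])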
def create_label_ids(unique_labels: Set[str], pad_label: str) -> Dict[str, int]:
"""
Returns label ids dictionary. ``pad_label`` always has id ``0``. Other labels are sorted in alphabetical order.
Args:
        unique_labels: a set of labels from which label ids dictionary is created. May or may not contain ``pad_label``
pad_label: label used for padding. It is also a neutral label
Returns:
label ids dictionary
"""
label_ids = {pad_label: 0}
if pad_label in unique_labels:
unique_labels.remove(pad_label)
for label in sorted(unique_labels):
label_ids[label] = len(label_ids)
return label_ids
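# Illustrative sketch (not part of the original module): the mapping ``create_label_ids`` produces. The label
# set below is hypothetical; the pad label always gets id 0 and the rest are sorted alphabetically.
#
# >>> create_label_ids({'O', ',', '?', '.'}, pad_label='O')
# {'O': 0, ',': 1, '.': 2, '?': 3}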
def load_label_ids(file_path: Union[str, os.PathLike]) -> Dict[str, int]:
ids = {}
with open(file_path) as f:
for i, line in enumerate(f):
ids[line.strip()] = i
return ids
def save_label_ids(label_ids: Dict[str, int], file_path: Path) -> None:
"""
Saves label ids map to a file. In each line of a file one label is saved. Labels are saved in the order of
increasing of their ids.
Args:
label_ids: label id dictionary. Pad label has to have id ``0``
file_path: path to a file where labels will be saved
"""
file_path.parent.mkdir(parents=True, exist_ok=True)
with file_path.open('w') as out:
labels, _ = zip(*sorted(label_ids.items(), key=lambda x: x[1]))
out.write('\n'.join(labels))
def raise_not_equal_labels_error(
first_labels: Dict[str, int], second_labels: Dict[str, int], first_labels_desc: str, second_labels_desc: str
) -> None:
"""
A helper function for raising comprehensible error if labels from 2 sources are different.
Such sources may include:
- labels stored in .nemo checkpoint
- labels stored in tarred dataset
- labels passed in config parameters ``model.common_dataset_parameters.{punct_label_ids,capit_label_ids}``
- labels from files passed in config parameters ``model.class_labels.{punct_labels_file,capit_labels_file}``
- labels in attributes ``PunctuationCapitalizationModel.{punct_label_ids,capit_label_ids}``
- any other source
This function helps to detect configuration early and give error messages that are easy to interpret.
Call this function if ``first_labels != second_labels``.
Args:
first_labels: first dictionary with labels
second_labels: second dictionary with labels
first_labels_desc: a description of first labels
second_labels_desc: a description of second labels
"""
missing_in_first = {k: second_labels[k] for k in set(second_labels) - set(first_labels)}
missing_in_second = {k: first_labels[k] for k in set(first_labels) - set(second_labels)}
not_equal = {
k: {'FIRST LABELS': first_labels[k], 'SECOND LABELS': second_labels[k]}
for k in set(first_labels) & set(second_labels)
if first_labels[k] != second_labels[k]
}
msg = f"{first_labels_desc} (FIRST LABELS) are not equal to {second_labels_desc} (SECOND LABELS)."
if len(missing_in_first) > 0:
msg += f" Number of SECOND LABELS missing in the FIRST LABELS: {len(missing_in_first)}."
if len(missing_in_second) > 0:
msg += f" Number of FIRST LABELS missing in the SECOND LABELS: {len(missing_in_second)}."
if len(not_equal) > 0:
msg += f" Number of labels which are not equal: {len(not_equal)}."
if len(missing_in_first) > 0:
msg += (
f" Several examples of missing SECONDS LABELS in the FIRST LABELS: "
f"{dict(list(missing_in_first.items())[:3])}."
)
if len(missing_in_second) > 0:
msg += (
f" Several examples of missing FIRST LABELS in the SECOND LABELS: "
f"{dict(list(missing_in_second.items())[:3])}."
)
if len(not_equal) > 0:
msg += f" Several examples of labels which are not equal: {dict(list(not_equal.items())[:3])}"
raise ValueError(msg)
def pad(vectors: List[np.ndarray], length: int, value: Union[int, float, bool]) -> np.ndarray:
"""
Pad vectors to length ``length`` and then stack.
Args:
vectors: a list of 1D arrays. Arrays to pad and stack
length: a length of padded sequence. Has to be greater or equal to the maximum length of an element of
``vectors``.
value: a value used for padding
Returns:
an array of padded vectors
"""
result = []
for v in vectors:
result.append(np.concatenate([v, np.full([length - v.shape[0]], value, dtype=v.dtype)]))
return np.stack(result)
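# Illustrative sketch (not part of the original module): padding two hypothetical token-id vectors to a common
# length before stacking them into one array.
#
# >>> pad([np.array([5, 6, 7]), np.array([8])], length=4, value=0)
# array([[5, 6, 7, 0],
#        [8, 0, 0, 0]])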
class BertPunctuationCapitalizationDataset(Dataset):
"""
A dataset to use during training for punctuation and capitalization tasks.
For inference, you will need
:class:`~nemo.collections.nlp.data.token_classification.punctuation_capitalization_infer_dataset.BertPunctuationCapitalizationInferDataset`.
For huge datasets which cannot be loaded into memory simultaneously use
:class:`~nemo.collections.nlp.data.token_classification.punctuation_capitalization_tarred_dataset.BertPunctuationCapitalizationTarredDataset`.
Args:
text_file (:obj:`Union[str, os.PathLike]`): a path to a file with sequences, each line should contain a text
without punctuation and capitalization
labels_file (:obj:`Union[str, os.PathLike]`): a path to a file with labels, each line corresponds to word
labels for a sentence in the ``text_file``. Labels have to follow format described in this section of
documentation :ref:`NeMo Data Format<nemo-data-format-label>`.
        max_seq_length (:obj:`int`): max number of tokens in a source sequence. ``max_seq_length`` includes [CLS]
and [SEP] tokens. Sequences which are too long will be clipped by removal of tokens from the end of the
sequence.
tokenizer (:obj:`TokenizerSpec`): a tokenizer instance which has properties ``unk_id``, ``sep_id``, ``bos_id``,
``eos_id``.
num_samples (:obj:`int`, `optional`, defaults to :obj:`-1`): a number of samples you want to use for the
            dataset. If ``-1``, use all samples in the dataset. Useful for testing.
tokens_in_batch (:obj:`int`, `optional`, defaults to :obj:`5000`): number of tokens in a batch including
            paddings and special tokens ([CLS], [SEP], [UNK]). The :meth:`__getitem__` method of this class returns
            ready batches, not individual samples. The number of samples in a batch is adjusted to input sequence lengths. If input
sequences are short, then a batch will contain more samples. Before packing into batches, samples are
            sorted by the number of tokens they contain. Sorting allows reducing the number of pad tokens in a batch
            significantly. Regular PyTorch data loader shuffling will only permute batches without changing their content.
Proper shuffling is achieved via calling method :meth:`repack_batches_with_shuffle` every epoch.
pad_label (:obj:`str`, `optional`, defaults to :obj:`'O'`): pad value to use for labels. It's also the neutral
label both for punctuation and capitalization.
punct_label_ids (:obj:`Dict[str, int]`, `optional`): dict to map punctuation labels to label ids. For dev set,
use label ids generated during training to support cases when not all labels are present in the dev set.
For training, it is recommended to set ``punct_label_ids`` to ``None`` or load from cache.
capit_label_ids (:obj:`Dict[str, int]`, `optional`): same ``punct_label_ids`` for capitalization labels.
ignore_extra_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`): whether to compute loss on
tokens which are not first tokens in a word. For example, assume that word ``'tokenization'`` is tokenized
into ``['token', 'ization']``. If ``ignore_extra_tokens=True``, loss mask for the word is
``[True, False]``, and if ``ignore_extra_tokens=False``, then loss mask is ``[True, True]``.
ignore_start_end (:obj:`bool`, `optional`, defaults to :obj:`True`): whether to ignore [CLS] and [SEP] tokens
in the loss_mask.
use_cache (:obj:`bool`, `optional`, defaults to :obj:`True`): whether to use pickled features already present
in ``cache_dir`` or not. If pickled features file does not exist or ``use_cache=False``, then features are
pickled in ``cache_dir``. Pickled features include input ids, subtokens mask (mask of first tokens in
            words), encoded punctuation and capitalization labels, and label ids. Feature creation consumes considerable
            time, so ``use_cache=True`` significantly speeds up training start-up. Pickled features are also
used for sharing features between processes if data parallel training is used.
cache_dir (:obj:`Union[str, os.PathLike]`, `optional`): a path to a directory where cache (pickled features)
is stored. By default, ``text_file`` parent directory is used. This parameter is useful if dataset
directory is read-only and you wish to pickle features. In such a case specify a path to directory which
allows writing in ``cache_dir`` parameter.
get_label_frequencies (:obj:`bool`, `optional`, defaults to :obj:`False`): whether to print and save label
            frequencies. Frequencies are shown if the ``verbose`` parameter is ``True``. If
``get_label_frequencies=True``, then frequencies are saved into ``label_info_save_dir`` directory.
label_info_save_dir (:obj:`Union[str, os.PathLike]`, `optional`): a path to a directory where label frequencies
            are saved. By default the ``text_file`` parent directory is used. When the method
:meth:`save_labels_and_get_file_paths` is called label ids are saved into ``label_info_save_dir``
            directory. Parameters ``cache_dir`` and ``label_info_save_dir`` are added for cases when the directory
            containing ``text_file`` is read-only.
punct_label_vocab_file (:obj:`Union[str, os.PathLike]`, `optional`): a path to a .csv file containing
punctuation label vocabulary. Each line in such a vocabulary file contains exactly one label. The first
            line has to contain `pad_label`, otherwise an error will be raised.
capit_label_vocab_file (:obj:`Union[str, os.PathLike]`, `optional`): same as ``punct_label_vocab_file`` for
capitalization labels.
add_masks_and_segment_ids_to_batch (:obj:`bool`, `optional`, defaults to :obj:`True`): whether to add
``'loss_mask'``, ``'input_mask'``, ``'segment_ids'`` items to a batch. Useful for creation of tarred
dataset and can NOT be used during model training and inference.
verbose (:obj:`bool`, `optional`, defaults to :obj:`True`): whether to show data examples, label stats and
other useful information.
n_jobs (:obj:`int`, `optional`, defaults to :obj:`0`): number of workers used for tokenization, encoding
labels, creating "first token in word" mask, and clipping. If ``n_jobs <= 0`` data preparation is performed
            without multiprocessing. If ``n_jobs`` is ``None``, the number of workers will equal the number of CPUs.
.. warning::
There can be deadlocking problems with some tokenizers (e.g. SentencePiece, HuggingFace AlBERT)
if ``n_jobs > 0``.
tokenization_progress_queue (:obj:`multiprocessing.Queue`, `optional`): a queue for reporting tokenization
progress. Useful for creation of tarred dataset
batch_mark_up_progress_queue (:obj:`multiprocessing.Queue`, `optional`): a queue for reporting progress in
deciding which samples batches will contain. Useful for creation of tarred dataset
batch_building_progress_queue (:obj:`multiprocessing.Queue`, `optional`): a queue for reporting progress in
batch creation (stacking and padding). Useful for creation of tarred dataset
"""
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
"""Returns definitions of module output ports. """
return {
'input_ids': NeuralType(('B', 'T'), ChannelType()),
'segment_ids': NeuralType(('B', 'T'), ChannelType()),
'input_mask': NeuralType(('B', 'T'), MaskType()),
'subtokens_mask': NeuralType(('B', 'T'), MaskType()),
'loss_mask': NeuralType(('B', 'T'), MaskType()),
'punct_labels': NeuralType(('B', 'T'), LabelsType()),
'capit_labels': NeuralType(('B', 'T'), LabelsType()),
}
def __init__(
self,
text_file: Union[str, os.PathLike],
labels_file: Union[str, os.PathLike],
max_seq_length: int,
tokenizer: TokenizerSpec,
num_samples: int = -1,
tokens_in_batch: int = 5000,
pad_label: str = 'O',
punct_label_ids: Optional[Dict[str, int]] = None,
capit_label_ids: Optional[Dict[str, int]] = None,
ignore_extra_tokens: bool = False,
ignore_start_end: bool = True,
use_cache: bool = True,
cache_dir: Optional[Union[str, os.PathLike]] = None,
get_label_frequencies: bool = False,
label_info_save_dir: Optional[Union[str, os.PathLike]] = None,
punct_label_vocab_file: Optional[Union[str, os.PathLike]] = None,
capit_label_vocab_file: Optional[Union[str, os.PathLike]] = None,
add_masks_and_segment_ids_to_batch: bool = True,
verbose: bool = True,
n_jobs: Optional[int] = 0,
tokenization_progress_queue: Optional[mp.Queue] = None,
batch_mark_up_progress_queue: Optional[mp.Queue] = None,
batch_building_progress_queue: Optional[mp.Queue] = None,
) -> None:
""" Initializes BertPunctuationCapitalizationDataset. """
self._check_constructor_parameters(
text_file,
labels_file,
punct_label_ids,
capit_label_ids,
punct_label_vocab_file,
capit_label_vocab_file,
num_samples,
use_cache,
)
if punct_label_vocab_file is not None:
punct_label_vocab_file = Path(punct_label_vocab_file).expanduser()
punct_label_ids = load_label_ids(punct_label_vocab_file)
if capit_label_vocab_file is not None:
capit_label_vocab_file = Path(capit_label_vocab_file).expanduser()
capit_label_ids = load_label_ids(capit_label_vocab_file)
text_file, labels_file = Path(text_file).expanduser(), Path(labels_file).expanduser()
if label_info_save_dir is None:
self.label_info_save_dir = text_file.parent
else:
self.label_info_save_dir = Path(label_info_save_dir).expanduser()
self.tokens_in_batch = tokens_in_batch
self.tokenizer = tokenizer
self.pad_label = pad_label
self.ignore_extra_tokens = ignore_extra_tokens
self.ignore_start_end = ignore_start_end
self.add_masks_and_segment_ids_to_batch = add_masks_and_segment_ids_to_batch
self.verbose = verbose
self.batch_mark_up_progress_queue = batch_mark_up_progress_queue
self.batch_building_progress_queue = batch_building_progress_queue
master_device = is_global_rank_zero()
features_pkl = self._get_path_to_pkl_features(text_file, cache_dir, max_seq_length, num_samples)
features = None
if master_device and not (features_pkl.is_file() and use_cache):
if verbose:
logging.info(f'Processing {text_file}')
res = self._read_dataset(text_file, labels_file, num_samples)
text_lines, punct_label_lines, capit_label_lines, punct_unique_labels, capit_unique_labels = res
if punct_label_ids:
self._check_label_ids_vs_unique_labels(
punct_label_ids, punct_unique_labels, 'punct', 'punctuation', labels_file
)
else:
punct_label_ids = create_label_ids(punct_unique_labels, self.pad_label)
if capit_label_ids:
self._check_label_ids_vs_unique_labels(
                    capit_label_ids, capit_unique_labels, 'capit', 'capitalization', labels_file
)
else:
capit_label_ids = create_label_ids(capit_unique_labels, self.pad_label)
features = _get_features(
text_lines,
punct_label_lines,
capit_label_lines,
max_seq_length,
self.tokenizer,
pad_label=self.pad_label,
punct_label_ids=punct_label_ids,
capit_label_ids=capit_label_ids,
verbose=self.verbose,
progress_queue=tokenization_progress_queue,
n_jobs=n_jobs,
)
features_pkl.parent.mkdir(parents=True, exist_ok=True)
pickle.dump(tuple(list(features) + [punct_label_ids, capit_label_ids]), open(features_pkl, "wb"))
if self.verbose:
logging.info(f'Features saved to {features_pkl}')
# wait until the master process writes to the processed data files
if torch.distributed.is_initialized():
torch.distributed.barrier()
if features is None:
features = pickle.load(open(features_pkl, 'rb'))
li = features[-2:]
self._check_label_ids_loaded_from_pkl(
punct_label_ids, capit_label_ids, *li, punct_label_vocab_file, capit_label_vocab_file, features_pkl
)
punct_label_ids, capit_label_ids = li[-2], li[-1]
if tokenization_progress_queue is not None:
tokenization_progress_queue.put(len(features[0]))
if self.verbose:
logging.info(f'Features restored from {features_pkl}')
features = features[:-2]
self.input_ids, self.subtokens_mask, self.punct_labels, self.capit_labels = features
self.punct_label_ids, self.capit_label_ids = punct_label_ids, capit_label_ids
self.batches = self._pack_into_batches(
self.input_ids, self.subtokens_mask, self.punct_labels, self.capit_labels
)
if get_label_frequencies:
self.punct_label_frequencies = self._calculate_and_save_label_frequencies(self.punct_labels, 'punct')
self.capit_label_frequencies = self._calculate_and_save_label_frequencies(self.capit_labels, 'capit')
def _get_path_to_pkl_features(
self, text_file: Path, cache_dir: Optional[Union[str, os.PathLike]], max_seq_length: int, num_samples: int
) -> Path:
if cache_dir is None:
cache_dir = text_file.parent
else:
cache_dir = Path(cache_dir).expanduser()
vocab_size = getattr(self.tokenizer, "vocab_size", 0)
features_pkl = cache_dir / "cached.{}.{}.max_seq_length{}.vocab{}.{}.punctuation_capitalization.pkl".format(
text_file.stem,
self.tokenizer.name,
max_seq_length,
vocab_size,
f'num_samples{num_samples}' if num_samples > 0 else 'all_samples',
)
return features_pkl
@staticmethod
def _check_constructor_parameters(
text_file: Union[str, os.PathLike],
labels_file: Union[str, os.PathLike],
punct_label_ids: Optional[Dict[str, int]],
capit_label_ids: Optional[Dict[str, int]],
punct_label_vocab_file: Union[str, os.PathLike],
capit_label_vocab_file: Union[str, os.PathLike],
num_samples: int,
use_cache: bool,
) -> None:
if torch.distributed.is_initialized() and torch.distributed.get_world_size() > 1 and not use_cache:
raise ValueError(
f"If you already created process group and the world size is greater than 1, then `use_cache` "
f"parameter has to `True`. Only master process prepares features and if `use_cache=False`, then "
f"other processes will not be able to obtain features. Alternatively, you may set `use_cache=False` "
f"and set up data before spawning processes. Use `cache_dir` dataset directory with "
f"`text_file` and `labels_file` is read-only."
)
if not (os.path.exists(text_file) and os.path.exists(labels_file)):
raise FileNotFoundError(
                f'{text_file} or {labels_file} not found. The data should be split into 2 files: text.txt and '
                f'labels.txt. Each line of the text.txt file contains text sequences, where words are separated with '
                f'spaces. The labels.txt file contains corresponding labels for each word in text.txt, the labels are '
f'separated with spaces. Each line of the files should follow the format:\n'
f' [WORD] [SPACE] [WORD] [SPACE] [WORD] (for text.txt) and '
f' [LABEL] [SPACE] [LABEL] [SPACE] [LABEL] (for labels.txt).'
)
if not str(text_file).endswith('.txt'):
raise ValueError(
f"Parameter `text_file` has to be path to a file with .txt extension, whereas `text_file={text_file}`"
)
if not str(labels_file).endswith('.txt'):
raise ValueError(
f"Parameter `labels_file` has to be path to a file with .txt extension, whereas "
f"`labels_file={labels_file}`"
)
if punct_label_ids is not None and punct_label_vocab_file is not None:
punct_label_vocab_file = Path(punct_label_vocab_file).expanduser()
file_punct_label_ids = load_label_ids(punct_label_vocab_file)
if file_punct_label_ids != punct_label_ids:
raise_not_equal_labels_error(
first_labels=punct_label_ids,
second_labels=file_punct_label_ids,
first_labels_desc='Punctuation labels passed to the `PunctuationCapitalizationDataset` '
'constructor in parameter `punct_label_ids`',
second_labels_desc=f'Punctuation labels loaded from file {punct_label_vocab_file} path to which '
f'is passed in parameter `punct_label_vocab_file`',
)
if capit_label_ids is not None and capit_label_vocab_file is not None:
capit_vocab_file = Path(capit_label_vocab_file).expanduser()
file_capit_label_ids = load_label_ids(capit_vocab_file)
if file_capit_label_ids != capit_label_ids:
raise_not_equal_labels_error(
first_labels=capit_label_ids,
second_labels=file_capit_label_ids,
first_labels_desc='Capitalization labels passed to the `PunctuationCapitalizationDataset` '
'constructor in parameter `capit_label_ids`',
second_labels_desc=f'Capitalization labels loaded from file {capit_label_vocab_file} path to '
f'which is passed in parameter `capit_label_vocab_file`',
)
if num_samples == 0:
raise ValueError(
f"Parameter `num_samples` has to be positive or negative whereas `num_samples={num_samples}`. "
f"Negative `num_samples` is for using all samples in a dataset."
)
@staticmethod
def _check_label_ids_loaded_from_pkl(
parameter_punct_label_ids: Dict[str, int],
parameter_capit_label_ids: Dict[str, int],
pkl_punct_label_ids: Any,
pkl_capit_label_ids: Any,
punct_label_vocab_file: Optional[Path],
capit_label_vocab_file: Optional[Path],
features_file: Path,
) -> None:
if not isinstance(pkl_punct_label_ids, dict):
raise ValueError(
f"Punctuation label ids loaded from features file {features_file} has wrong type "
f"{type(pkl_punct_label_ids)}"
)
if parameter_punct_label_ids is not None:
if parameter_punct_label_ids != pkl_punct_label_ids:
raise_not_equal_labels_error(
first_labels=parameter_punct_label_ids,
second_labels=pkl_punct_label_ids,
first_labels_desc="Punctuation labels passed in parameter `punct_label_ids`"
if punct_label_vocab_file is None
else f"Punctuation labels loaded from file {punct_label_vocab_file}",
second_labels_desc=f"Punctuation label ids loaded from features file {features_file}",
)
if not isinstance(pkl_capit_label_ids, dict):
raise ValueError(
f"Capitalization label ids loaded from features file {features_file} has wrong type "
f"{type(pkl_capit_label_ids)}"
)
if parameter_capit_label_ids is not None:
if parameter_capit_label_ids != pkl_capit_label_ids:
raise_not_equal_labels_error(
first_labels=parameter_capit_label_ids,
second_labels=pkl_capit_label_ids,
first_labels_desc="Capitalization labels passed in parameter `capit_label_ids`"
if capit_label_vocab_file is None
else f"Capitalization labels loaded from file {capit_label_vocab_file}",
second_labels_desc=f"Capitalization label ids loaded from features file {features_file}",
)
@staticmethod
def _check_label_ids_vs_unique_labels(
label_ids: Dict[str, int], unique_labels: Set[str], label_type: str, task: str, label_file: Path
) -> None:
if unique_labels - set(label_ids):
not_present_labels = list(unique_labels - set(label_ids))
raise ValueError(
f"{len(not_present_labels)} {task} labels found in {label_file} are not present in "
f"`{label_type}_label_ids`. Examples of unexpected labels from {label_file}: {not_present_labels[:3]}"
)
@staticmethod
def _read_dataset(
text_file: Path, labels_file: Path, num_samples: int
) -> Tuple[Tuple[str, ...], Tuple[str, ...], Tuple[str, ...], Set[str], Set[str]]:
with open(text_file, 'r') as f:
text_lines = f.readlines()
punct_unique_labels, capit_unique_labels = set(), set()
punct_labels_lines, capit_labels_lines = [], []
with labels_file.open() as f:
for i, line in enumerate(f):
pairs = line.split()
if not all([len(p) == 2 for p in pairs]):
raise ValueError(
f"Some label pairs are not pairs but have wrong length (!= 2) in line {i} in label file "
f"{labels_file}"
)
words = text_lines[i].split()
if len(pairs) != len(words):
raise ValueError(
f"In line {i} in text file {text_file} number of words {len(words)} is not equal to the "
f"number of labels {len(pairs)} in labels file {labels_file}."
)
punct_line, capit_line = zip(*pairs)
punct_labels_lines.append(punct_line)
capit_labels_lines.append(capit_line)
punct_unique_labels.update(punct_line)
capit_unique_labels.update(capit_line)
if len(punct_labels_lines) != len(text_lines):
raise ValueError(
f"Number of text lines {len(text_lines)} in text file {text_file} is not equal to the number of lines "
f"{len(punct_labels_lines)} in labels file {labels_file}."
)
dataset = list(zip(text_lines, punct_labels_lines, capit_labels_lines))
if len(dataset) == 0:
raise ValueError(f"Dataset loaded from files {text_file} and {labels_file} is empty.")
if num_samples > 0:
dataset = dataset[:num_samples]
text_lines, punct_labels_lines, capit_labels_lines = zip(*dataset)
return text_lines, punct_labels_lines, capit_labels_lines, punct_unique_labels, capit_unique_labels
def _mark_up_batches(self, input_ids: List[np.ndarray]) -> Tuple[List[int], List[int], List[int]]:
"""
Computes indices of first samples in batch, batch sizes, seq lengths for batches. ``input_ids`` has to be
sorted by number of tokens in ascending order.
Batches are marked up with respect to following conditions:
- total number of tokens in batch including paddings is less or equal to ``self.tokens_in_batch``
- batch size is evenly divisible by 8 (except for the last batch)
- seq length (elements of the third returned object) is evenly divisible by 8
If ``self.batch_mark_up_progress_queue`` is not None, then the progress in mark up is reported via
``self.batch_mark_up_progress_queue``. Otherwise, ``tqdm`` instance is created in this function.
Args:
input_ids: a list of 1D int32 arrays. Elements of ``input_ids`` have to be sorted by length in ascending
order
Returns:
batch_beginnings: a list of indices in ``input_ids`` of first samples of every batch
batch_sizes: a list of numbers of samples in batches
batch_seq_lengths: a list of sequence lengths after padding for every batch
"""
batch_beginnings, batch_sizes, batch_seq_lengths = [], [], []
current_max_length = 0
start = 0
if self.batch_mark_up_progress_queue is None:
inp_iterator = tqdm(enumerate(input_ids), total=len(input_ids), desc="Batch mark up", unit="query")
else:
inp_iterator = enumerate(input_ids)
progress_made = 0
for i, inp in inp_iterator:
current_max_length = max(current_max_length, ceil(len(inp) / 8) * 8)
if current_max_length * (i + 1 - start) > self.tokens_in_batch:
batch_size = (i - start) // 8 * 8
if batch_size == 0:
if i > start:
batch_size = i - start
logging.warning(
f"Could not create batch with multiple of 8 size. Probably there is a too long sequence in "
f"the dataset. current_max_length={current_max_length}. Batch size will be reduced to "
f"{batch_size}. tokens_in_batch={self.tokens_in_batch}. The batch includes sequences from "
f"{start} to {i - 1}."
)
else:
logging.warning(
f"Input sequence number {i - 1} is too long. Could not fit it into batch with "
f"{self.tokens_in_batch} tokens. Sequence number {i - 1} will not be added to batches."
)
start = i
current_max_length = ceil(len(inp) / 8) * 8
continue
seq_length = ceil(max([len(inp) for inp in input_ids[start : start + batch_size]]) / 8) * 8
batch_beginnings.append(start)
batch_sizes.append(batch_size)
batch_seq_lengths.append(seq_length)
start += batch_size
current_max_length = ceil(max([len(inp) for inp in input_ids[start : i + 1]]) / 8) * 8
if self.batch_mark_up_progress_queue is not None:
progress_made += 1
if progress_made >= BATCH_MARK_UP_PROGRESS_REPORT_PERIOD:
self.batch_mark_up_progress_queue.put(progress_made)
progress_made = 0
if start < len(input_ids):
seq_length = ceil(max([len(inp) for inp in input_ids[start:]]) / 8) * 8
batch_beginnings.append(start)
batch_sizes.append(len(input_ids) - start)
batch_seq_lengths.append(seq_length)
if self.batch_mark_up_progress_queue is not None:
self.batch_mark_up_progress_queue.put(progress_made)
assert sum(batch_sizes) == len(input_ids)
for i in range(len(batch_beginnings) - 1):
assert batch_beginnings[i] + batch_sizes[i] == batch_beginnings[i + 1]
assert batch_seq_lengths[i] >= max(
[len(inp) for inp in input_ids[batch_beginnings[i] : batch_beginnings[i] + batch_sizes[i]]]
)
return batch_beginnings, batch_sizes, batch_seq_lengths
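    # Illustrative sketch (not part of the original module), with a hypothetical ``tokens_in_batch=16``:
    # for sorted input lengths [3, 4, 5, 9] this method yields
    #     batch_beginnings = [0, 2, 3], batch_sizes = [2, 1, 1], batch_seq_lengths = [8, 8, 16]
    # i.e. every padded length is rounded up to a multiple of 8 and no batch exceeds 16 tokens
    # (with warnings, since such tiny batches cannot be made multiples of 8).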
def _pack_into_batches(
self,
input_ids: List[np.ndarray],
subtokens_mask: List[np.ndarray],
punct_labels: List[np.ndarray],
capit_labels: List[np.ndarray],
) -> List[Dict[str, np.ndarray]]:
"""
Shuffle input sequences, sort them by number of tokens, pad, and pack into batches which satisfy following
conditions:
- total number of tokens in batch including paddings is less or equal to ``self.tokens_in_batch``
- batch size is evenly divisible by 8 (except for the last batch)
- seq length (elements of the third returned object) is evenly divisible by 8
Created batches are shuffled before returning.
If ``self.add_masks_and_segment_ids_to_batch`` is ``True``, then ``'segment_ids'``, ``'loss_mask'``, and
``'input_mask'`` are added to the batch.
If ``self.batch_building_progress_queue`` is not ``None``, then padding progress is reported to
``self.batch_building_progress_queue``. Otherwise, a new ``tqdm`` instance is created in ``pack_into_batches``
method.
Args:
input_ids: a list of 1D int32 arrays which contain token ids of dataset source
subtokens_mask: a list of 1D boolean arrays which elements are ``True`` if corresponding token is the
first token in some word
punct_labels: a list of 1D int32 arrays which contain encoded punctuation labels
capit_labels: a list of 1D int32 arrays which contain encoded capitalization labels
Returns:
a list of batches. Each batch is a dictionary with items:
- ``'input_ids'``: a ``np.int32`` numpy array;
- ``'subtokens_mask'``: a boolean numpy array;
- ``'punct_labels'``: a ``np.int32`` numpy array;
- ``'capit_labels'``: a ``np.int32`` numpy array.
If ``self.add_masks_and_segment_ids_to_batch`` is ``True``, then a batch also contain items
- ``'segment_ids'``: a ``np.int8`` numpy array;
- ``'input_mask'``: a boolean numpy array;
- ``'loss_mask'``: a boolean numpy array.
The values of a batch dictionary are numpy arrays of identical shape.
"""
zipped = list(zip(input_ids, subtokens_mask, punct_labels, capit_labels))
random.shuffle(zipped)
input_ids, subtokens_mask, punct_labels, capit_labels = zip(*sorted(zipped, key=lambda x: x[0].shape[0]))
batch_beginnings, batch_sizes, batch_seq_lengths = self._mark_up_batches(input_ids)
batches = []
if self.batch_building_progress_queue is None:
inp_iterator = tqdm(
zip(batch_beginnings, batch_sizes, batch_seq_lengths),
total=len(batch_beginnings),
desc="Batch building",
unit="batch",
)
else:
# In this case we report number of queries not number of batches
inp_iterator = zip(batch_beginnings, batch_sizes, batch_seq_lengths)
progress_made = 0
for start, size, length in inp_iterator:
batch_input_ids = pad(input_ids[start : start + size], length, self.tokenizer.pad_id)
batch_subtokens_mask = pad(subtokens_mask[start : start + size], length, False)
batch = {
"input_ids": batch_input_ids,
"subtokens_mask": batch_subtokens_mask,
"punct_labels": pad(
punct_labels[start : start + size], length, self.punct_label_ids[self.pad_label]
).astype(np.int64),
"capit_labels": pad(
capit_labels[start : start + size], length, self.capit_label_ids[self.pad_label]
).astype(np.int64),
}
if self.add_masks_and_segment_ids_to_batch:
batch_segment_ids, batch_input_mask, batch_loss_mask = create_masks_and_segment_ids(
batch_input_ids,
batch_subtokens_mask,
self.tokenizer.pad_id,
self.tokenizer.cls_id,
self.tokenizer.sep_id,
self.ignore_start_end,
self.ignore_extra_tokens,
)
batch['segment_ids'] = batch_segment_ids
batch['input_mask'] = batch_input_mask
batch['loss_mask'] = batch_loss_mask
batches.append(batch)
if self.batch_building_progress_queue is not None:
progress_made += size
if progress_made >= BATCH_BUILDING_PROGRESS_REPORT_PERIOD:
self.batch_building_progress_queue.put(progress_made)
progress_made = 0
if self.batch_building_progress_queue is not None:
self.batch_building_progress_queue.put(progress_made)
random.shuffle(batches)
return batches
def repack_batches_with_shuffle(self) -> None:
"""A function for proper shuffling of a dataset. Pytorch data loader shuffing will only permute batches."""
logging.info("Shuffling training dataset")
self.batches = self._pack_into_batches(
self.input_ids, self.subtokens_mask, self.punct_labels, self.capit_labels
)
def _calculate_and_save_label_frequencies(self, all_labels: List[np.ndarray], name: str) -> Dict[str, float]:
"""Calculates and saves labels frequencies in :attr:`label_info_save_dir`."""
merged_labels = itertools.chain.from_iterable(all_labels)
if self.verbose:
logging.info('Three most popular labels')
self.label_info_save_dir.mkdir(parents=True, exist_ok=True)
_, label_frequencies, _ = get_label_stats(
merged_labels, str(self.label_info_save_dir / f'label_count_{name}.tsv')
)
return label_frequencies
def save_labels_and_get_file_paths(
self, punct_labels_file_name: str, capit_labels_file_name: str
) -> Tuple[Path, Path]:
"""
Saves label ids into files located in ``self.label_info_save_dir``. Saved label ids are usually used for
``.nemo`` checkpoint creation.
The signatures of this method and the signature of the method
:meth:`~nemo.collections.nlp.data.token_classification.BertPunctuationCapitalizationTarredDataset.save_labels_and_get_file_paths`
must be identical.
Args:
punct_labels_file_name (:obj:`str`): a name of a punctuation labels file
capit_labels_file_name (:obj:`str`): a name of a capitalization labels file
Returns:
:obj:`Tuple[pathlib.Path, pathlib.Path]`: a tuple containing:
- :obj:`pathlib.Path`: a path to the saved punctuation labels file
- :obj:`pathlib.Path`: a path to the saved capitalization labels file
"""
nemo_dir = self.label_info_save_dir / LABEL_ID_DIR_FOR_NEMO_CHECKPOINT
punct_labels_file = nemo_dir / punct_labels_file_name
capit_labels_file = nemo_dir / capit_labels_file_name
save_label_ids(self.punct_label_ids, punct_labels_file)
save_label_ids(self.capit_label_ids, capit_labels_file)
return punct_labels_file, capit_labels_file
def __len__(self) -> int:
return len(self.batches)
def collate_fn(self, batches: List[Dict[str, np.ndarray]]) -> Dict[str, torch.Tensor]:
"""
Return zeroth batch from ``batches`` list passed for collating and casts ``'segment_ids'``, ``'punct_labels'``,
``'capit_labels'`` to types supported by
:class:`~nemo.collections.nlp.models.token_classification.punctuation_capitalization_model.PunctuationCapitalizationModel`.
All output tensors have shape ``[Batch, Time]``.
.. warning::
A ``batch_size`` parameter of a PyTorch data loader and sampler has to be ``1``.
Args:
batches (:obj:`List[Dict[str, np.ndarray]]`): a list containing 1 batch passed for collating
Returns:
:obj:`Dict[str, torch.Tensor]`: a batch dictionary with following items (for detailed description of batch
items see method :meth:`__getitem__`):
- ``'input_ids'`` (:obj:`torch.Tensor`): :obj:`torch.int32` tensor,
- ``'subtokens_mask'`` (:obj:`torch.Tensor`): :obj:`torch.bool` tensor,
- ``'punct_labels'`` (:obj:`torch.Tensor`): :obj:`torch.int64` tensor,
- ``'capit_labels'`` (:obj:`torch.Tensor`): :obj:`torch.int64` tensor,
- ``'segment_ids'`` (:obj:`torch.Tensor`): :obj:`torch.int32` tensor,
- ``'input_mask'`` (:obj:`torch.Tensor`): :obj:`torch.bool` tensor,
- ``'loss_mask'`` (:obj:`torch.Tensor`): :obj:`torch.bool` tensor.
"""
batch = {k: torch.as_tensor(v) for k, v in batches[0].items()}
batch['segment_ids'] = batch['segment_ids'].int()
batch['punct_labels'] = batch['punct_labels'].long()
batch['capit_labels'] = batch['capit_labels'].long()
return batch
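    # Illustrative sketch (not part of the original module): because __getitem__ already returns whole
    # batches, the dataset is expected to be wrapped with ``batch_size=1`` and this ``collate_fn``, e.g.
    #
    #     loader = torch.utils.data.DataLoader(dataset, batch_size=1, collate_fn=dataset.collate_fn)
    #
    # where ``dataset`` is a hypothetical BertPunctuationCapitalizationDataset instance.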
def __getitem__(self, idx: int) -> Dict[str, np.ndarray]:
"""
Return a batch with index ``idx``. The values of a batch dictionary are numpy arrays of identical shapes
``[Batch, Time]``. Labels are identical for all tokens in a word. For example, if
- word ``'Tokenization'`` is tokenized into tokens ``['token', 'ization']``,
- it is followed by comma,
then punctuation labels are ``[',', ',']`` and capitalization labels are ``['U', 'U']`` (``'U'`` is a label
for words which start with upper case character).
Args:
idx: an index of returned batch
Returns:
:obj:`Dict[str, np.ndarray]`: a dictionary with items:
- ``'input_ids'`` (:obj:`numpy.ndarray`): :obj:`numpy.int32` array containing encoded tokens,
- ``'subtokens_mask'`` (:obj:`numpy.ndarray`): :obj:`bool` array which elements are ``True`` if they
correspond to first token in a word,
- ``'punct_labels'`` (:obj:`numpy.ndarray`): :obj:`numpy.int32` array containing encoded punctuation
labels,
- ``'capit_labels'`` (:obj:`numpy.ndarray`): :obj:`numpy.int32` array containing encoded capitalization
labels.
- ``'segment_ids'`` (:obj:`numpy.ndarray`): :obj:`numpy.int8` array filled with zeros (BERT token types
in HuggingFace terminology) (if ``self.add_masks_and_segment_ids_to_batch`` is ``False``, then this
items is missing),
- ``'input_mask'`` (:obj:`numpy.ndarray`): :obj:`bool` array which elements are ``True`` if corresponding
token is not a padding token (if ``self.add_masks_and_segment_ids_to_batch`` is ``False``, then this
items is missing),
- ``'loss_mask'`` (:obj:`numpy.ndarray`): :obj:`bool` array which elements are ``True`` if loss is
computed for corresponding token. See more in description of constructor parameters
``ignore_start_end``, ``ignore_extra_tokens`` (if ``self.add_masks_and_segment_ids_to_batch`` is
``False``, then this items is missing).
"""
return self.batches[idx]
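# Illustrative construction sketch (not part of the original module). The file paths and the tokenizer below
# are hypothetical placeholders; ``tokenizer`` can be any TokenizerSpec with cls_id/sep_id/pad_id/unk_id.
#
#     dataset = BertPunctuationCapitalizationDataset(
#         text_file='text_train.txt',
#         labels_file='labels_train.txt',
#         max_seq_length=128,
#         tokenizer=tokenizer,
#         tokens_in_batch=5000,
#     )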
|
server.py
|
# Import required modules
import socket
import threading
HOST = '127.0.0.1'
PORT = 1234 # You can use any free port between 0 and 65535 (ports below 1024 usually need elevated privileges)
LISTENER_LIMIT = 5
active_clients = [] # List of all currently connected users
# Function to listen for upcoming messages from a client
def listen_for_messages(client, username):
    while True:
        message = client.recv(2048).decode('utf-8')
        if message != '':
            final_msg = username + '~' + message
            send_messages_to_all(final_msg)
        else:
            # An empty read means the client closed the connection; stop listening
            print(f"The message sent from client {username} is empty")
            break
# Function to send message to a single client
def send_message_to_client(client, message):
client.sendall(message.encode())
# Function to send any new message to all the clients that
# are currently connected to this server
def send_messages_to_all(message):
for user in active_clients:
send_message_to_client(user[1], message)
# Function to handle client
def client_handler(client):
# Server will listen for client message that will
# Contain the username
    while True:
username = client.recv(2048).decode('utf-8')
if username != '':
active_clients.append((username, client))
prompt_message = "SERVER~" + f"{username} added to the chat"
send_messages_to_all(prompt_message)
break
else:
print("Client username is empty")
threading.Thread(target=listen_for_messages, args=(client, username, )).start()
# Main function
def main():
# Creating the socket class object
# AF_INET: we are going to use IPv4 addresses
# SOCK_STREAM: we are using TCP packets for communication
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Creating a try catch block
try:
# Provide the server with an address in the form of
# host IP and port
server.bind((HOST, PORT))
print(f"Running the server on {HOST} {PORT}")
    except Exception as e:
        print(f"Unable to bind to host {HOST} and port {PORT}: {e}")
        return
# Set server limit
server.listen(LISTENER_LIMIT)
# This while loop will keep listening to client connections
    while True:
client, address = server.accept()
print(f"Successfully connected to client {address[0]} {address[1]}")
threading.Thread(target=client_handler, args=(client, )).start()
if __name__ == '__main__':
main()
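# Minimal client sketch (not part of the original script; host and port are taken from the constants above)
# to exercise this server: the first message is treated as the username, later ones are broadcast as
# 'username~message'.
#
#     import socket
#     c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#     c.connect(('127.0.0.1', 1234))
#     c.sendall('alice'.encode())           # registers the username
#     c.sendall('hello everyone'.encode())  # broadcast to all connected clients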
|
main.py
|
import numpy as np
from multiprocessing import Process
from sklearn.neighbors import KNeighborsRegressor
from scipy.spatial.distance import euclidean
def filter_data(input, output, distance, pivot_distance, threshold):
training = []
result = []
    for i in range(len(input)):
if euclidean(pivot_distance, distance[i]) > threshold:
training.append(input[i])
result.append(output[i])
return [np.array(training), np.array(result)]
def predict(X, Y, row):
return KNeighborsRegressor(n_neighbors=5).fit(X, Y).predict([row])
def calculate_c_index(predicted, actual):
n = 0.0
h_sum = 0.0
actual_len = len(actual)
    for i in range(actual_len):
        # print('C-index: ' + str(i) + ' out of ' + str(actual_len))
        t = actual[i]
        p = predicted[i]
        for j in range(i + 1, actual_len):
            nt = actual[j]
            pj = predicted[j]  # renamed from `np` to avoid shadowing the numpy import
            if t != nt:
                n = n + 1.0
                if ((p < pj) and (t < nt)) or ((p > pj) and (t > nt)):
                    h_sum = h_sum + 1.0
                elif ((p < pj) and (t > nt)) or ((p > pj) and (t < nt)):
                    pass
                elif p == pj:
                    h_sum = h_sum + 0.5
return h_sum / n
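# Illustrative sketch (not part of the original script): the concordance index on a toy triple.
# With actual = [1, 2, 3] and predicted = [1, 3, 2], the pairs (1,2) and (1,3) are ordered correctly
# and the pair (2,3) is inverted, so calculate_c_index returns 2/3.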
def process(input_set, output_set, distance_set, threshold):
    predicted = np.empty(len(output_set))
    size = len(input_set)
    for i in range(size):
        # print('Going over ' + str(i) + ' out of ' + str(size))
        filtered = filter_data(input_set, output_set, distance_set, distance_set[i], threshold)
        predicted[i] = predict(filtered[0], filtered[1], input_set[i])
    c_index = calculate_c_index(predicted, output_set)
    print('C for T = ' + str(threshold) + ' is ' + str(c_index))
if __name__ == '__main__':
input = np.genfromtxt('INPUT.csv', delimiter=',')
output = np.genfromtxt('OUTPUT.csv', delimiter=',')
distance = np.genfromtxt('COORDINATES.csv', delimiter=',')
    for t in range(0, 210, 10):
        p = Process(target=process, args=(input, output, distance, t))
p.start()
p.join()
|
heartbeat.py
|
from threading import Thread, Timer
from requests import post
from requests.exceptions import ConnectionError, ConnectTimeout
from document.exec_env import Exec_Env_Document
from lib.http import HTTP_Status
from lib.token import create_token
from reader.arg import Arg_Reader
from utils.log import Log
def heartbeat():
"""Heartbeat procedure with the LCPs."""
s = Exec_Env_Document.search()
res = s[0:s.count()].execute()
threads = []
for exec_env in res:
if exec_env.lcp:
t = Thread(target=heartbeat_exec_env, args=(exec_env,))
threads.append(t)
t.start()
for t in threads:
t.join()
t = Timer(Arg_Reader.db.hb_period, heartbeat)
t.daemon = True
t.start()
def heartbeat_exec_env(exec_env):
log = Log.get('heartbeat')
    # build the label before the try block so the exception handlers below can always reference it
    id = exec_env.meta.id
    lcp = exec_env.lcp
    lbl = f'{id} (LCP at {exec_env.hostname}:{lcp.port})'
    try:
if exec_env.enabled:
schema = 'https' if lcp.https else 'http'
endpoint_lcp = '/' + exec_env.lcp.endpoint if exec_env.lcp.endpoint else ''
resp = post(f'{schema}://{exec_env.hostname}:{lcp.port}{endpoint_lcp}/status', timeout=Arg_Reader.db.hb_timeout,
headers={'Authorization': create_token()}, json={'id': id})
if resp.status_code == HTTP_Status.OK:
data = resp.json()
id = data.pop('id', None)
lcp.started = data.get('started', None)
lcp.last_heartbeat = data.get('last_heartbeat', None)
log.success(f'Connection established with exec-env {lbl}')
else:
lcp.last_heartbeat = None
log.warning(f'Connection reset with exec-env {lbl}')
log.notice(f'Response: {resp.content}')
if not lcp.https:
lcp.https = False
exec_env.save()
else:
log.notice(f'Exec-env {lbl} not enabled')
except ConnectTimeout:
log.error(f'Connection timeout with exec-env {lbl}')
except ConnectionError:
log.error(f'Connection refused with exec-env {lbl}')
except Exception as exception:
log.exception(f'Exception during connection with exec-env {lbl}', exception)
|
rabbitmq_client.py
|
"""时间: 2019/12/24
作者: liyongfang@cyai.com
更改记录:
重要说明:
"""
import json
import logging
import os
import threading
from django.conf import settings
import django
import pika
from utils import config
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "wol_server.settings")
django.setup()
logger = logging.getLogger(settings.ELK_APP_NAME)
CREDENTIALS = pika.PlainCredentials(config.RABBITMQ_USERNAME, config.RABBITMQ_PASSWORD)
PARAMETERS = pika.ConnectionParameters(config.RABBITMQ_HOST, config.RABBITMQ_PORT, config.RABBITMQ_VHOST,
CREDENTIALS, socket_timeout=3)
class RabbitMQConnection(threading.Thread):
"""定义RabbitMQ连接及消息处理类
"""
SINGLETON_CLIENT_CONNECTION = None
SINGLETON_CLIENT_CHANNEL = None
IN_CONNECT = False # flag
LOCK = threading.Lock()
def __init__(self):
threading.Thread.__init__(self)
@classmethod
def _reconnect(cls):
"""同步连接RabbitMQ时会存在阻塞(阻塞会导致系统进行线程切换),本函数需要考虑多线程调用时线程安全问题。期望处于连接过程中,
其他线程不要再进行连接,而是直接抛出异常
Returns:
Raises:
InConnectionException: 连接异常
"""
with cls.LOCK:
if not cls.IN_CONNECT and (not cls.SINGLETON_CLIENT_CONNECTION or
cls.SINGLETON_CLIENT_CONNECTION.is_closed or
not cls.SINGLETON_CLIENT_CHANNEL or
cls.SINGLETON_CLIENT_CHANNEL.is_closed):
cls.IN_CONNECT = True
dispatch = "do_connect"
else:
dispatch = "raise_exception"
if dispatch == "do_connect":
try:
cls.SINGLETON_CLIENT_CONNECTION = pika.BlockingConnection(PARAMETERS)
cls.SINGLETON_CLIENT_CHANNEL = cls.SINGLETON_CLIENT_CONNECTION.channel()
finally:
                # Only guarantee here that IN_CONNECT is reset to False; exceptions are handled by the caller
with cls.LOCK:
cls.IN_CONNECT = False
else:
raise InConnectionException()
@classmethod
def _async_reconnect(cls):
"""相比同步连接RabbitMQ方式,异步连接可以减少连接时由于RabbitMQ本身的不响应,导致连接阻塞过长,进而产生影响系统业务的后果
Returns:
Raises:
InConnectionException: 连接异常
"""
with cls.LOCK:
if not cls.IN_CONNECT and (not cls.SINGLETON_CLIENT_CONNECTION or cls.SINGLETON_CLIENT_CONNECTION.is_closed
or not cls.SINGLETON_CLIENT_CHANNEL or cls.SINGLETON_CLIENT_CHANNEL.is_closed):
cls.IN_CONNECT = True
dispatch = "do_connect"
else:
dispatch = "raise_exception"
if dispatch == "do_connect":
def _on_open_callback(*args, **kwargs):
"""connection open callback
Args:
                    *args (tuple): variable-length positional arguments
                    **kwargs (dict): variable-length keyword arguments
Returns:
"""
def _on_channel_open(*args, **kwargs):
"""channel open callback
Args:
                        *args (tuple): variable-length positional arguments
                        **kwargs (dict): variable-length keyword arguments
Returns:
"""
with cls.LOCK:
cls.IN_CONNECT = False
cls.SINGLETON_CLIENT_CHANNEL.basic_publish(exchange=config.EXCHANGE,
routing_key=config.ROUTING_KEY,
body="channel is opening",
properties=pika.BasicProperties(delivery_mode=2))
try:
cls.SINGLETON_CLIENT_CHANNEL = cls.SINGLETON_CLIENT_CONNECTION.channel(
on_open_callback=_on_channel_open)
except Exception as channel_open_error:
logger.error("channel open error: {}".format(str(channel_open_error)))
                    cls._process_execption()  # release connection resources
with cls.LOCK:
cls.IN_CONNECT = False
def _on_open_error_callback(*args, **kwargs):
"""connection open error callback
Args:
                    *args (tuple): variable-length positional arguments
                    **kwargs (dict): variable-length keyword arguments
Returns:
"""
cls._process_execption()
with cls.LOCK:
cls.IN_CONNECT = False
def _rabbit_ioloop_process(connection):
"""RabbitMQ ioloop
Args:
                    connection (object): a pika.SelectConnection instance
Returns:
"""
try:
# ioloop: pika.adapters.base_connection
# start: pika.adapters.utils.selector_ioloop_adapter
connection.ioloop.start()
except Exception as rabbit_ioloop_error:
logger.error("RabbitMQ ioloop error: {}".format(str(rabbit_ioloop_error)))
cls._process_execption()
try:
cls.SINGLETON_CLIENT_CONNECTION = pika.SelectConnection(parameters=PARAMETERS,
on_open_callback=_on_open_callback,
on_open_error_callback=_on_open_error_callback)
threading.Thread(target=_rabbit_ioloop_process, args=(cls.SINGLETON_CLIENT_CONNECTION,)).start()
except Exception as async_connect_error:
logger.error("async connect failed: {}".format(str(async_connect_error)))
                # If starting the asynchronous connection fails, reset IN_CONNECT to False here; once the connection has started, the callbacks above update IN_CONNECT
with cls.LOCK:
cls.IN_CONNECT = False
else:
raise InConnectionException()
@classmethod
def _process_execption(cls):
"""exception process
Returns:
"""
try:
if cls.SINGLETON_CLIENT_CHANNEL:
cls.SINGLETON_CLIENT_CHANNEL.close()
if cls.SINGLETON_CLIENT_CONNECTION:
cls.SINGLETON_CLIENT_CONNECTION.close()
except Exception as connect_rabbitmq_error:
logger.error("close rabbitmq connection failed: {}".format(str(connect_rabbitmq_error)))
finally:
cls.SINGLETON_CLIENT_CHANNEL = None
cls.SINGLETON_CLIENT_CONNECTION = None
def rabbitmq_connection_setup(self):
"""建立RabbitMQ连接
Returns:
"""
try:
# self._async_reconnect()
self._reconnect()
except Exception as rabbitmq_connection_setup_error:
logger.error("RabbitMQ connection setup failed: {}".format(str(rabbitmq_connection_setup_error)))
self._process_execption()
@classmethod
def send_rabbitmq_message(cls, message, routing_key, durable):
"""发送消息到RabbitMQ
Args:
message (str): 发送的消息
routing_key (str): 路由键
durable (bool): queue是否持久化,默认False
Returns:
tuple: 发送成功返回True,OK,发送失败返回False,描述
"""
ret = (True, "OK")
try:
if not cls.IN_CONNECT and (not cls.SINGLETON_CLIENT_CONNECTION or
cls.SINGLETON_CLIENT_CONNECTION.is_closed or not
cls.SINGLETON_CLIENT_CHANNEL or cls.SINGLETON_CLIENT_CHANNEL.is_closed):
# cls._async_reconnect()
cls._reconnect()
if cls.SINGLETON_CLIENT_CHANNEL:
if durable:
send_properties = pika.BasicProperties(delivery_mode=2)
else:
send_properties = None
cls.SINGLETON_CLIENT_CHANNEL.basic_publish(exchange=config.EXCHANGE,
routing_key=routing_key,
body=json.dumps(message),
properties=send_properties)
else:
ret = (False, "RabbitMQ connection is not ready!")
except InConnectionException as in_connection_error:
logger.warning("RabbitMQ connection exception: {}".format(str(in_connection_error)))
except Exception as other_error:
logger.error("send msg({}) to RabbitMQ({}) port({}) vhost({}) exchange({}) routing_key({}) failed!".format(
message, config.RABBITMQ_HOST, config.RABBITMQ_PORT, config.RABBITMQ_VHOST, config.EXCHANGE,
config.ROUTING_KEY
))
logger.error("Unexpected error occur: {}".format(str(other_error)))
cls._process_execption()
ret = (False, "Exception error")
return ret
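    # Illustrative usage sketch (not part of the original module); the routing key below is a hypothetical value:
    #
    #     ok, desc = RabbitMQConnection.send_rabbitmq_message({"event": "wake"}, routing_key="wol.events",
    #                                                         durable=True)
    #     if not ok:
    #         logger.warning("publish failed: %s", desc)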
class InConnectionException(Exception):
"""定义连接异常类
"""
def __str__(self):
"""抛异常时的打印函数
Returns:
str: 异常信息
"""
return "The main thread is connecting the rabbitmq host."
|
Lib.py
|
#!/usr/bin/env python3
import os, psutil, signal
import sys
import fcntl
import pytz
import time
from datetime import datetime
import multiprocessing
from multiprocessing import Queue
import subprocess, shlex
import atexit
import socketserver
import socket
import re
import shutil
def getTaipeiTime():
return datetime.now(pytz.timezone('Asia/Taipei')).strftime("%m-%d_%H-%M")
def check_PidAlive(pid):
"""
return True if the pid is still working
    return False if the pid is dead
"""
if pid != None:
try:
if os.waitpid(pid, os.WNOHANG) == (0,0):
return True
else:
return False
except OSError:
pass
return False
def KillProcesses(pid):
'''
kill all the children of pid and itself
'''
parent_pid = pid
try:
parent = psutil.Process(parent_pid)
for child in parent.children(recursive=True):
child.kill()
except Exception as e:
print("Failed to KillProcesses with pid={}\n Skip it.".format(pid))
return
parent.kill()
def KillChildren(pid):
'''
kill all the children of the pid except itself
'''
parent_pid = pid
try:
parent = psutil.Process(parent_pid)
for child in parent.children(recursive=True):
try:
child.kill()
except Exception as e:
pass
except Exception as e:
print("Failed to KillChildren with pid={}\nReasons:{}".format(pid, e))
return
def KillPid(pid):
'''
kill the pid
'''
try:
os.kill(pid, signal.SIGKILL)
except Exception as e:
print("KillPid() failed.\n reasons:{}".format(e))
def LimitTimeExec(LimitTime, Func, *args):
"""
Input:
1. LimitTime: is in the unit of secs.
2. Func: must return a list that contains your return value
3. args: pass into Func
Return value:
1. isKilled: killed by timing
2. ret(int): from Func(args) to indicate success or not
"""
ret = -1
PrevWd = os.getcwd()
isKilled = False
WaitSecs = 0
WaitUnit = 10
ExecProc = multiprocessing.Process(target=Func, args=[args])
# NOTE: SIGKILL will not kill the children
# kill all its sub-process when parent is killed.
ExecProc.daemon = True
ExecProc.start()
while True:
date = getTaipeiTime()
if ExecProc.is_alive():
# log date to check liveness
print("Alive at {}".format(date))
time.sleep(WaitUnit)
WaitSecs += WaitUnit
else:
# return the return code to indicate success or not
ret = ExecProc.exitcode
isKilled = False
print("The command is finished at {} with exitcode={}, break.".format(date, ret))
break
if WaitSecs > LimitTime:
if not ExecProc.is_alive():
# if the work is done after the sleep
continue
# handle the processes twice, kill its children first
KillChildren(ExecProc.pid)
# with daemon flag, all children will be terminated
ExecProc.terminate()
KillPid(ExecProc.pid)
# wait for a few secs
ExecProc.join(10)
if ExecProc.exitcode is None: # exitcode is None for unfinished proc.
print("ExecProc.terminate() failed; Daemon handler exit.")
sys.exit(0)
isKilled = True
ret = -1
print("Achieve time limitation, kill it at {}.".format(getTaipeiTime()))
break
os.chdir(PrevWd)
return isKilled, ret
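# Illustrative usage sketch (not part of the original module); ``build_once`` is a hypothetical worker that,
# like EnvBuilder.workerMake, receives all its arguments packed into a single tuple. Success or failure is
# propagated through the child process exit code.
#
#     def build_once(args):
#         worker_id, target = args
#         sys.exit(ExecuteCmd(WorkerID=worker_id, Cmd="make " + target, Block=True)[0])
#
#     killed, rc = LimitTimeExec(900, build_once, "1", "all")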
def ExecuteCmd(WorkerID=1, Cmd="", Block=True, ParallelBuild=False):
"""
return cmd's return code, STDOUT, STDERR
"""
# Use taskset by default
if Block:
'''
The taskset configuration depends on the hardware.
If your computer is other than 8700K, you must customized it.
Current configuration:
intel 8700K:
Core 0 as the "benchmark scheduler"
Core 1~5 as the "worker" to run programs.
Core 6~11 are not "real core", they are hardware threads shared with Core 0~5.
'''
CpuWorker = str((int(WorkerID) % 5) + 1)
TrainLoc = os.getenv("LLVM_THESIS_TrainingHome", "Error")
if not ParallelBuild:
FullCmd = "taskset -c " + CpuWorker + " " + Cmd
else:
if Cmd.split()[0] == "make":
FullCmd = Cmd + " -j" + str(multiprocessing.cpu_count())
else:
FullCmd = Cmd
#print(FullCmd)
p = subprocess.Popen(shlex.split(FullCmd),
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
out, err = p.communicate()
p.wait()
return p.returncode, out, err
else:
print("TODO: non-blocking execute", file=sys.stderr)
class EnvBuilder:
def CheckTestSuiteCmake(self, WorkerID):
"""
return LitTestDict: { target-name: .test-loc }
"""
llvmSrc = os.getenv("LLVM_THESIS_HOME", "Error")
if llvmSrc == "Error":
print("$LLVM_THESIS_HOME or not defined.", file=sys.stderr)
sys.exit(1)
TestSrc = llvmSrc + "/test-suite/build-worker-" + WorkerID
PrevWd = os.getcwd()
# if the cmake is not done, do it once.
if not os.path.isdir(TestSrc):
os.mkdir(TestSrc)
os.chdir(TestSrc)
'''
ex.
cmake -DCMAKE_C_COMPILER=/home/jrchang/workspace/llvm-thesis/build-release-gcc7-worker1/bin/clang -DCMAKE_CXX_COMPILER=/home/jrchang/workspace/llvm-thesis/build-release-gcc7-worker1/bin/clang++ ../
'''
cBinSrc = llvmSrc + "/build-release-gcc7-worker" + WorkerID + "/bin/clang"
cxxBinSrc = cBinSrc + "++"
cmd = "cmake -DCMAKE_C_COMPILER=" + cBinSrc + " -DCMAKE_CXX_COMPILER=" + cxxBinSrc + " ../"
            ret, _, _ = ExecuteCmd(WorkerID=WorkerID, Cmd=cmd, Block=True)
            os.chdir(PrevWd)
            if ret != 0:
print("cmake failed.", file=sys.stderr)
sys.exit(1)
# Build .test dict for verification and run
LitTestDict = {}
'''
only add the "measurable targets"
'''
MeasurableRec = os.getenv("LLVM_THESIS_Random_LLVMTestSuiteScript", "Error")
MeasurableRec = \
MeasurableRec + '/GraphGen/output/newMeasurableStdBenchmarkMeanAndSigma'
MeasurableList = []
with open(MeasurableRec, 'r') as f:
for line in f:
MeasurableList.append(line.split(';')[0].split('/')[-1].strip())
for root, dirs, files in os.walk(TestSrc):
for file in files:
if file.endswith(".test"):
name = file[:-5]
if name in MeasurableList:
path = os.path.join(root, file)
LitTestDict[name] = path
return LitTestDict
def workerMake(self, args):
"""
Input: args(tuple):
[0]:WorkerID
[1]:BuildTarget
[2]:ParallelBuild <---This arg is optional
(Default is using taskset to build on a core)
Return a int:
a number that indicate status.
0 --> build success
others --> build failed
"""
PrevWd = os.getcwd()
WorkerID = args[0]
BuildTarget = args[1]
ParallelBuild = False
if len(args) > 2:
ParallelBuild = args[2]
ret = -1
'''
build
'''
llvmSrc = os.getenv("LLVM_THESIS_HOME", "Error")
TestSrc = llvmSrc + "/test-suite/build-worker-" + WorkerID
os.chdir(TestSrc)
cmd = "make " + BuildTarget
ret, _, _ = ExecuteCmd(WorkerID=WorkerID, Cmd=cmd, Block=True, ParallelBuild=ParallelBuild)
        sys.exit(ret)  # propagate the build status via the process exit code, which LimitTimeExec reads
def make(self, WorkerID, BuildTarget, ParallelBuild=False):
"""
return a number:
0 --> build success
others --> build failed
"""
isKilled, ret = LimitTimeExec(900, self.workerMake, WorkerID, BuildTarget, ParallelBuild)
if isKilled or ret != 0:
return -1
else:
return 0
def workerVerify(self, args):
"""
Input(tuple):
[0]:WorkerID
[1]:TestLoc
Return a int:
a number that indicate status.
            0 --> verification success
            others --> verification failed
"""
ret = -1
WorkerID = args[0]
TestLoc = args[1]
Lit = os.getenv("LLVM_THESIS_lit", "Error")
if Lit == "Error":
print("$LLVM_THESIS_lit not defined.", file=sys.stderr)
sys.exit(1)
cmd = Lit + " -q " + TestLoc
_, out, err = ExecuteCmd(WorkerID=WorkerID, Cmd=cmd, Block=True)
if out:
ret = -1
else:
ret = 0
        sys.exit(ret)  # propagate the verification status via the process exit code, which LimitTimeExec reads
def verify(self, WorkerID, TestLoc):
"""
return a number:
0 --> success and correct
others --> failed
"""
isKilled, ret = LimitTimeExec(500, self.workerVerify, WorkerID, TestLoc)
if isKilled or ret != 0:
return -1
else:
return 0
def distributePyActor(self, TestFilePath):
"""
return 0 for success
return -1 for failure.
"""
Log = LogService()
# Does this benchmark need stdin?
NeedStdin = False
with open(TestFilePath, "r") as TestFile:
for line in TestFile:
if line.startswith("RUN:"):
if line.find("<") != -1:
NeedStdin = True
break
TestFile.close()
# Rename elf and copy actor
ElfPath = TestFilePath.replace(".test", '')
NewElfPath = ElfPath + ".OriElf"
#based on "stdin" for to copy the right ones
InstrumentSrc = os.getenv("LLVM_THESIS_InstrumentHome", "Error")
if NeedStdin == True:
PyCallerLoc = InstrumentSrc + '/PyActor/WithStdin/PyCaller'
PyActorLoc = InstrumentSrc + '/PyActor/WithStdin/MimicAndFeatureExtractor.py'
else:
PyCallerLoc = InstrumentSrc + '/PyActor/WithoutStdin/PyCaller'
PyActorLoc = InstrumentSrc + '/PyActor/WithoutStdin/MimicAndFeatureExtractor.py'
try:
# Rename the real elf
shutil.move(ElfPath, NewElfPath)
# Copy the feature-extractor
shutil.copy2(PyActorLoc, ElfPath + ".py")
except Exception as e:
print("distributePyActor() errors, Reasons:\n{}".format(e))
return -1
# Copy the PyCaller
if os.path.exists(PyCallerLoc) == True:
shutil.copy2(PyCallerLoc, ElfPath)
else:
Log.err("Please \"$ make\" to get PyCaller in {}\n".format(PyCallerLoc))
return -1
return 0 #success
def run(self, WorkerID, TestLoc):
ret = self.verify(WorkerID, TestLoc)
return ret
class EnvResponseActor:
def EnvEcho(self, BuildTarget, WorkerID, LitTestDict, ParallelBuild=False):
"""
return "Success" or "Failed"
"""
testLoc = LitTestDict[BuildTarget]
retString = "Success"
'''
remove previous build and build again
'''
env = EnvBuilder()
'''
ex1. RUN: /llvm/test-suite/build-worker-1/SingleSource/Benchmarks/Dhrystone/dry
ex2. RUN: cd /home/jrchang/workspace/llvm-thesis/test-suite/build-worker-1/MultiSource/Applications/sqlite3 ; /home/jrchang/workspace/llvm-thesis/test-suite/build-worker-1/MultiSource/Applications/sqlite3/sqlite3 -init /home/jrchang/workspace/llvm-thesis/test-suite/MultiSource/Applications/sqlite3/sqlite3rc :memory: < /home/jrchang/workspace/llvm-thesis/test-suite/MultiSource/Applications/sqlite3/commands
'''
with open(testLoc, "r") as file:
fileCmd = file.readline()
file.close()
MultiCmdList = fileCmd.split(';')
if len(MultiCmdList) == 1:
# cases like ex1.
BuiltBin = fileCmd.split()[1]
else:
# cases like ex2.
BuiltBin = MultiCmdList[1].strip().split()[0]
'''
        Removing the binary does not ensure it will be built again;
        therefore, we must use "make clean".
'''
binName = BuiltBin.split('/')[-1]
dirPath = BuiltBin[:-(len(binName) + 1)]
prevWd = os.getcwd()
'''
print("fileCmd={}".format(fileCmd))
print("BuiltBin={}".format(BuiltBin))
print("dirPath={}".format(dirPath))
print("binName={}".format(binName))
'''
os.chdir(dirPath)
os.system("make clean")
os.chdir(prevWd)
# remove feature file
FeatureFile = '/tmp/PredictionDaemon/worker-{}/features'.format(WorkerID)
if os.path.exists(FeatureFile):
os.remove(FeatureFile)
'''
build
assuming the proper cmake is already done.
'''
ret = env.make(WorkerID, BuildTarget, ParallelBuild)
if ret != 0:
print("Failed sent.")
return "Failed"
'''
verify
'''
ret = env.verify(WorkerID, testLoc)
if ret != 0:
print("Failed sent.")
return "Failed"
'''
distribute PyActor
'''
ret = env.distributePyActor(testLoc)
if ret != 0:
print("Failed sent.")
return "Failed"
'''
run and extract performance
The return value from env.run() can be ignored.
We already use env.verify() to verify it.
'''
ret = env.run(WorkerID, testLoc)
return retString
class LogService():
def __init__(self):
pass
def outNotToFile(self, msg):
print(msg, end="", file=sys.stdout)
def FileWriter(self, path, msg):
file = open(path, "a")
fcntl.flock(file, fcntl.LOCK_EX)
file.write(msg)
fcntl.flock(file, fcntl.LOCK_UN)
file.close()
def out(self, msg):
self.outNotToFile(msg)
def err(self, msg):
self.out(msg)
#self.FileWriter("/tmp/PredictionDaemon.err", msg)
class ConnectInfoService():
def getConnectDict(self, path):
'''
return Dict[WorkerID] = ["RemoteEnv-ip", "RemoteEnv-port"]
'''
Dict = {}
with open(path, "r") as file:
# skip the header line
file.readline()
for line in file:
info = line.split(",")
strippedInfo = []
for subInfo in info:
strippedInfo.append(subInfo.strip())
Dict[strippedInfo[0]] = [strippedInfo[1], strippedInfo[2]]
file.close()
return Dict
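# A minimal sketch of the connect-info file layout assumed by getConnectDict above
# (one header line, then "WorkerID, ip, port" per line; the values are hypothetical):
#
#   WorkerID, RemoteEnv-ip, RemoteEnv-port
#   1, 192.168.1.10, 5000
#   2, 192.168.1.11, 5000
#
#   Dict = ConnectInfoService().getConnectDict("connect.csv")
#   # Dict["1"] == ["192.168.1.10", "5000"]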
|
spoof.py
|
import time
import threading
from scapy.all import ARP, send # pylint: disable=no-name-in-module
from .host import Host
from evillimiter.common.globals import BROADCAST
class ARPSpoofer(object):
def __init__(self, interface, gateway_ip, gateway_mac):
self.interface = interface
self.gateway_ip = gateway_ip
self.gateway_mac = gateway_mac
# interval in s spoofed ARP packets are sent to targets
self.interval = 2
self._hosts = set()
self._hosts_lock = threading.Lock()
self._running = False
def add(self, host):
with self._hosts_lock:
self._hosts.add(host)
host.spoofed = True
def remove(self, host, restore=True):
with self._hosts_lock:
self._hosts.discard(host)
if restore:
self._restore(host)
host.spoofed = False
def start(self):
thread = threading.Thread(target=self._spoof, args=[], daemon=True)
self._running = True
thread.start()
def stop(self):
self._running = False
def _spoof(self):
while self._running:
self._hosts_lock.acquire()
            # copy the host set so the lock is held only briefly
hosts = self._hosts.copy()
self._hosts_lock.release()
for host in hosts:
if not self._running:
return
self._send_spoofed_packets(host)
time.sleep(self.interval)
def _send_spoofed_packets(self, host):
# 2 packets = 1 gateway packet, 1 host packet
packets = [
ARP(op=2, psrc=host.ip, pdst=self.gateway_ip, hwdst=self.gateway_mac),
ARP(op=2, psrc=self.gateway_ip, pdst=host.ip, hwdst=host.mac)
]
[send(x, verbose=0, iface=self.interface) for x in packets]
def _restore(self, host):
"""
Remaps host and gateway to their actual addresses
"""
# 2 packets = 1 gateway packet, 1 host packet
packets = [
ARP(op=2, psrc=host.ip, hwsrc=host.mac, pdst=self.gateway_ip, hwdst=BROADCAST),
ARP(op=2, psrc=self.gateway_ip, hwsrc=self.gateway_mac, pdst=host.ip, hwdst=BROADCAST)
]
[send(x, verbose=0, iface=self.interface, count=3) for x in packets]
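# A minimal usage sketch (requires root privileges; `host` is a hypothetical Host
# object exposing .ip and .mac, as used by _send_spoofed_packets above):
#
#   spoofer = ARPSpoofer("eth0", "192.168.1.1", "aa:bb:cc:dd:ee:ff")
#   spoofer.start()
#   spoofer.add(host)       # start poisoning this host's ARP cache
#   ...
#   spoofer.remove(host)    # restores the real ARP mapping by default
#   spoofer.stop()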
|
btcproxy.py
|
""" A bitcoind proxy that allows instrumentation and canned responses
"""
from flask import Flask, request
from bitcoin.rpc import JSONRPCError
from bitcoin.rpc import RawProxy as BitcoinProxy
from utils import BitcoinD
from cheroot.wsgi import Server
from cheroot.wsgi import PathInfoDispatcher
import decimal
import flask
import json
import logging
import os
import threading
class DecimalEncoder(json.JSONEncoder):
"""By default json.dumps does not handle Decimals correctly, so we override it's handling
"""
def default(self, o):
if isinstance(o, decimal.Decimal):
return float(o)
return super(DecimalEncoder, self).default(o)
class ProxiedBitcoinD(BitcoinD):
def __init__(self, bitcoin_dir, proxyport=0):
BitcoinD.__init__(self, bitcoin_dir, rpcport=None)
self.app = Flask("BitcoindProxy")
self.app.add_url_rule("/", "API entrypoint", self.proxy, methods=['POST'])
self.proxyport = proxyport
self.mocks = {}
def _handle_request(self, r):
conf_file = os.path.join(self.bitcoin_dir, 'bitcoin.conf')
brpc = BitcoinProxy(btc_conf_file=conf_file)
method = r['method']
# If we have set a mock for this method reply with that instead of
# forwarding the request.
        if method in self.mocks and isinstance(self.mocks[method], dict):
return self.mocks[method]
elif method in self.mocks and callable(self.mocks[method]):
return self.mocks[method](r)
try:
reply = {
"result": brpc._call(r['method'], *r['params']),
"error": None,
"id": r['id']
}
except JSONRPCError as e:
reply = {
"error": e.error,
"id": r['id']
}
return reply
def proxy(self):
r = json.loads(request.data.decode('ASCII'))
if isinstance(r, list):
reply = [self._handle_request(subreq) for subreq in r]
else:
reply = self._handle_request(r)
reply = json.dumps(reply, cls=DecimalEncoder)
logging.debug("Replying to %r with %r", r, reply)
response = flask.Response(reply)
response.headers['Content-Type'] = 'application/json'
return response
def start(self):
d = PathInfoDispatcher({'/': self.app})
self.server = Server(('0.0.0.0', self.proxyport), d)
self.proxy_thread = threading.Thread(target=self.server.start)
self.proxy_thread.daemon = True
self.proxy_thread.start()
BitcoinD.start(self)
# Now that bitcoind is running on the real rpcport, let's tell all
# future callers to talk to the proxyport. We use the bind_addr as a
# signal that the port is bound and accepting connections.
while self.server.bind_addr[1] == 0:
pass
self.proxiedport = self.rpcport
self.rpcport = self.server.bind_addr[1]
logging.debug("bitcoind reverse proxy listening on {}, forwarding to {}".format(
self.rpcport, self.proxiedport
))
def stop(self):
BitcoinD.stop(self)
self.server.stop()
self.proxy_thread.join()
def mock_rpc(self, method, response=None):
"""Mock the response to a future RPC call of @method
The response can either be a dict with the full JSON-RPC response, or a
function that returns such a response. If the response is None the mock
is removed and future calls will be passed through to bitcoind again.
"""
if response is not None:
self.mocks[method] = response
elif method in self.mocks:
del self.mocks[method]
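# A minimal usage sketch of mock_rpc (hypothetical method and values):
#
#   proxy = ProxiedBitcoinD(bitcoin_dir='/tmp/bitcoind-test/', proxyport=5000)
#   proxy.start()
#   proxy.mock_rpc('getblockcount', lambda req: {"result": 1000,
#                                                "error": None,
#                                                "id": req["id"]})
#   ...
#   proxy.mock_rpc('getblockcount')   # passing no response removes the mock again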
# The main entrypoint is mainly used to test the proxy. It is not used during
# lightningd testing.
if __name__ == "__main__":
p = ProxiedBitcoinD(bitcoin_dir='/tmp/bitcoind-test/', proxyport=5000)
p.start()
p.proxy_thread.join()
|
views.py
|
import threading
import time
from news.models import News
from news.scrape import Scrape
from django.shortcuts import render
# Web scrape threading
def ScrapeThreading():
    # Run Scrape() periodically in a background daemon thread so requests are never blocked
    def loop():
        while True:
            Scrape()
            time.sleep(600)  # scrape every 10 minutes
    t1 = threading.Thread(target=loop, daemon=True)  # daemon thread runs in background
    t1.start()
ScrapeThreading()
# Show News to user
def NewsView(request):
search = request.GET.get('search')
    if not search:
result = News.objects.order_by('-time')[:30]
number = result.count()
else:
print(search)
result = News.objects.filter(heading__icontains=search).order_by('-time')[:30]
number = result.count()
data = {
'news': result,
'number': number,
}
return render(request, 'news.html', data)
|
cmd.py
|
from subprocess import check_output
import subprocess
import threading
import locale
import os
kubectlCommand = os.environ["KUBETERMINAL_CMD"]
#execute kubectl commands
def executeCmd(cmd):
#TODO: if output is very long, this will hang until it is done
output = ""
try:
output = check_output(cmd,shell=True,stderr=subprocess.STDOUT,timeout=30)
output = output.decode('utf-8')
except subprocess.CalledProcessError as E:
output = E.output.decode('utf-8')
except subprocess.TimeoutExpired as E:
output = E.output.decode('utf-8')
output = "TIMEOUT when executing %s\n\n%s" % (cmd, output)
except:
#catch all exception including decoding errors
#assume decoding error
system_encoding = locale.getpreferredencoding()
output = output.decode(system_encoding)
return output
def executeBackgroudCmd(cmd):
'''Execute command in background thread. Does not print output.'''
#Thanks go to: http://sebastiandahlgren.se/2014/06/27/running-a-method-as-a-background-thread-in-python/
class BackgroundProcess(object):
""" Background process class
The run() method will be started and it will run in the background
"""
def __init__(self, cmd):
self.cmd = cmd
thread = threading.Thread(target=self.run, args=())
thread.daemon = True # Daemonize thread
thread.start() # Start the execution
def run(self):
output = ""
try:
output = check_output(self.cmd,shell=True,stderr=subprocess.STDOUT,timeout=30)
output = output.decode('utf-8')
except subprocess.CalledProcessError as E:
output = E.output.decode('utf-8')
except subprocess.TimeoutExpired as E:
output = E.output.decode('utf-8')
output = "TIMEOUT when executing %s\n\n%s" % (cmd, output)
BackgroundProcess(cmd)
return "Delete pod started in background. Refresh pod list to see status."
def deletePod(podName,namespace,force):
cmd = kubectlCommand + " delete pod " + podName
cmd=cmd + " -n " + namespace
if (force == True):
cmd=cmd + " --grace-period=0 --force"
output = executeBackgroudCmd(cmd)
return output
def describePod(podName,namespace,options):
cmd = kubectlCommand + " describe pod " + podName
cmd=cmd +" -n "+namespace +" "+ options
output = executeCmd(cmd)
return output
def getPodYaml(podName,namespace):
cmd = kubectlCommand + " get pod " + podName
cmd=cmd+" -n " + namespace
cmd=cmd+" -o yaml "
output = ""
output = executeCmd(cmd)
return output
def getPodJSON(podName,namespace):
cmd = kubectlCommand + " get pod " + podName
cmd=cmd+" -n " + namespace
cmd=cmd+" -o json "
output = ""
output = executeCmd(cmd)
return output
def getPodLabels(podName,namespace):
resourceType = "pod"
cmd = kubectlCommand + " get %s %s -n %s --show-labels" % (resourceType, podName, namespace)
output = executeCmd(cmd)
return output
def getTop(podName,namespace,cmdString,isAllNamespaces=False):
cmd=None
if cmdString.find("-c") > -1:
#show top of selected pod and containers
cmd = kubectlCommand + " top pod %s -n %s --containers" % (podName,namespace)
if cmdString.find("-n") > -1:
#show top of nodes
cmd = kubectlCommand + " top nodes"
if cmdString.find("-l") > -1:
#show top of given labels
label=cmdString.split()[2]
cmd = kubectlCommand + " top pod -n %s -l %s" % (namespace,label)
    if cmd is None:
        if isAllNamespaces:
cmd = kubectlCommand + " top pods --all-namespaces"
else:
cmd = kubectlCommand + " top pods -n %s" % namespace
output = executeCmd(cmd)
return output
def execCmd(podName,namespace,command):
cmd = kubectlCommand + " exec " + podName
cmd=cmd+" -n " + namespace
if (command.find("-c")==0):
#there is container
commandList=command.split()
#first is -c
#second is container name
containerName=commandList[1]
cmd=cmd+" -c %s -- %s " % (containerName," ".join(commandList[2:]))
else:
cmd=cmd+" -- " + command
output = executeCmd(cmd)
return output
def logsPod(podName,namespace,options):
cmd = kubectlCommand + " logs " + podName
cmd=cmd +" -n "+namespace +" "+options
output = executeCmd(cmd)
return output
def getNodes(noderole=None):
cmd = kubectlCommand + " get nodes "
    if noderole is not None:
cmd = "%s -l node-role.kubernetes.io/%s" % (cmd,noderole)
output = executeCmd(cmd+" --no-headers")
return output
def describeNode(nodeName):
cmd = kubectlCommand + " describe node \"%s\" " % nodeName
output = executeCmd(cmd)
return output
def getDescribeNodes(noderole=None):
cmd = kubectlCommand + " describe nodes "
    if noderole is not None:
cmd = "%s -l node-role.kubernetes.io/%s" % (cmd,noderole)
output = executeCmd(cmd)
return output
def getPods(namespace,nodeNameList=[]):
cmd = kubectlCommand + " get pods "
if namespace == "all-namespaces":
cmd=cmd+"--"+namespace
else:
cmd=cmd+"-n "+namespace
cmd=cmd+" -o wide "
cmd=cmd+" --no-headers"
output = ""
    if nodeNameList:
#get pods for specified nodes
for nodeName in nodeNameList:
cmd2="%s --field-selector spec.nodeName=%s" % (cmd,nodeName)
output2 = executeCmd(cmd2)
if output2.lower().find("no resources found") == -1:
output = output + output2
else:
output = executeCmd(cmd)
return output
def getNamespaces():
namespaces=[]
output = executeCmd(kubectlCommand + " get namespaces --no-headers")
if output.find("namespaces is forbidden") > -1:
#OpenShift does not allow normal users to list namespaces
#OpenShift has resource project that can be used
output = executeCmd(kubectlCommand + " get projects --no-headers")
for line in output.split('\n'):
fields = line.split()
if len(fields) > 0:
namespaces.append(fields[0])
return namespaces
def getResources(resourceType, namespace):
contentList=[]
namespaceOption = " -n %s " % namespace
allNamespaceOption = ""
if namespace == "all-namespaces":
namespaceOption = ""
allNamespaceOption = "--all-namespaces"
output = executeCmd(kubectlCommand + " %s get %s --no-headers %s" % (namespaceOption, resourceType, allNamespaceOption))
for line in output.split('\n'):
if len(line.split()) > 0:
contentList.append(line)
# fields = line.split()
# if len(fields) > 0:
# services.append(fields[0])
return contentList
def getContexts():
contentList=[]
output = executeCmd(kubectlCommand + " config get-contexts -o name")
for line in output.split('\n'):
if len(line.split()) > 0:
contentList.append(line)
return contentList
def getCurrentContext():
contentList=[]
output = executeCmd(kubectlCommand + " config current-context")
return output.strip()
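# A minimal usage sketch (hypothetical pod/namespace names; assumes this file is
# importable as `cmd`). KUBETERMINAL_CMD must point at the kubectl binary before import:
#
#   export KUBETERMINAL_CMD=kubectl      # in the shell, before starting the app
#
#   import cmd
#   print(cmd.getNamespaces())
#   print(cmd.getPods("default"))
#   print(cmd.describePod("my-pod-abc123", "default", ""))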
|
ProbabilitySim.py
|
import time
import multiprocessing
import random
"""
Probability Simulator, provides different kind of "pools" to get items from
AbstractPool - AbstractClass just to make sure all pools implement methods
All pools return tuple.
Has clone ability to make new of the same object for in use of Simulator Class
"""
class AbstractPool():
def __init__(self) -> None:
self.items = None
def has_items(self,):
raise NotImplementedError()
def add(self,):
raise NotImplementedError()
def get(self,):
raise NotImplementedError()
def clone(self,):
raise NotImplementedError()
class ConstantPool(AbstractPool):
"Pool where there is only limited polls, and roll whether rolled item is success"
def __init__(self, rolls=3000) -> None:
self.items = {}
self.random = random.Random(time.time())
self.rolls = rolls
def has_items(self,) -> bool:
return 0<self.rolls
def add(self, weight : float, item : str) -> None:
if weight not in self.items:
self.items[weight] = []
self.items[weight].append(item)
def weights(self,) -> dict:
return sorted(self.items.keys(), reverse=True)
def get(self,) -> tuple:
tmp = self.random.uniform(0, 1)
weight = False
for i in self.weights():
if tmp < i:
weight = i
self.rolls -= 1
if weight:
return ((weight, self.random.choice(self.items[weight])), True)
else:
return ((None, None), None)
def clone(self):
tmp = ConstantPool(self.rolls)
tmp.items = self.items
return tmp
class WeightedPool(AbstractPool):
"""
    WeightedPool - Rolls a number to decide whether you get an item, depending on the weights you put in. If, for example, the items are:
0.5 : 3
0.25 : 3
0.1 : 3
You roll 0.6, you get None
You roll 0.4, you get 0.5, after that 0.5 items decrease by one, and if it reaches zero, it's deleted from the pool
You roll 0.01 you get 0.1, same behavior as in 0.5
You roll 0.12 you get 0.25, same behavior as in 0.5
"""
def __init__(self,) -> None:
self._items = None
self.items = {}
self.random = random.Random(time.time())
def has_items(self,) -> bool:
return True if self.items else False
def add(self, weight, amount) -> None:
if weight not in self.items:
self.items[weight] = 0
self.items[weight] += amount
def weights(self,) -> dict:
return sorted(self.items.keys(), reverse=True)
def get(self,) -> tuple:
        if self._items is None:
            self._items = dict(self.items)
tmp = self.random.uniform(0, 1)
weight = False
for i in self.weights():
if tmp < i:
if self.items[i] != 0:
weight = i
else:
del self.items[i]
if weight:
if self.items[weight] != 0:
self.items[weight] -= 1
else:
del self.items[weight]
return (weight, True)
else:
return (None, None)
def clone(self,) -> AbstractPool:
if self._items is None:
self._items = dict(self.items)
tmp = WeightedPool()
tmp.items = dict(self._items)
return tmp
class BoolWeightedPool(AbstractPool):
"""
    BoolWeightedPool - Always draws one item, but the draw can result in failure if the item's weight is below the rolled number. If, for example, the items are:
[0.5, 0.25, 0.1]
You request it, you get whether particular weight was successful or not.
It gets 0.25 and you roll 0.12, that is success so it returns tuple of (0.25, True). Item is also now removed from the pool
It gets 0.5 and you roll 0.6, that is failure so it returns tuple of (0.5, False). Same behavior as above.
"""
def __init__(self) -> None:
self._items = None
self._weights = set()
self.items = []
self.random = random.Random(time.time())
def has_items(self,) -> bool:
return True if self.items else False
def pool_count(self,):
return len(self.items)
def add(self, weight, amount) -> None:
self._weights.add(weight)
for i in range(0, amount):
self.items.append(weight)
def weights(self,) -> dict:
return sorted(list(self._weights), reverse=True)
def get(self,) -> tuple:
if self._items is None:
self._items = list(self.items)
if self.items:
self.random.shuffle(self.items)
weight = self.items.pop()
rng = self.random.uniform(0, 1)
success = False
if rng < weight:
success = True
if not success:
self.add(weight, 1)
return (weight, success)
return (None, None)
def reset(self,):
self.items = []
self._items = []
self._weights = set()
def clone(self,) -> AbstractPool:
if self._items is None:
self._items = list(self.items)
tmp = BoolWeightedPool()
tmp._weights = set(self._weights)
tmp.items = list(self._items)
return tmp
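# A minimal usage sketch for the weighted pools (hypothetical weights and amounts):
#
#   pool = WeightedPool()
#   pool.add(0.5, 3)                   # three items with a 50% success weight
#   pool.add(0.1, 3)
#   weight, success = pool.get()       # e.g. (0.5, True), or (None, None) on a miss
#
#   bool_pool = BoolWeightedPool()
#   bool_pool.add(0.25, 2)
#   weight, success = bool_pool.get()  # draws one item; it stays removed only on success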
class _Simulation():
"""
Uses different pools to get successes, failures etc
"""
def __init__(self, pool: AbstractPool) -> None:
self.pool = pool
def _set_pool(self, pool: AbstractPool) -> None:
self.pool = pool
def run(self,) -> tuple:
is_constant_pool = isinstance(self.pool, ConstantPool)
compilation = {}
items = []
while self.pool.has_items():
weight, success = self.pool.get()
if is_constant_pool:
weight, item = weight
if weight is not None:
items.append((weight, item))
continue
if weight is not None:
if weight not in compilation:
compilation[weight] = {"successes" : 0, "failures" : 0, "total" : 0}
if success:
compilation[weight]["successes"] += 1
else:
compilation[weight]["failures"] += 1
compilation[weight]["total"] += 1
if is_constant_pool:
return items
combined_success = 0
combined_failures = 0
combined_total = 0
for weight, data in compilation.items():
if weight != 1:
combined_success += data["successes"]
combined_failures += data["failures"]
combined_total += data["total"]
return (combined_success, combined_failures, combined_total, compilation)
class _BulkSimulation(_Simulation):
"""
Same as Simulation, but this runs it in loops
"""
def __init__(self, pool: AbstractPool) -> None:
super().__init__(pool)
self.clone = pool.clone()
def run(self, amount : int, data=None) -> None:
combined_success = 0
combined_failures = 0
combined_total = 0
combined_data = {}
items = []
is_constant_pool = isinstance(self.clone, ConstantPool)
for i in range(amount):
if is_constant_pool:
items.extend(super().run())
tmp = self.clone.clone()
super()._set_pool(tmp)
else:
success, failures, total, _data = super().run()
combined_success += success
combined_failures += failures
combined_total += total
if not combined_data:
combined_data = _data
else:
for weight, iter_data in _data.items():
combined_data[weight]["successes"] += iter_data["successes"]
combined_data[weight]["failures"] += iter_data["failures"]
combined_data[weight]["total"] += iter_data["total"]
tmp = self.clone.clone()
super()._set_pool(tmp)
if is_constant_pool:
if data:
data.extend(items)
return items
for weight, iter_data in combined_data.items():
if data:
data[3][weight]["successes"] += iter_data["successes"]
data[3][weight]["failures"] += iter_data["failures"]
data[3][weight]["total"] += iter_data["total"]
if data:
data[0] += combined_success
data[1] += combined_failures
data[2] += combined_total
print(f"\nTotal: {combined_total}")
print(f"Success: {combined_success}", f"Failures: {combined_failures}")
print(f"Rate: {(combined_success/combined_total)*100}%")
return (combined_success, combined_failures, combined_total, combined_data)
class _ThreadedBulkSimulation():
"""
    Same as _BulkSimulation, but uses multiprocessing to really speed it up; much faster for higher counts.
    Probably won't be used in any bot command, as it is also very heavy.
"""
def __init__(self, pool: AbstractPool) -> None:
self.pool = pool.clone()
self.simulators = set()
self.per_thread = 3000
self.processes = []
self.is_constant_pool = isinstance(self.pool, ConstantPool)
def run(self, amount : int):
if amount < self.per_thread:
            bulksim = _BulkSimulation(self.pool.clone())
bulksim.run(amount)
else:
manager = multiprocessing.Manager()
data = manager.list()
if not self.is_constant_pool:
data.append(0)
data.append(0)
data.append(0)
data.append(manager.dict())
for i in self.pool.weights():
data[3][i] = {
"failures" : 0,
"successes" : 0,
"total" : 0
}
thread_count = amount//self.per_thread
for i in range(thread_count):
tmp = self.pool.clone()
                bulksim = _BulkSimulation(tmp)
self.simulators.add(bulksim)
tmp = multiprocessing.Process(target=bulksim.run, args=(self.per_thread, data,))
self.processes.append(tmp)
tmp.start()
if 0 < amount%self.per_thread:
                bulksim = _BulkSimulation(self.pool.clone())
bulksim.run(amount%self.per_thread, data)
for i in self.processes:
i.join()
success = data[0]
failures = data[1]
total = data[2]
specific = data[3]
# for weight, it_data in specific.items():
# print(f"Weight: {weight}", f"Total: {it_data['total']}")
# print(f"Success: {it_data['successes']}", f"Failures: {it_data['failures']}")
# print(f"Rate: {(it_data['successes']/it_data['total'])*100}%\n\n")
print(f"\nTotal: {total}")
print(f"Success: {success}", f"Failures: {failures}")
print(f"Rate: {(success/total)*100}%")
if __name__ == '__main__':
limited = ConstantPool(3000000)
limited.add(1/256, "Toxic")
limited.add(1/256, "Some other thing")
limited.add(1/30, "Yew")
# sim = ThreadedBulkSimulation(limited)
# start = time.time()
# sim.run(5)
# end = time.time()-start
# print(f"ThreadedBulk: {end}s")
sim2 = _BulkSimulation(limited)
start = time.time()
sim2.run(2)
end = time.time()-start
print(f"Bulk: {end}s")
|
views.py
|
# project/users/views.py
#################
#### imports ####
#################
from flask import render_template, Blueprint, request, redirect, url_for, flash, abort
from sqlalchemy.exc import IntegrityError
from flask_login import login_user, current_user, login_required, logout_user
from flask_mail import Message
from threading import Thread
from itsdangerous import URLSafeTimedSerializer
from datetime import datetime
from twilio.rest import TwilioRestClient
from .forms import RegisterForm, LoginForm, EmailForm, PasswordForm
from project import db, mail, app
from project.models import User
################
#### config ####
################
users_blueprint = Blueprint('users', __name__)
##########################
#### helper functions ####
##########################
def flash_errors(form):
for field, errors in form.errors.items():
for error in errors:
flash(u"Error in the %s field - %s" % (
getattr(form, field).label.text,
error
), 'info')
def send_async_email(msg):
with app.app_context():
mail.send(msg)
def send_email(subject, recipients, html_body):
msg = Message(subject, recipients=recipients)
msg.html = html_body
thr = Thread(target=send_async_email, args=[msg])
thr.start()
def send_confirmation_email(user_email):
confirm_serializer = URLSafeTimedSerializer(app.config['SECRET_KEY'])
confirm_url = url_for(
'users.confirm_email',
token=confirm_serializer.dumps(user_email, salt='email-confirmation-salt'),
_external=True)
html = render_template(
'email_confirmation.html',
confirm_url=confirm_url)
send_email('Confirm Your Email Address', [user_email], html)
def send_password_reset_email(user_email):
password_reset_serializer = URLSafeTimedSerializer(app.config['SECRET_KEY'])
password_reset_url = url_for(
'users.reset_with_token',
token = password_reset_serializer.dumps(user_email, salt='password-reset-salt'),
_external=True)
html = render_template(
'email_password_reset.html',
password_reset_url=password_reset_url)
send_email('Password Reset Requested', [user_email], html)
def send_new_user_text_message(new_user_email):
client = TwilioRestClient(app.config['ACCOUNT_SID'], app.config['AUTH_TOKEN'])
message = client.messages.create(
body="Kennedy Family Recipes... new user registered: {}".format(new_user_email), # Message body, if any
to=app.config['ADMIN_PHONE_NUMBER'],
from_=app.config['TWILIO_PHONE_NUMBER']
)
# flash('Text message sent to {}: {}'.format(app.config['ADMIN_PHONE_NUMBER'], message.body), 'success')
return
################
#### routes ####
################
@users_blueprint.route('/register', methods=['GET', 'POST'])
def register():
form = RegisterForm(request.form)
if request.method == 'POST':
if form.validate_on_submit():
try:
new_user = User(form.email.data, form.password.data)
new_user.authenticated = True
db.session.add(new_user)
db.session.commit()
login_user(new_user)
send_confirmation_email(new_user.email)
if 'ACCOUNT_SID' in app.config and not app.config['TESTING']:
send_new_user_text_message(new_user.email)
flash('Thanks for registering! Please check your email to confirm your email address.', 'success')
return redirect(url_for('recipes.user_recipes', recipe_type='All'))
except IntegrityError:
db.session.rollback()
flash('ERROR! Email ({}) already exists.'.format(form.email.data), 'error')
return render_template('register.html', form=form)
@users_blueprint.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm(request.form)
if request.method == 'POST':
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user is not None and user.is_correct_password(form.password.data):
user.authenticated = True
user.last_logged_in = user.current_logged_in
user.current_logged_in = datetime.now()
db.session.add(user)
db.session.commit()
login_user(user)
flash('Thanks for logging in, {}'.format(current_user.email))
return redirect(url_for('recipes.user_recipes', recipe_type='All'))
else:
flash('ERROR! Incorrect login credentials.', 'error')
return render_template('login.html', form=form)
@users_blueprint.route('/logout')
@login_required
def logout():
user = current_user
user.authenticated = False
db.session.add(user)
db.session.commit()
logout_user()
flash('Goodbye!', 'info')
return redirect(url_for('users.login'))
@users_blueprint.route('/confirm/<token>')
def confirm_email(token):
try:
confirm_serializer = URLSafeTimedSerializer(app.config['SECRET_KEY'])
email = confirm_serializer.loads(token, salt='email-confirmation-salt', max_age=3600)
except:
flash('The confirmation link is invalid or has expired.', 'error')
return redirect(url_for('users.login'))
user = User.query.filter_by(email=email).first()
if user.email_confirmed:
flash('Account already confirmed. Please login.', 'info')
else:
user.email_confirmed = True
user.email_confirmed_on = datetime.now()
db.session.add(user)
db.session.commit()
flash('Thank you for confirming your email address!', 'success')
return redirect(url_for('recipes.public_recipes'))
@users_blueprint.route('/reset', methods=["GET", "POST"])
def reset():
form = EmailForm()
if form.validate_on_submit():
try:
user = User.query.filter_by(email=form.email.data).first_or_404()
except:
flash('Invalid email address!', 'error')
return render_template('password_reset_email.html', form=form)
if user.email_confirmed:
send_password_reset_email(user.email)
flash('Please check your email for a password reset link.', 'success')
else:
flash('Your email address must be confirmed before attempting a password reset.', 'error')
return redirect(url_for('users.login'))
return render_template('password_reset_email.html', form=form)
@users_blueprint.route('/reset/<token>', methods=["GET", "POST"])
def reset_with_token(token):
try:
password_reset_serializer = URLSafeTimedSerializer(app.config['SECRET_KEY'])
email = password_reset_serializer.loads(token, salt='password-reset-salt', max_age=3600)
except:
flash('The password reset link is invalid or has expired.', 'error')
return redirect(url_for('users.login'))
form = PasswordForm()
if form.validate_on_submit():
try:
user = User.query.filter_by(email=email).first_or_404()
except:
flash('Invalid email address!', 'error')
return redirect(url_for('users.login'))
user.password = form.password.data
db.session.add(user)
db.session.commit()
flash('Your password has been updated!', 'success')
return redirect(url_for('users.login'))
return render_template('reset_password_with_token.html', form=form, token=token)
@users_blueprint.route('/user_profile')
@login_required
def user_profile():
return render_template('user_profile.html')
@users_blueprint.route('/email_change', methods=["GET", "POST"])
@login_required
def user_email_change():
form = EmailForm()
if request.method == 'POST':
if form.validate_on_submit():
try:
user_check = User.query.filter_by(email=form.email.data).first()
if user_check is None:
user = current_user
user.email = form.email.data
user.email_confirmed = False
user.email_confirmed_on = None
user.email_confirmation_sent_on = datetime.now()
db.session.add(user)
db.session.commit()
send_confirmation_email(user.email)
flash('Email changed! Please confirm your new email address (link sent to new email).', 'success')
return redirect(url_for('users.user_profile'))
else:
flash('Sorry, that email already exists!', 'error')
except IntegrityError:
flash('Error! That email already exists!', 'error')
return render_template('email_change.html', form=form)
@users_blueprint.route('/password_change', methods=["GET", "POST"])
@login_required
def user_password_change():
form = PasswordForm()
if request.method == 'POST':
if form.validate_on_submit():
user = current_user
user.password = form.password.data
db.session.add(user)
db.session.commit()
flash('Password has been updated!', 'success')
return redirect(url_for('users.user_profile'))
return render_template('password_change.html', form=form)
@users_blueprint.route('/resend_confirmation')
@login_required
def resend_email_confirmation():
try:
send_confirmation_email(current_user.email)
flash('Email sent to confirm your email address. Please check your email!', 'success')
except IntegrityError:
flash('Error! Unable to send email to confirm your email address.', 'error')
return redirect(url_for('users.user_profile'))
@users_blueprint.route('/admin_view_users')
@login_required
def admin_view_users():
if current_user.role != 'admin':
abort(403)
else:
users = User.query.order_by(User.id).all()
return render_template('admin_view_users.html', users=users)
return redirect(url_for('users.login'))
|
keep_alive.py
|
from flask import Flask
from threading import Thread
app = Flask("")
@app.route("/")
def home():
return "I am alive!"
def run():
app.run(host="0.0.0.0", port=8080)
def keep_alive():
t = Thread(target=run)
t.start()
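# A minimal usage sketch (hypothetical bot entry point): start the ping server
# before the long-running work so the hosting platform sees the process as alive.
#
#   from keep_alive import keep_alive
#   keep_alive()
#   run_bot()   # hypothetical long-running main loop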
|
monitor.py
|
from pyudevmonitor.monitor import UDevMonitor
from pyudevmonitor.event import UEvent
import queue
import typing
import threading
from loguru import logger
from usb2container.volume import add_volume, remove_volume
from usb2container.config import global_config
ACTION_ADD: str = "add"
ACTION_BIND: str = "bind"
ACTION_REMOVE: str = "remove"
ACTION_UNBIND: str = "unbind"
DEVICE_DICT: typing.Dict[str, UEvent] = dict()
class Consumer(object):
def handle(self, new_event: UEvent):
action: str = new_event.ACTION
dev_path: str = new_event.DEVPATH
# ONLY ADD AND REMOVE
if action not in (ACTION_ADD, ACTION_REMOVE):
return
# ADD
if action == ACTION_ADD:
if dev_path in DEVICE_DICT:
logger.warning(f"device {dev_path} already existed. force cover ...")
if global_config.WITH_DOCKER:
new_event = add_volume(new_event)
DEVICE_DICT[dev_path] = new_event
# REMOVE
elif action == ACTION_REMOVE:
if dev_path not in DEVICE_DICT:
logger.warning(f"device {dev_path} not existed")
else:
cur: UEvent = DEVICE_DICT[dev_path]
if global_config.WITH_DOCKER:
remove_volume(cur)
del DEVICE_DICT[dev_path]
def loop_handle(self, from_queue: queue.Queue) -> typing.Callable:
stop: bool = False
def loop():
while not stop:
new = from_queue.get()
if not new.is_empty():
self.handle(new)
logger.info("loop handle stopped")
def stop_loop():
nonlocal stop
stop = True
threading.Thread(target=loop).start()
return stop_loop
class Monitor(object):
def __init__(self):
self.event_queue: queue.Queue = queue.Queue()
# udev event provider
self.provider = UDevMonitor()
# consumer
self.consumer = Consumer()
def start(self) -> typing.Callable:
self.provider.start()
logger.debug("provider process already setup")
# return value is a stop function
stop_provider = self.provider.loop_read(to=self.event_queue)
logger.debug("start getting events from provider")
stop_consumer = self.consumer.loop_handle(from_queue=self.event_queue)
logger.debug("consumer start handling events")
def stop():
stop_provider()
stop_consumer()
# empty event for stopping
self.event_queue.put(UEvent([]))
return stop
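# A minimal usage sketch (Linux with udev required; Docker handling is toggled by
# global_config.WITH_DOCKER):
#
#   monitor = Monitor()
#   stop = monitor.start()   # returns a callable that stops both provider and consumer
#   ...                      # plug / unplug USB devices; events update DEVICE_DICT
#   stop()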
|
arm_control.py
|
#!/usr/bin/python3
# taken from https://github.com/AcutronicRobotics/mara_examples
import rclpy
from multiprocessing import Process
from rclpy.node import Node
from rclpy.qos import qos_profile_sensor_data
from hrim_actuator_rotaryservo_msgs.msg import GoalRotaryServo
from hrim_actuator_gripper_srvs.srv import ControlFinger
class Gripper(Node):
def __init__(self):
super().__init__('mara_minimal_client')
# Create a client for service "/hrim_actuation_gripper_000000000004/goal"
self.client = self.create_client(ControlFinger, "/hrim_actuator_gripper_000000000004/fingercontrol")
        # Wait for the service to be available before calling it
while not self.client.wait_for_service(timeout_sec=1.0):
self.get_logger().info('service not available, waiting again...')
# Create request with the same type as the service, ControlFinger
self.req = ControlFinger.Request()
def send_request(self):
self.future = self.client.call_async(self.req)
@staticmethod
def run(cmd_queue):
rclpy.init(args=None)
node = Gripper()
node.req.goal_velocity = 9999.
while True:
node.req.goal_angularposition = cmd_queue.get()
# Call service and spin
node.send_request()
rclpy.spin_until_future_complete(node, node.future)
class JointMove(Node):
def __init__(self, x, y, cmd_queue):
# Initialize Node with name "mara_minimal_publisher"
super().__init__('mara_minimal_publisher' + "_" + str(x) + "_" + str(y))
# Create a publisher on topic "/hrim_actuation_servomotor_000000000001/goal_axis1"
# !TODO one class controls all six joints
self.pub_ = self.create_publisher(GoalRotaryServo, '/hrim_actuator_rotaryservo_00000000000' + str(x) + '/goal_axis' + str(y),
qos_profile=qos_profile_sensor_data)
# Create message with the same type as the topic, GoalRotaryServo
self.msg = GoalRotaryServo()
# Create a timer to publish the messages periodically
timer_period = 1.0 # seconds
self.timer = self.create_timer(timer_period, self.timer_callback)
self.cmd_queue = cmd_queue
self.cmd = None
def timer_callback(self):
# Fill message content
if self.cmd is None or not self.cmd_queue.empty():
self.cmd = self.cmd_queue.get()
self.msg.position = self.cmd * 3.1416/180 # Position to rads
self.msg.velocity = 30. # Velocity in rads/s
self.msg.control_type = 4 # Position and velocity control
# Publish message!
self.pub_.publish(self.msg)
@staticmethod
def run(x, y, cmd_queue):
rclpy.init(args=None)
minimal_publisher = JointMove(x, y, cmd_queue)
rclpy.spin(minimal_publisher)
minimal_publisher.destroy_node()
rclpy.shutdown()
def main(args=None):
# cmd_queue = Queue()
# cmd_queue.put(0.7)
# cmd_queue.put(0.05)
#
# #processes = [Process(target=JointMove.run, args=(i, j)) for i in range(1, 4) for j in range(1, 3)]
# processes = []
# processes.append(Process(target=Gripper.run, args=(cmd_queue,)))
#
# for process in processes:
# process.start()
#
# for process in processes:
# process.join()
#
#
pass
if __name__ == '__main__':
main()
|
local_dependency_manager.py
|
from contextlib import closing
from collections import namedtuple
import logging
import os
import threading
import traceback
import time
import shutil
from codalab.lib.formatting import size_str
from codalab.worker.file_util import remove_path, un_tar_directory
from codalab.worker.fsm import BaseDependencyManager, DependencyStage, StateTransitioner
import codalab.worker.pyjson
from codalab.worker.worker_thread import ThreadDict
from codalab.worker.state_committer import JsonStateCommitter
from codalab.worker.bundle_state import DependencyKey
logger = logging.getLogger(__name__)
DependencyState = namedtuple(
'DependencyState', 'stage dependency_key path size_bytes dependents last_used message killed'
)
class DownloadAbortedException(Exception):
"""
Exception raised by the download if a download is killed before it is complete
"""
def __init__(self, message):
super(DownloadAbortedException, self).__init__(message)
class LocalFileSystemDependencyManager(StateTransitioner, BaseDependencyManager):
"""
This dependency manager downloads dependency bundles from Codalab server
to the local filesystem. It caches all downloaded dependencies but cleans up the
old ones if the disk use hits the given threshold
For this class dependencies are uniquely identified by DependencyKey
"""
DEPENDENCIES_DIR_NAME = 'dependencies'
DEPENDENCY_FAILURE_COOLDOWN = 10
# TODO(bkgoksel): The server writes these to the worker_dependencies table, which stores the dependencies
# json as a SqlAlchemy LargeBinary, which defaults to MySQL BLOB, which has a size limit of
# 65K. For now we limit this value to about 58K to avoid any issues but we probably want to do
# something better (either specify MEDIUMBLOB in the SqlAlchemy definition of the table or change
# the data format of how we store this)
MAX_SERIALIZED_LEN = 60000
def __init__(self, commit_file, bundle_service, worker_dir, max_cache_size_bytes):
super(LocalFileSystemDependencyManager, self).__init__()
self.add_transition(DependencyStage.DOWNLOADING, self._transition_from_DOWNLOADING)
self.add_terminal(DependencyStage.READY)
self.add_terminal(DependencyStage.FAILED)
self._state_committer = JsonStateCommitter(commit_file)
self._bundle_service = bundle_service
self._max_cache_size_bytes = max_cache_size_bytes
self.dependencies_dir = os.path.join(
worker_dir, LocalFileSystemDependencyManager.DEPENDENCIES_DIR_NAME
)
if not os.path.exists(self.dependencies_dir):
logger.info('{} doesn\'t exist, creating.'.format(self.dependencies_dir))
os.makedirs(self.dependencies_dir, 0o770)
# Locks for concurrency
self._dependency_locks = dict() # type: Dict[DependencyKey, threading.RLock]
self._global_lock = threading.RLock() # Used for add/remove actions
self._paths_lock = threading.RLock() # Used for path name computations
# File paths that are currently being used to store dependencies. Used to prevent conflicts
self._paths = set()
# DependencyKey -> DependencyState
self._dependencies = dict()
# DependencyKey -> WorkerThread(thread, success, failure_message)
self._downloading = ThreadDict(fields={'success': False, 'failure_message': None})
self._load_state()
self._stop = False
self._main_thread = None
def _save_state(self):
with self._global_lock, self._paths_lock:
self._state_committer.commit({'dependencies': self._dependencies, 'paths': self._paths})
def _load_state(self):
state = self._state_committer.load(default={'dependencies': {}, 'paths': set()})
dependencies = {}
dependency_locks = {}
paths = set()
for dep_key, dep_state in state['dependencies'].items():
full_path = os.path.join(self.dependencies_dir, dep_state.path)
if os.path.exists(full_path):
dependencies[dep_key] = dep_state
dependency_locks[dep_key] = threading.RLock()
else:
logger.info(
"Dependency {} in loaded state but its path {} doesn't exist in the filesystem".format(
dep_key, full_path
)
)
if dep_state.path not in state['paths']:
state['paths'].add(dep_state.path)
logger.info(
"Dependency {} in loaded state but its path {} is not in the loaded paths {}".format(
dep_key, dep_state.path, state['paths']
)
)
for path in state['paths']:
full_path = os.path.join(self.dependencies_dir, path)
if os.path.exists(full_path):
paths.add(path)
else:
logger.info(
"Path {} in loaded state but doesn't exist in the filesystem".format(full_path)
)
with self._global_lock, self._paths_lock:
self._dependencies = dependencies
self._dependency_locks = dependency_locks
self._paths = paths
logger.info(
'{} dependencies, {} paths in cache.'.format(len(self._dependencies), len(self._paths))
)
def start(self):
logger.info('Starting local dependency manager')
def loop(self):
while not self._stop:
try:
self._process_dependencies()
self._save_state()
self._cleanup()
self._save_state()
except Exception:
traceback.print_exc()
time.sleep(1)
self._main_thread = threading.Thread(target=loop, args=[self])
self._main_thread.start()
def stop(self):
logger.info('Stopping local dependency manager')
self._stop = True
self._downloading.stop()
self._main_thread.join()
logger.info('Stopped local dependency manager')
def _process_dependencies(self):
for dep_key, dep_state in self._dependencies.items():
with self._dependency_locks[dep_key]:
self._dependencies[dep_key] = self.transition(dep_state)
def _prune_failed_dependencies(self):
"""
Prune failed dependencies older than DEPENDENCY_FAILURE_COOLDOWN seconds so that further runs
get to retry the download. Without pruning, any future run depending on a
failed dependency would automatically fail indefinitely.
"""
with self._global_lock:
self._acquire_all_locks()
failed_deps = {
dep_key: dep_state
for dep_key, dep_state in self._dependencies.items()
if dep_state.stage == DependencyStage.FAILED
and time.time() - dep_state.last_used
> LocalFileSystemDependencyManager.DEPENDENCY_FAILURE_COOLDOWN
}
for dep_key, dep_state in failed_deps.items():
self._delete_dependency(dep_key)
self._release_all_locks()
def _cleanup(self):
"""
Prune failed dependencies older than DEPENDENCY_FAILURE_COOLDOWN seconds.
Limit the disk usage of the dependencies (both the bundle files and the serialized state file size)
Deletes oldest failed dependencies first and then oldest finished dependencies.
Doesn't touch downloading dependencies.
"""
self._prune_failed_dependencies()
        # Hold all the locks (fast if no cleanup is needed); otherwise make sure nothing is corrupted
while True:
with self._global_lock:
self._acquire_all_locks()
bytes_used = sum(dep_state.size_bytes for dep_state in self._dependencies.values())
serialized_length = len(codalab.worker.pyjson.dumps(self._dependencies))
if (
bytes_used > self._max_cache_size_bytes
or serialized_length > LocalFileSystemDependencyManager.MAX_SERIALIZED_LEN
):
logger.debug(
'%d dependencies in cache, disk usage: %s (max %s), serialized size: %s (max %s)',
len(self._dependencies),
size_str(bytes_used),
size_str(self._max_cache_size_bytes),
size_str(serialized_length),
LocalFileSystemDependencyManager.MAX_SERIALIZED_LEN,
)
ready_deps = {
dep_key: dep_state
for dep_key, dep_state in self._dependencies.items()
if dep_state.stage == DependencyStage.READY and not dep_state.dependents
}
failed_deps = {
dep_key: dep_state
for dep_key, dep_state in self._dependencies.items()
if dep_state.stage == DependencyStage.FAILED
}
if failed_deps:
dep_key_to_remove = min(
failed_deps.items(), key=lambda dep: dep[1].last_used
)[0]
elif ready_deps:
dep_key_to_remove = min(
ready_deps.items(), key=lambda dep: dep[1].last_used
)[0]
else:
logger.info(
'Dependency quota full but there are only downloading dependencies, not cleaning up until downloads are over'
)
break
if dep_key_to_remove:
self._delete_dependency(dep_key_to_remove)
self._release_all_locks()
else:
self._release_all_locks()
break
def _delete_dependency(self, dependency_key):
"""
Remove the given dependency from the manager's state
Also delete any known files on the filesystem if any exist
"""
if self._acquire_if_exists(dependency_key):
try:
                path_to_remove = self._dependencies[dependency_key].path
                self._paths.remove(path_to_remove)
                # dep_state.path is relative to the dependencies dir, so build the full path before removing
                remove_path(os.path.join(self.dependencies_dir, path_to_remove))
except Exception:
pass
finally:
del self._dependencies[dependency_key]
self._dependency_locks[dependency_key].release()
def has(self, dependency_key):
"""
Takes a DependencyKey
Returns true if the manager has processed this dependency
"""
with self._global_lock:
return dependency_key in self._dependencies
def get(self, uuid, dependency_key):
"""
Request the dependency for the run with uuid, registering uuid as a dependent of this dependency
"""
now = time.time()
if not self._acquire_if_exists(dependency_key): # add dependency state if it does not exist
with self._global_lock:
self._dependency_locks[dependency_key] = threading.RLock()
self._dependency_locks[dependency_key].acquire()
self._dependencies[dependency_key] = DependencyState(
stage=DependencyStage.DOWNLOADING,
dependency_key=dependency_key,
path=self._assign_path(dependency_key),
size_bytes=0,
dependents=set([uuid]),
last_used=now,
message="Starting download",
killed=False,
)
# update last_used as long as it isn't in FAILED
if self._dependencies[dependency_key].stage != DependencyStage.FAILED:
self._dependencies[dependency_key].dependents.add(uuid)
self._dependencies[dependency_key] = self._dependencies[dependency_key]._replace(
last_used=now
)
self._dependency_locks[dependency_key].release()
return self._dependencies[dependency_key]
def release(self, uuid, dependency_key):
"""
Register that the run with uuid is no longer dependent on this dependency
If no more runs are dependent on this dependency, kill it
"""
if self._acquire_if_exists(dependency_key):
dep_state = self._dependencies[dependency_key]
if uuid in dep_state.dependents:
dep_state.dependents.remove(uuid)
if not dep_state.dependents:
dep_state = dep_state._replace(killed=True)
self._dependencies[dependency_key] = dep_state
self._dependency_locks[dependency_key].release()
def _acquire_if_exists(self, dependency_key):
"""
Safely acquires a lock for the given dependency if it exists
        Returns True if the dependency exists, False otherwise
Callers should remember to release the lock
"""
with self._global_lock:
if dependency_key in self._dependencies:
self._dependency_locks[dependency_key].acquire()
return True
else:
return False
def _acquire_all_locks(self):
"""
Acquires all dependency locks in the thread it's called from
"""
with self._global_lock:
for dependency, lock in self._dependency_locks.items():
lock.acquire()
def _release_all_locks(self):
"""
Releases all dependency locks in the thread it's called from
"""
with self._global_lock:
for dependency, lock in self._dependency_locks.items():
lock.release()
def _assign_path(self, dependency_key):
"""
Normalize the path for the dependency by replacing / with _, avoiding conflicts
"""
if dependency_key.parent_path:
path = os.path.join(dependency_key.parent_uuid, dependency_key.parent_path)
else:
path = dependency_key.parent_uuid
path = path.replace(os.path.sep, '_')
# You could have a conflict between, for example a/b_c and
# a_b/c. We have to avoid those.
with self._paths_lock:
while path in self._paths:
path = path + '_'
self._paths.add(path)
return path
def _store_dependency(self, dependency_path, fileobj, target_type):
"""
Copy the dependency fileobj to its path in the local filesystem
Overwrite existing files by the same name if found
(may happen if filesystem modified outside the dependency manager,
for example during an update if the state gets reset but filesystem
doesn't get cleared)
"""
try:
if os.path.exists(dependency_path):
logger.info('Path %s already exists, overwriting', dependency_path)
if os.path.isdir(dependency_path):
shutil.rmtree(dependency_path)
else:
os.remove(dependency_path)
if target_type == 'directory':
un_tar_directory(fileobj, dependency_path, 'gz')
else:
with open(dependency_path, 'wb') as f:
logger.debug('copying file to %s', dependency_path)
shutil.copyfileobj(fileobj, f)
except Exception:
raise
@property
def all_dependencies(self):
with self._global_lock:
return list(self._dependencies.keys())
def _transition_from_DOWNLOADING(self, dependency_state):
def download():
def update_state_and_check_killed(bytes_downloaded):
"""
Callback method for bundle service client updates dependency state and
raises DownloadAbortedException if download is killed by dep. manager
"""
with self._dependency_locks[dependency_state.dependency_key]:
state = self._dependencies[dependency_state.dependency_key]
if state.killed:
raise DownloadAbortedException("Aborted by user")
self._dependencies[dependency_state.dependency_key] = state._replace(
size_bytes=bytes_downloaded,
message="Downloading dependency: %s downloaded"
% size_str(bytes_downloaded),
)
dependency_path = os.path.join(self.dependencies_dir, dependency_state.path)
logger.debug('Downloading dependency %s', dependency_state.dependency_key)
try:
# Start async download to the fileobj
fileobj, target_type = self._bundle_service.get_bundle_contents(
dependency_state.dependency_key.parent_uuid,
dependency_state.dependency_key.parent_path,
)
with closing(fileobj):
# "Bug" the fileobj's read function so that we can keep
# track of the number of bytes downloaded so far.
old_read_method = fileobj.read
bytes_downloaded = [0]
def interruptable_read(*args, **kwargs):
data = old_read_method(*args, **kwargs)
bytes_downloaded[0] += len(data)
update_state_and_check_killed(bytes_downloaded[0])
return data
fileobj.read = interruptable_read
# Start copying the fileobj to filesystem dependency path
self._store_dependency(dependency_path, fileobj, target_type)
logger.debug(
'Finished downloading %s dependency %s to %s',
target_type,
dependency_state.dependency_key,
dependency_path,
)
with self._dependency_locks[dependency_state.dependency_key]:
self._downloading[dependency_state.dependency_key]['success'] = True
except Exception as e:
with self._dependency_locks[dependency_state.dependency_key]:
self._downloading[dependency_state.dependency_key]['success'] = False
self._downloading[dependency_state.dependency_key][
'failure_message'
] = "Dependency download failed: %s " % str(e)
self._downloading.add_if_new(
dependency_state.dependency_key, threading.Thread(target=download, args=[])
)
if self._downloading[dependency_state.dependency_key].is_alive():
return dependency_state
success = self._downloading[dependency_state.dependency_key]['success']
failure_message = self._downloading[dependency_state.dependency_key]['failure_message']
self._downloading.remove(dependency_state.dependency_key)
if success:
return dependency_state._replace(
stage=DependencyStage.READY, message="Download complete"
)
else:
with self._paths_lock:
self._paths.remove(dependency_state.path)
return dependency_state._replace(stage=DependencyStage.FAILED, message=failure_message)
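# A minimal usage sketch (the bundle service client, run uuid and DependencyKey are
# hypothetical here; in practice they come from the CodaLab worker setup):
#
#   manager = LocalFileSystemDependencyManager(
#       commit_file='/tmp/worker/dependencies-state.json',
#       bundle_service=bundle_service_client,
#       worker_dir='/tmp/worker',
#       max_cache_size_bytes=10 * 1024 ** 3,
#   )
#   manager.start()
#   dep_state = manager.get(run_uuid, dependency_key)   # registers run_uuid as a dependent
#   ...
#   manager.release(run_uuid, dependency_key)
#   manager.stop()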
|
run.py
|
import os
import time
import shutil
import torch
import numpy as np
import numpy.random as rd
import multiprocessing as mp
from env import build_env
from replay import ReplayBuffer, ReplayBufferMP, ReplayBufferMARL
from evaluator import Evaluator
from tqdm import tqdm
"""[ElegantRL.2021.09.09](https://github.com/AI4Finance-LLC/ElegantRL)"""
class Arguments:
def __init__(self, if_on_policy=False):
self.env = None # the environment for training
self.agent = None # Deep Reinforcement Learning algorithm
'''Arguments for training'''
self.gamma = 0.99 # discount factor of future rewards
        self.reward_scale = 2 ** 0  # reward scaling; an approximate target reward is usually close to 256
self.learning_rate = 2 ** -15 # 2 ** -14 ~= 3e-5
self.soft_update_tau = 2 ** -8 # 2 ** -8 ~= 5e-3
self.if_on_policy = if_on_policy
self.marl = True # multi-agent flag read by train_and_evaluate() below (not defined elsewhere in this file)
self.n_agents = 3 # number of agents expected by train_and_evaluate(); assumed default matching the hard-coded 3-agent buffer
if self.if_on_policy: # (on-policy)
self.net_dim = 2 ** 9 # the network width
self.batch_size = self.net_dim * 2 # num of transitions sampled from replay buffer.
self.repeat_times = 2 ** 3 # repeatedly update network to keep critic's loss small
self.target_step = 2 ** 12 # collect target_step, then update network
self.max_memo = self.target_step # capacity of replay buffer
self.if_per_or_gae = False # GAE for on-policy sparse reward: Generalized Advantage Estimation.
else:
self.net_dim = 2 ** 8 # the network width
self.batch_size = self.net_dim # num of transitions sampled from replay buffer.
self.repeat_times = 2 ** 0 # repeatedly update network to keep critic's loss small
self.target_step = 2 ** 10 # collect target_step, then update network
self.max_memo = 2 ** 21 # capacity of replay buffer
self.if_per_or_gae = False # PER for off-policy sparse reward: Prioritized Experience Replay.
'''Arguments for device'''
self.env_num = 1 # The Environment number for each worker. env_num == 1 means don't use VecEnv.
self.worker_num = 2 # number of rollout workers per GPU (adjust it to get high GPU usage)
self.thread_num = 8 # number of CPU threads for PyTorch, used in torch.set_num_threads(self.thread_num)
self.visible_gpu = '0' # for example: os.environ['CUDA_VISIBLE_DEVICES'] = '0, 2,'
self.random_seed = 0 # initialize random seed in self.init_before_training()
'''Arguments for evaluate and save'''
self.cwd = None # current work directory. None means set automatically
self.if_remove = True # remove the cwd folder? (True, False, None:ask me)
self.break_step = 2 ** 20 # break training after 'total_step > break_step'
self.if_allow_break = True # allow break training when reach goal (early termination)
self.eval_env = None # the environment for evaluating. None means set automatically.
self.eval_gap = 2 ** 7 # evaluate the agent every eval_gap seconds
self.eval_times1 = 2 ** 3 # number of episodes for the first (quick) evaluation
self.eval_times2 = 2 ** 4 # number of episodes for the second (thorough) evaluation
self.eval_device_id = -1 # -1 means use cpu, >=0 means use GPU
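# Decoded defaults for quick reference: off-policy -> net_dim 256, batch_size 256,
# target_step 1024, max_memo 2**21 (~2.1M); on-policy -> net_dim 512, batch_size 1024,
# target_step 4096; learning_rate 2**-15 ~= 3e-5, soft_update_tau 2**-8 ~= 4e-3.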
def init_before_training(self, if_main):
np.random.seed(self.random_seed)
torch.manual_seed(self.random_seed)
torch.set_num_threads(self.thread_num)
torch.set_default_dtype(torch.float32)
os.environ['CUDA_VISIBLE_DEVICES'] = str(self.visible_gpu)
'''env'''
if self.env is None:
raise RuntimeError(f'\n| Why env=None? For example:'
f'\n| args.env = XxxEnv()'
f'\n| args.env = str(env_name)'
f'\n| args.env = build_env(env_name), from elegantrl.env import build_env')
if not (isinstance(self.env, str) or hasattr(self.env, 'env_name')):
raise RuntimeError('\n| What is env.env_name? use env=PreprocessEnv(env).')
'''agent'''
if self.agent is None:
raise RuntimeError('\n| Why agent=None? Please assign it, e.g. `args.agent = AgentXXX()`.')
if not hasattr(self.agent, 'init'):
raise RuntimeError("\n| Why does `self.agent` have no `init()` method?"
'\n| Use `agent=AgentXXX()` instead of `agent=AgentXXX`.')
if self.agent.if_on_policy != self.if_on_policy:
raise RuntimeError(f'\n| Why bool `if_on_policy` is not consistent?'
f'\n| self.if_on_policy: {self.if_on_policy}'
f'\n| self.agent.if_on_policy: {self.agent.if_on_policy}')
'''cwd'''
if self.cwd is None:
agent_name = self.agent.__class__.__name__
env_name = getattr(self.env, 'env_name', self.env)
self.cwd = f'./{agent_name}_{env_name}_{self.visible_gpu}'
if if_main:
# remove history according to bool(if_remove)
if self.if_remove is None:
self.if_remove = bool(input(f"| PRESS 'y' to REMOVE: {self.cwd}? ") == 'y')
elif self.if_remove:
shutil.rmtree(self.cwd, ignore_errors=True)
print(f"| Remove cwd: {self.cwd}")
os.makedirs(self.cwd, exist_ok=True)
'''single processing training'''
def mpe_make_env(scenario_name, benchmark=False):
'''
Creates a MultiAgentEnv object as env. This can be used like a gym
environment by calling env.reset() and env.step().
Use env.render() to view the environment on the screen.
Input:
scenario_name : name of the scenario from ./scenarios/ to be loaded
(without the .py extension)
benchmark : whether you want to produce benchmarking data
(usually only done during evaluation)
Some useful env properties (see environment.py):
.observation_space : Returns the observation space for each agent
.action_space : Returns the action space for each agent
.n : Returns the number of Agents
'''
from multiagent.environment import MultiAgentEnv
import multiagent.scenarios as scenarios
# load scenario from script
scenario = scenarios.load(scenario_name + ".py").Scenario()
# create world
world = scenario.make_world()
# create multiagent environment
if benchmark:
env = MultiAgentEnv(world, scenario.reset_world, scenario.reward, scenario.observation, scenario.benchmark_data)
else:
env = MultiAgentEnv(world, scenario.reset_world, scenario.reward, scenario.observation)
return env
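# Usage sketch for mpe_make_env (assumes the MPE `multiagent` package is installed and
# a scenario such as simple_spread.py exists under ./scenarios/; the scenario name is
# illustrative):
#   env = mpe_make_env('simple_spread')
#   obs_n = env.reset()                                    # one observation per agent
#   actions = [space.sample() for space in env.action_space]
#   obs_n, reward_n, done_n, _ = env.step(actions)         # one entry per agent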
def train_and_evaluate(args, agent_id=0):
args.init_before_training(if_main=True)
env = build_env(args.env, if_print=False)
'''init: Agent'''
agent = args.agent
agent.init(args.net_dim, env.state_dim, env.action_dim, args.learning_rate, args.marl, args.n_agents, args.if_per_or_gae, args.env_num)
#agent.save_or_load_agent(args.cwd, if_save=False)
'''init Evaluator'''
eval_env = build_env(env) if args.eval_env is None else args.eval_env
evaluator = Evaluator(args.cwd, agent_id, agent.device, eval_env,
args.eval_gap, args.eval_times1, args.eval_times2)
evaluator.save_or_load_recoder(if_save=False)
'''init ReplayBuffer'''
if agent.if_on_policy:
buffer = list()
else:
buffer = ReplayBufferMARL(max_len=args.max_memo, state_dim=env.state_dim,
action_dim=env.action_dim, n_agents=3,
if_use_per=args.if_per_or_gae)
buffer.save_or_load_history(args.cwd, if_save=False)
"""start training"""
cwd = args.cwd
gamma = args.gamma
break_step = args.break_step
batch_size = args.batch_size
target_step = args.target_step
repeat_times = args.repeat_times
reward_scale = args.reward_scale
if_allow_break = args.if_allow_break
soft_update_tau = args.soft_update_tau
del args
'''choose update_buffer()'''
if agent.if_on_policy:
assert isinstance(buffer, list)
def update_buffer(_trajectory):
_trajectory = list(map(list, zip(*_trajectory))) # 2D-list transpose
ten_state = torch.as_tensor(_trajectory[0])
ten_reward = torch.as_tensor(_trajectory[1], dtype=torch.float32) * reward_scale
ten_mask = (1.0 - torch.as_tensor(_trajectory[2], dtype=torch.float32)) * gamma # _trajectory[2] = done
ten_action = torch.as_tensor(_trajectory[3])
ten_noise = torch.as_tensor(_trajectory[4], dtype=torch.float32)
buffer[:] = (ten_state, ten_action, ten_noise, ten_reward, ten_mask)
_steps = ten_reward.shape[0]
_r_exp = ten_reward.mean()
return _steps, _r_exp
else:
assert isinstance(buffer, ReplayBufferMARL)
def update_buffer(_trajectory_list):
_steps = 0
_r_exp = 0
#print(_trajectory_list.shape)
for _trajectory in _trajectory_list:
ten_state = torch.as_tensor([item[0] for item in _trajectory], dtype=torch.float32)
ten_reward = torch.as_tensor([item[1] for item in _trajectory])
ten_done = torch.as_tensor([item[2] for item in _trajectory])
ten_action = torch.as_tensor([item[3] for item in _trajectory])
ten_reward = ten_reward * reward_scale # ten_reward
ten_mask = (1.0 - ten_done *1) * gamma # ten_mask = (1.0 - ary_done) * gamma
buffer.extend_buffer(ten_state, ten_reward, ten_mask, ten_action)
_steps += ten_state.shape[0]
_r_exp += ten_reward.mean() # other = (reward, mask, action)
return _steps, _r_exp
'''init ReplayBuffer after training start'''
agent.states = env.reset()
agent.if_on_policy = True # NOTE: forces the on-policy flag, so the pre-training exploration below is skipped and the buffer is never saved at the end
if not agent.if_on_policy:
#if_load = buffer.save_or_load_history(cwd, if_save=False)
if_load = 0
if not if_load:
trajectory = explore_before_training(env, target_step)
trajectory = [trajectory, ]
steps, r_exp = update_buffer(trajectory)
evaluator.total_step += steps
'''start training loop'''
if_train = True
#cnt_train = 0
state = env.reset()
for cnt_train in tqdm(range(2000000)):
# while if_train or cnt_train < 2000000:
if cnt_train % 100 == 0 and cnt_train > 0:
state = env.reset()
with torch.no_grad():
traj_temp = list()
actions = []
for i in range(agent.n_agents):
action = agent.agents[i].select_actions(state[i])
actions.append(action)
next_s, reward, done, _ = env.step(actions)
traj_temp.append((state, reward, done, actions))
state = next_s
#trajectory = agent.explore_env(env, target_step)
steps, r_exp = update_buffer([traj_temp,])
if cnt_train > agent.batch_size: # start updates once enough transitions have been collected (assumes the agent exposes batch_size)
agent.update_net(buffer, batch_size, repeat_times, soft_update_tau)
if cnt_train % 1000 == 0:
with torch.no_grad():
temp = evaluator.evaluate_and_save_marl(agent, steps, r_exp)
if_reach_goal, if_save = temp
if_train = not ((if_allow_break and if_reach_goal)
or evaluator.total_step > break_step
or os.path.exists(f'{cwd}/stop'))
print(f'| UsedTime: {time.time() - evaluator.start_time:>7.0f} | SavedDir: {cwd}')
env.close()
agent.save_or_load_agent(cwd, if_save=True)
buffer.save_or_load_history(cwd, if_save=True) if not agent.if_on_policy else None
evaluator.save_or_load_recoder(if_save=True)
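# Usage sketch for the single-process entry point above (the agent class and scenario
# name are placeholders; any agent exposing init()/select_actions()/update_net() as
# used above would fit):
#   args = Arguments(if_on_policy=False)
#   args.agent = AgentXXX()          # e.g. a MADDPG-style multi-agent agent instance
#   args.env = 'simple_spread'       # a str name, or an object with .env_name, accepted by build_env()
#   args.visible_gpu = '0'
#   train_and_evaluate(args)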
def explore_before_training(env, target_step): # for off-policy only
trajectory = list()
if_discrete = env.if_discrete
action_dim = env.action_dim
state = env.reset()
step = 0
k = 0
while True:
k += 1
if if_discrete:
# NOTE: this MARL variant samples continuous noise for all 3 agents even in the 'discrete' branch
action = [rd.randn(action_dim), rd.randn(action_dim), rd.randn(action_dim)]
else:
action = rd.uniform(-1, 1, size=action_dim)
next_s, reward, done, _ = env.step(action)
trajectory.append((state, reward, done, action))
if k > 100:
state = env.reset()
k = 0
else:
state = next_s
step += 1
if done and step > target_step:
break
return trajectory
def explore_before_training_vec_env(env, target_step) -> list: # for off-policy only
# plan to be elegant: merge this function to explore_before_training()
assert hasattr(env, 'env_num')
env_num = env.env_num
trajectory_list = [list() for _ in range(env_num)]
if_discrete = env.if_discrete
action_dim = env.action_dim
states = env.reset()
step = 0
while True:
if if_discrete:
actions = rd.randint(action_dim, size=env_num)
s_r_d_list = env.step(actions)
next_states = list()
for env_i in range(env_num):
next_s, reward, done = s_r_d_list[env_i]
trajectory_list[env_i].append((states[env_i], (reward, done, actions[env_i])))
next_states.append(next_s)
else:
actions = rd.uniform(-1, 1, size=(env_num, action_dim))
s_r_d_list = env.step(actions)
next_states = list()
for env_i in range(env_num):
next_s, reward, done = s_r_d_list[env_i]
trajectory_list[env_i].append((states[env_i], (reward, done, *actions[env_i])))
next_states.append(next_s)
states = next_states
step += 1
if step > target_step:
break
return trajectory_list
'''multiple processing training'''
class PipeWorker:
def __init__(self, env_num, worker_num):
self.env_num = env_num
self.worker_num = worker_num
self.pipes = [mp.Pipe() for _ in range(worker_num)]
self.pipe1s = [pipe[1] for pipe in self.pipes]
def explore(self, agent):
act_dict = agent.act.state_dict()
for worker_id in range(self.worker_num):
self.pipe1s[worker_id].send(act_dict)
trajectory_lists = [pipe1.recv() for pipe1 in self.pipe1s]
return trajectory_lists
def run(self, args, comm_env, worker_id, learner_id):
# print(f'| os.getpid()={os.getpid()} PipeExplore.run {learner_id}')
args.init_before_training(if_main=False)
'''init Agent'''
env = build_env(args.env, if_print=False)
agent = args.agent
agent.init(args.net_dim, env.state_dim, env.action_dim,
args.learning_rate, args.if_per_or_gae, args.env_num, learner_id)
'''loop'''
gamma = args.gamma
target_step = args.target_step
reward_scale = args.reward_scale
if args.if_on_policy:
def convert_trajectory(_trajectory): # on-policy
_trajectory = list(map(list, zip(*_trajectory))) # 2D-list transpose
ten_state = torch.as_tensor(_trajectory[0])
ten_reward = torch.as_tensor(_trajectory[1], dtype=torch.float32) * reward_scale
ten_mask = (1 - torch.as_tensor(_trajectory[2], dtype=torch.float32)) * gamma
ten_action = torch.as_tensor(_trajectory[3])
ten_noise = torch.as_tensor(_trajectory[4], dtype=torch.float32)
return ten_state, ten_action, ten_noise, ten_reward, ten_mask
else:
def convert_trajectory(_trajectory): # off-policy
temp = list()
for item_trajectory in _trajectory:
ten_state = torch.as_tensor([item[0] for item in item_trajectory])
ten_other = torch.as_tensor([item[1] for item in item_trajectory])
ten_other[:, 0] = ten_other[:, 0] * reward_scale # ten_reward
ten_other[:, 1] = (1.0 - ten_other[:, 1]) * gamma # ten_mask = (1.0 - ten_done) * gamma
temp.append((ten_state, ten_other))
return temp
del args
if comm_env:
env = comm_env
agent.states = env.reset()
else:
agent.states = [env.reset(), ]
with torch.no_grad():
while True:
act_dict = self.pipes[worker_id][0].recv()
agent.act.load_state_dict(act_dict)
trajectory = agent.explore_env(env, target_step)
trajectory = convert_trajectory(trajectory)
self.pipes[worker_id][0].send(trajectory)
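# The explore()/run() pair above is a simple request/response handshake over mp.Pipe():
# the learner pushes the latest actor weights, each worker replies with a converted
# trajectory. Schematically (illustrative, not extra API):
#   learner side: pipe1.send(act.state_dict()); trajectory = pipe1.recv()
#   worker side:  act_dict = pipe0.recv(); explore; pipe0.send(trajectory)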
def get_comm_data(agent):
act = list(agent.act.parameters())
cri_optim = get_optim_parameters(agent.cri_optim)
if agent.cri is agent.act:
cri = None
act_optim = None
else:
cri = list(agent.cri.parameters())
act_optim = get_optim_parameters(agent.act_optim)
act_target = list(agent.act_target.parameters()) if agent.if_use_act_target else None
cri_target = list(agent.cri_target.parameters()) if agent.if_use_cri_target else None
return act, act_optim, cri, cri_optim, act_target, cri_target # data
class PipeLearner:
def __init__(self, learner_num):
self.learner_num = learner_num
self.round_num = int(np.log2(learner_num))
self.pipes = [mp.Pipe() for _ in range(learner_num)]
pipes = [mp.Pipe() for _ in range(learner_num)]
self.pipe0s = [pipe[0] for pipe in pipes]
self.pipe1s = [pipe[1] for pipe in pipes]
self.device_list = [torch.device(f'cuda:{i}') for i in range(learner_num)]
if learner_num == 1:
self.idx_l = None
elif learner_num == 2:
self.idx_l = [(1,), (0,), ]
elif learner_num == 4:
self.idx_l = [(1, 2), (0, 3),
(3, 0), (2, 1), ]
elif learner_num == 8:
self.idx_l = [(1, 2, 4), (0, 3, 5),
(3, 0, 6), (2, 1, 7),
(5, 6, 0), (4, 7, 1),
(7, 4, 2), (6, 5, 3), ]
else:
print(f"| LearnerPipe, ERROR: learner_num {learner_num} should in (1, 2, 4, 8)")
exit()
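# Note on idx_l: idx_l[learner_id][round_id] is the partner learner for that averaging
# round, giving a butterfly-style schedule. For learner_num == 4, round 0 pairs
# (0,1) and (2,3), round 1 pairs (0,2) and (1,3), so after round_num = log2(learner_num)
# rounds every learner has averaged parameters with every other learner, directly or indirectly.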
def comm_data(self, data, learner_id, round_id):
if round_id == -1:
learner_jd = self.idx_l[learner_id][round_id]
self.pipes[learner_jd][0].send(data)
return self.pipes[learner_id][1].recv()
else:
learner_jd = self.idx_l[learner_id][round_id]
self.pipe0s[learner_jd].send(data)
return self.pipe1s[learner_id].recv()
def comm_network_optim(self, agent, learner_id):
device = self.device_list[learner_id]
for round_id in range(self.round_num):
data = get_comm_data(agent)
data = self.comm_data(data, learner_id, round_id)
if data:
avg_update_net(agent.act, data[0], device)
avg_update_optim(agent.act_optim, data[1], device) if data[1] else None
avg_update_net(agent.cri, data[2], device) if data[2] else None
avg_update_optim(agent.cri_optim, data[3], device)
avg_update_net(agent.act_target, data[4], device) if agent.if_use_act_target else None
avg_update_net(agent.cri_target, data[5], device) if agent.if_use_cri_target else None
def run(self, args, comm_eva, comm_exp, learner_id=0):
# print(f'| os.getpid()={os.getpid()} PipeLearn.run, {learner_id}')
args.init_before_training(if_main=learner_id == 0)
env = build_env(args.env, if_print=False)
if_on_policy = args.if_on_policy
'''init Agent'''
agent = args.agent
agent.init(args.net_dim, env.state_dim, env.action_dim,
args.learning_rate, args.if_per_or_gae, args.env_num, learner_id)
agent.save_or_load_agent(args.cwd, if_save=False)
'''init ReplayBuffer'''
if if_on_policy:
buffer = list()
def update_buffer(_trajectory_lists): # on-policy
# _trajectory_lists = ((ten_state, ten_action, ten_noise, ten_reward, ten_mask), ...)
_trajectory_lists = list(map(list, zip(*_trajectory_lists)))
buffer[:] = [torch.cat(tensors, dim=0) for tensors in _trajectory_lists]
_steps = buffer[3].shape[0] # buffer[3] = ary_reward
_r_exp = buffer[3].mean().item() # buffer[3] = ary_reward
return _steps, _r_exp
else:
buffer_num = args.worker_num * args.env_num
if self.learner_num > 1:
buffer_num *= 2
buffer = ReplayBufferMP(max_len=args.max_memo, state_dim=env.state_dim,
action_dim=1 if env.if_discrete else env.action_dim,
if_use_per=args.if_per_or_gae,
buffer_num=buffer_num, gpu_id=learner_id)
def update_buffer(_trajectory_lists): # off-policy
# trajectory_lists = ((ten_state, ten_other), ...)
_trajectory_lists = sum(_trajectory_lists, list()) # list2d.flatten()
_steps = 0
_r_exp = 0
for idx_buffer in range(buffer.worker_num):
ten_state, ten_other = _trajectory_lists[idx_buffer]
buffer.buffers[idx_buffer].extend_buffer(ten_state, ten_other)
_steps += ten_other.shape[0]
_r_exp += ten_other[:, 0].sum() # other = (reward, mask, *action)
_r_exp /= _steps
return _steps, _r_exp
buffer.save_or_load_history(args.cwd, if_save=False)
'''start training'''
cwd = args.cwd
batch_size = args.batch_size
repeat_times = args.repeat_times
soft_update_tau = args.soft_update_tau
del args
if_train = True
while if_train:
trajectory_lists = comm_exp.explore(agent)
if self.learner_num > 1:
data = self.comm_data(trajectory_lists, learner_id, round_id=-1)
trajectory_lists.extend(data)
steps, r_exp = update_buffer(trajectory_lists)
logging_tuple = agent.update_net(buffer, batch_size, repeat_times, soft_update_tau)
if self.learner_num > 1:
self.comm_network_optim(agent, learner_id)
if comm_eva:
if_train, if_save = comm_eva.evaluate_and_save_mp(agent.act, steps, r_exp, logging_tuple)
agent.save_or_load_agent(cwd, if_save=True)
if not if_on_policy:
print(f"| LearnerPipe.run: ReplayBuffer saving in {cwd}")
buffer.save_or_load_history(cwd, if_save=True)
class PipeEvaluator:
def __init__(self):
super().__init__()
self.pipe0, self.pipe1 = mp.Pipe()
def evaluate_and_save_mp(self, agent_act, steps, r_exp, logging_tuple):
if self.pipe1.poll(): # if_evaluator_idle
if_train, if_save = self.pipe1.recv()
act_cpu_dict = {k: v.cpu() for k, v in agent_act.state_dict().items()}
else:
if_train, if_save = True, False
act_cpu_dict = None
self.pipe1.send((act_cpu_dict, steps, r_exp, logging_tuple))
return if_train, if_save
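# Protocol note: the learner-side call above only ships a fresh state_dict when the
# evaluator has already replied (pipe1.poll() is True); otherwise it sends None and keeps
# training, so evaluation never blocks the optimisation loop. run() below is the pipe0 side.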
def run(self, args, agent_id):
# print(f'| os.getpid()={os.getpid()} PipeEvaluate.run {agent_id}')
args.init_before_training(if_main=False)
'''init: Agent'''
env = build_env(args.env, if_print=False)
agent = args.agent
agent.init(args.net_dim, env.state_dim, env.action_dim, args.learning_rate,
args.if_per_or_gae, args.env_num, agent_id=args.eval_device_id)
agent.save_or_load_agent(args.cwd, if_save=False)
act_cpu = agent.act
act_cpu.eval()
[setattr(param, 'requires_grad', False) for param in act_cpu.parameters()]
'''init Evaluator'''
eval_env = args.eval_env if args.eval_env else build_env(env, if_print=False)
evaluator = Evaluator(args.cwd, agent_id, agent.device, eval_env,
args.eval_gap, args.eval_times1, args.eval_times2)
evaluator.save_or_load_recoder(if_save=False)
del agent
del env
'''loop'''
cwd = args.cwd
break_step = args.break_step
if_allow_break = args.if_allow_break
del args
if_save = False
if_train = True
if_reach_goal = False
with torch.no_grad():
while if_train:
act_cpu_dict, steps, r_exp, logging_tuple = self.pipe0.recv()
if act_cpu_dict:
act_cpu.load_state_dict(act_cpu_dict)
if_reach_goal, if_save = evaluator.evaluate_and_save(act_cpu, steps, r_exp, logging_tuple)
else:
evaluator.total_step += steps
if_train = not ((if_allow_break and if_reach_goal)
or evaluator.total_step > break_step
or os.path.exists(f'{cwd}/stop'))
self.pipe0.send((if_train, if_save))
print(f'| UsedTime: {time.time() - evaluator.start_time:>7.0f} | SavedDir: {cwd}')
evaluator.save_or_load_recoder(if_save=True)
class PipeVectorEnv:
def __init__(self, args):
self.env_num = args.env_num
self.pipes = [mp.Pipe() for _ in range(self.env_num)]
self.pipe0s = [pipe[0] for pipe in self.pipes]
env = build_env(args.eval_env)
self.max_step = env.max_step
self.env_name = env.env_name
self.state_dim = env.state_dim
self.action_dim = env.action_dim
self.action_max = env.action_max
self.if_discrete = env.if_discrete
self.target_return = env.target_return
del env
self.process = list()
for env_id in range(args.env_num):
self.process.append(mp.Process(target=self.run, args=(args, env_id)))
args.random_seed += 1 # set different for each env
# [p.start() for p in self.process]
def reset(self):
vec_state = [pipe0.recv() for pipe0 in self.pipe0s]
return vec_state
def step(self, vec_action): # pipe0_step
for i in range(self.env_num):
self.pipe0s[i].send(vec_action[i])
return [pipe0.recv() for pipe0 in self.pipe0s] # list of (state, reward, done)
def run(self, args, env_id):
np.random.seed(args.random_seed)
env = build_env(args.eval_env, if_print=False)
pipe1 = self.pipes[env_id][1]
del args
state = env.reset()
pipe1.send(state)
while True:
action = pipe1.recv()
state, reward, done, _ = env.step(action)
pipe1.send((env.reset() if done else state, reward, done))
# def check(self):
# vec_state = self.reset()
# ten_state = np.array(vec_state)
# print(ten_state.shape)
#
# vec_action = np.array(((0.0, 1.0, 0.0),
# (0.0, 0.5, 0.0),
# (0.0, 0.1, 0.0),))[:self.env_num]
# assert self.env_num <= 3
#
# trajectory_list = list()
# for _ in range(8):
# s_r_d_list = self.step(vec_action)
# ten_state = np.array([s_r_d[0] for s_r_d in s_r_d_list])
# print(ten_state.shape)
# trajectory_list.append(s_r_d_list)
#
# trajectory_list = list(map(list, zip(*trajectory_list))) # 2D-list transpose
# print('| shape of trajectory_list:', len(trajectory_list), len(trajectory_list[0]))
def train_and_evaluate_mp(args, agent_id=0):
process = list()
'''learner'''
learner_num = get_num_learner(args.visible_gpu)
learner_pipe = PipeLearner(learner_num)
for learner_id in range(learner_num):
'''evaluator'''
if learner_id == learner_num - 1:
evaluator_pipe = PipeEvaluator()
process.append(mp.Process(target=evaluator_pipe.run, args=(args, agent_id)))
else:
evaluator_pipe = None
'''explorer'''
worker_pipe = PipeWorker(args.env_num, args.worker_num)
for worker_id in range(args.worker_num):
if args.env_num == 1:
env_pipe = None
else:
env_pipe = PipeVectorEnv(args)
process.extend(env_pipe.process)
process.append(mp.Process(target=worker_pipe.run, args=(args, env_pipe, worker_id, learner_id)))
process.append(mp.Process(target=learner_pipe.run, args=(args, evaluator_pipe, worker_pipe, learner_id)))
[(p.start(), time.sleep(0.1)) for p in process]
process[-1].join()
process_safely_terminate(process)
"""Utils"""
def get_num_learner(visible_gpu):
assert isinstance(visible_gpu, str) # visible_gpu may be in {'0', '1', '1,', '1,2', '1,2,'}
gpu_ids = [gpu for gpu in visible_gpu.split(',') if gpu.strip()] # parse instead of eval() for safety
num_learner = max(1, len(gpu_ids))
return num_learner
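# Examples (illustrative): get_num_learner('0') -> 1, get_num_learner('1,2') -> 2,
# get_num_learner('1,2,') -> 2 (trailing commas are ignored).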
def process_safely_terminate(process):
for p in process:
try:
p.kill()
except OSError as e:
print(e)
pass
def get_optim_parameters(optim): # for avg_update_optim()
params_list = list()
for params_dict in optim.state_dict()['state'].values():
params_list.extend([t for t in params_dict.values() if isinstance(t, torch.Tensor)])
return params_list
def avg_update_optim(dst_optim, src_optim_param, device):
for dst, src in zip(get_optim_parameters(dst_optim), src_optim_param):
dst.data.copy_((dst.data + src.data.to(device)) * 0.5)
# dst.data.copy_(src.data * tau + dst.data * (1 - tau))
def avg_update_net(dst_net, src_net_param, device):
for dst, src in zip(dst_net.parameters(), src_net_param):
dst.data.copy_((dst.data + src.data.to(device)) * 0.5)
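# For reference, the 0.5-averaging above is the tau = 0.5 case of the generic soft
# update hinted at in the commented line; a small sketch, not used elsewhere in this file:
def soft_update_net(dst_net, src_net, tau):
    for dst, src in zip(dst_net.parameters(), src_net.parameters()):
        dst.data.copy_(tau * src.data + (1.0 - tau) * dst.data)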
|